From 018d19de0f6178c15686b81f0accfae5c146e1d1 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 24 Jun 2019 07:22:55 -0700 Subject: [PATCH 001/439] simple_lmk: Introduce Simple Low Memory Killer for Android This is a complete low memory killer solution for Android that is small and simple. Processes are killed according to the priorities that Android gives them, so that the least important processes are always killed first. Processes are killed until memory deficits are satisfied, as observed from kswapd struggling to free up pages. Simple LMK stops killing processes when kswapd finally goes back to sleep. The only tunables are the desired amount of memory to be freed per reclaim event and desired frequency of reclaim events. Simple LMK tries to free at least the desired amount of memory per reclaim and waits until all of its victims' memory is freed before proceeding to kill more processes. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 44 +++++ drivers/android/Makefile | 1 + drivers/android/simple_lmk.c | 332 +++++++++++++++++++++++++++++++++++ include/linux/simple_lmk.h | 26 +++ kernel/fork.c | 2 + mm/vmscan.c | 4 + 6 files changed, 409 insertions(+) create mode 100644 drivers/android/simple_lmk.c create mode 100644 include/linux/simple_lmk.h diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index ee4880bfdcdc..b572f685374f 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -42,6 +42,50 @@ config ANDROID_BINDER_IPC_SELFTEST exhaustively with combinations of various buffer sizes and alignments. +config ANDROID_SIMPLE_LMK + bool "Simple Android Low Memory Killer" + depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG + ---help--- + This is a complete low memory killer solution for Android that is + small and simple. Processes are killed according to the priorities + that Android gives them, so that the least important processes are + always killed first. 
Processes are killed until memory deficits are + satisfied, as observed from kswapd struggling to free up pages. Simple + LMK stops killing processes when kswapd finally goes back to sleep. + +if ANDROID_SIMPLE_LMK + +config ANDROID_SIMPLE_LMK_AGGRESSION + int "Reclaim frequency selection" + range 1 3 + default 1 + help + This value determines how frequently Simple LMK will perform memory + reclaims. A lower value corresponds to less frequent reclaims, which + maximizes memory usage. The range of values has a logarithmic + correlation; 2 is twice as aggressive as 1, and 3 is twice as + aggressive as 2, which makes 3 four times as aggressive as 1. + + The aggression is set as a factor of kswapd's scan depth. This means + that a system with more memory will have a more expensive aggression + factor compared to a system with less memory. For example, setting an + aggression factor of 1 with 4 GiB of memory would be like setting a + factor of 2 with 8 GiB of memory; the more memory a system has, the + more expensive it is to use a lower value. + + Choosing a value of 1 here works well with systems that have 4 GiB of + memory. If the default doesn't work well, then this value should be + tweaked based on empirical results using different values. + +config ANDROID_SIMPLE_LMK_MINFREE + int "Minimum MiB of memory to free per reclaim" + range 8 512 + default 100 + help + Simple LMK will try to free at least this much memory per reclaim. 
+ +endif + endif # if ANDROID endmenu diff --git a/drivers/android/Makefile b/drivers/android/Makefile index a01254c43ee3..81cc79664cf9 100644 --- a/drivers/android/Makefile +++ b/drivers/android/Makefile @@ -2,3 +2,4 @@ ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o +obj-$(CONFIG_ANDROID_SIMPLE_LMK) += simple_lmk.o diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c new file mode 100644 index 000000000000..29637ecc5be5 --- /dev/null +++ b/drivers/android/simple_lmk.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Sultan Alsawaf . + */ + +#define pr_fmt(fmt) "simple_lmk: " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* The sched_param struct is located elsewhere in newer kernels */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +#include +#endif + +/* SEND_SIG_FORCED isn't present in newer kernels */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define SIG_INFO_TYPE SEND_SIG_FORCED +#else +#define SIG_INFO_TYPE SEND_SIG_PRIV +#endif + +/* The group argument to do_send_sig_info is different in newer kernels */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +#define KILL_GROUP_TYPE true +#else +#define KILL_GROUP_TYPE PIDTYPE_TGID +#endif + +/* The minimum number of pages to free per reclaim */ +#define MIN_FREE_PAGES (CONFIG_ANDROID_SIMPLE_LMK_MINFREE * SZ_1M / PAGE_SIZE) + +/* Kill up to this many victims per reclaim */ +#define MAX_VICTIMS 1024 + +struct victim_info { + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long size; +}; + +/* Pulled from the Android framework. Lower adj means higher priority. 
*/ +static const short adj_prio[] = { + 906, /* CACHED_APP_MAX_ADJ */ + 905, /* Cached app */ + 904, /* Cached app */ + 903, /* Cached app */ + 902, /* Cached app */ + 901, /* Cached app */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ +}; + +static struct victim_info victims[MAX_VICTIMS]; +static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); +static DECLARE_COMPLETION(reclaim_done); +static int victims_to_kill; +static bool needs_reclaim; + +static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) +{ + const struct victim_info *lhs = (typeof(lhs))lhs_ptr; + const struct victim_info *rhs = (typeof(rhs))rhs_ptr; + + return rhs->size - lhs->size; +} + +static bool vtsk_is_duplicate(struct victim_info *varr, int vlen, + struct task_struct *vtsk) +{ + int i; + + for (i = 0; i < vlen; i++) { + if (same_thread_group(varr[i].tsk, vtsk)) + return true; + } + + return false; +} + +static unsigned long find_victims(struct victim_info *varr, int *vindex, + int vmaxlen, short target_adj) +{ + unsigned long pages_found = 0; + int old_vindex = *vindex; + struct task_struct *tsk; + + for_each_process(tsk) { + struct task_struct *vtsk; + unsigned long tasksize; + + /* + * Search for tasks with the targeted importance (adj). Since + * only tasks with a positive adj can be targeted, that + * naturally excludes tasks which shouldn't be killed, like init + * and kthreads. Although oom_score_adj can still be changed + * while this code runs, it doesn't really matter. We just need + * to make sure that if the adj changes, we won't deadlock + * trying to lock a task that we locked earlier. 
+ */ + if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || + vtsk_is_duplicate(varr, *vindex, tsk)) + continue; + + vtsk = find_lock_task_mm(tsk); + if (!vtsk) + continue; + + /* Store this potential victim away for later */ + varr[*vindex].tsk = vtsk; + varr[*vindex].mm = vtsk->mm; + varr[*vindex].size = get_mm_rss(vtsk->mm); + + /* Keep track of the number of pages that have been found */ + pages_found += tasksize; + + /* Make sure there's space left in the victim array */ + if (++*vindex == vmaxlen) + break; + } + + /* + * Sort the victims in descending order of size to prioritize killing + * the larger ones first. + */ + if (pages_found) + sort(&varr[old_vindex], *vindex - old_vindex, sizeof(*varr), + victim_size_cmp, NULL); + + return pages_found; +} + +static int process_victims(struct victim_info *varr, int vlen, + unsigned long pages_needed) +{ + unsigned long pages_found = 0; + int i, nr_to_kill = 0; + + /* + * Calculate the number of tasks that need to be killed and quickly + * release the references to those that'll live. + */ + for (i = 0; i < vlen; i++) { + struct victim_info *victim = &victims[i]; + struct task_struct *vtsk = victim->tsk; + + /* The victim's mm lock is taken in find_victims; release it */ + if (pages_found >= pages_needed) { + task_unlock(vtsk); + continue; + } + + pages_found += victim->size; + nr_to_kill++; + } + + return nr_to_kill; +} + +static void scan_and_kill(unsigned long pages_needed) +{ + int i, nr_to_kill = 0, nr_victims = 0; + unsigned long pages_found = 0; + + /* + * Hold the tasklist lock so tasks don't disappear while scanning. This + * is preferred to holding an RCU read lock so that the list of tasks + * is guaranteed to be up to date. 
+ */ + read_lock(&tasklist_lock); + for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { + pages_found += find_victims(victims, &nr_victims, MAX_VICTIMS, + adj_prio[i]); + if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) + break; + } + read_unlock(&tasklist_lock); + + /* Pretty unlikely but it can happen */ + if (unlikely(!nr_victims)) + return; + + /* First round of victim processing to weed out unneeded victims */ + nr_to_kill = process_victims(victims, nr_victims, pages_needed); + + /* + * Try to kill as few of the chosen victims as possible by sorting the + * chosen victims by size, which means larger victims that have a lower + * adj can be killed in place of smaller victims with a high adj. + */ + sort(victims, nr_to_kill, sizeof(*victims), victim_size_cmp, NULL); + + /* Second round of victim processing to finally select the victims */ + nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); + + /* Kill the victims */ + WRITE_ONCE(victims_to_kill, nr_to_kill); + for (i = 0; i < nr_to_kill; i++) { + struct victim_info *victim = &victims[i]; + struct task_struct *vtsk = victim->tsk; + + pr_info("Killing %s with adj %d to free %lu KiB\n", vtsk->comm, + vtsk->signal->oom_score_adj, + victim->size << (PAGE_SHIFT - 10)); + + /* Accelerate the victim's death by forcing the kill signal */ + do_send_sig_info(SIGKILL, SIG_INFO_TYPE, vtsk, KILL_GROUP_TYPE); + + /* Grab a reference to the victim for later before unlocking */ + get_task_struct(vtsk); + task_unlock(vtsk); + } + + /* Try to speed up the death process now that we can schedule again */ + for (i = 0; i < nr_to_kill; i++) { + struct task_struct *vtsk = victims[i].tsk; + + /* Increase the victim's priority to make it die faster */ + set_user_nice(vtsk, MIN_NICE); + + /* Allow the victim to run on any CPU */ + set_cpus_allowed_ptr(vtsk, cpu_all_mask); + + /* Finally release the victim reference acquired earlier */ + put_task_struct(vtsk); + } + + /* Wait until all the victims die */ + 
wait_for_completion(&reclaim_done); +} + +static int simple_lmk_reclaim_thread(void *data) +{ + static const struct sched_param sched_max_rt_prio = { + .sched_priority = MAX_RT_PRIO - 1 + }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); + + while (1) { + bool should_stop; + + wait_event(oom_waitq, (should_stop = kthread_should_stop()) || + READ_ONCE(needs_reclaim)); + + if (should_stop) + break; + + /* + * Kill a batch of processes and wait for their memory to be + * freed. After their memory is freed, sleep for 20 ms to give + * OOM'd allocations a chance to scavenge for the newly-freed + * pages. Rinse and repeat while there are still OOM'd + * allocations. + */ + do { + scan_and_kill(MIN_FREE_PAGES); + msleep(20); + } while (READ_ONCE(needs_reclaim)); + } + + return 0; +} + +void simple_lmk_decide_reclaim(int kswapd_priority) +{ + if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) + return; + + if (!cmpxchg(&needs_reclaim, false, true)) + wake_up(&oom_waitq); +} + +void simple_lmk_stop_reclaim(void) +{ + WRITE_ONCE(needs_reclaim, false); +} + +void simple_lmk_mm_freed(struct mm_struct *mm) +{ + static atomic_t nr_killed = ATOMIC_INIT(0); + int i, nr_to_kill; + + nr_to_kill = READ_ONCE(victims_to_kill); + for (i = 0; i < nr_to_kill; i++) { + if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { + if (atomic_inc_return(&nr_killed) == nr_to_kill) { + WRITE_ONCE(victims_to_kill, 0); + nr_killed = (atomic_t)ATOMIC_INIT(0); + complete(&reclaim_done); + } + break; + } + } +} + +/* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ +static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) +{ + static bool init_done; + struct task_struct *thread; + + if (cmpxchg(&init_done, false, true)) + return 0; + + thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); + BUG_ON(IS_ERR(thread)); + + return 0; +} + +static const struct kernel_param_ops simple_lmk_init_ops = { + .set = 
simple_lmk_init_set +}; + +/* Needed to prevent Android from thinking there's no LMK and thus rebooting */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "lowmemorykiller." +module_param_cb(minfree, &simple_lmk_init_ops, NULL, 0200); diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h new file mode 100644 index 000000000000..b0c247f2f2a5 --- /dev/null +++ b/include/linux/simple_lmk.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Sultan Alsawaf . + */ +#ifndef _SIMPLE_LMK_H_ +#define _SIMPLE_LMK_H_ + +struct mm_struct; + +#ifdef CONFIG_ANDROID_SIMPLE_LMK +void simple_lmk_decide_reclaim(int kswapd_priority); +void simple_lmk_stop_reclaim(void); +void simple_lmk_mm_freed(struct mm_struct *mm); +#else +static inline void simple_lmk_decide_reclaim(int kswapd_priority) +{ +} +static inline void simple_lmk_stop_reclaim(void) +{ +} +static inline void simple_lmk_mm_freed(struct mm_struct *mm) +{ +} +#endif + +#endif /* _SIMPLE_LMK_H_ */ diff --git a/kernel/fork.c b/kernel/fork.c index 40cebd5b7163..11a1af672858 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include @@ -941,6 +942,7 @@ static inline void __mmput(struct mm_struct *mm) } if (mm->binfmt) module_put(mm->binfmt->module); + simple_lmk_mm_freed(mm); mmdrop(mm); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 665d622ee4ff..1fa8a9d117c9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -3655,6 +3656,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; + simple_lmk_decide_reclaim(sc.priority); sc.reclaim_idx = classzone_idx; /* @@ -3787,6 +3789,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * succeed. 
*/ if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { + simple_lmk_stop_reclaim(); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. @@ -3823,6 +3826,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o */ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { + simple_lmk_stop_reclaim(); trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* From 3254e3610b24205de53cc37f7210b2b591d48338 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 20 Jul 2019 09:54:58 -0700 Subject: [PATCH 002/439] simple_lmk: Fix pages_found calculation Previously, pages_found would be calculated using an uninitialized variable. Fix it. Reported-by: Julian Liu Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 29637ecc5be5..e7e91b7b4c7b 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -100,7 +100,6 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, for_each_process(tsk) { struct task_struct *vtsk; - unsigned long tasksize; /* * Search for tasks with the targeted importance (adj). Since @@ -125,7 +124,7 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, varr[*vindex].size = get_mm_rss(vtsk->mm); /* Keep track of the number of pages that have been found */ - pages_found += tasksize; + pages_found += varr[*vindex].size; /* Make sure there's space left in the victim array */ if (++*vindex == vmaxlen) From d0bb4b05b7d887803b899f8720a73924e5b5fba3 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 21 Aug 2019 08:30:55 -0700 Subject: [PATCH 003/439] simple_lmk: Remove kthread_should_stop() exit condition Simple LMK's reclaim thread should never stop; there's no need to have this check. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index e7e91b7b4c7b..845679a4cbed 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -250,13 +250,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - bool should_stop; - - wait_event(oom_waitq, (should_stop = kthread_should_stop()) || - READ_ONCE(needs_reclaim)); - - if (should_stop) - break; + wait_event(oom_waitq, READ_ONCE(needs_reclaim)); /* * Kill a batch of processes and wait for their memory to be From c17a72f71ce6a3833bcc55bb0cd335df12fd8f5b Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 21 Aug 2019 08:37:04 -0700 Subject: [PATCH 004/439] simple_lmk: Use proper atomic_* operations where needed cmpxchg() is only atomic with respect to the local CPU, so it cannot be relied on with how it's used in Simple LMK. Switch to fully atomic operations instead for full atomic guarantees. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 845679a4cbed..d1bc94027e5d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -68,7 +68,7 @@ static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); static int victims_to_kill; -static bool needs_reclaim; +static atomic_t needs_reclaim = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) { @@ -250,7 +250,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, READ_ONCE(needs_reclaim)); + wait_event(oom_waitq, atomic_read(&needs_reclaim)); /* * Kill a batch of processes and wait for their memory to be @@ -262,7 +262,7 @@ static int simple_lmk_reclaim_thread(void *data) do { scan_and_kill(MIN_FREE_PAGES); msleep(20); - } while (READ_ONCE(needs_reclaim)); + } while (atomic_read(&needs_reclaim)); } return 0; @@ -273,13 +273,13 @@ void simple_lmk_decide_reclaim(int kswapd_priority) if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) return; - if (!cmpxchg(&needs_reclaim, false, true)) + if (!atomic_cmpxchg(&needs_reclaim, 0, 1)) wake_up(&oom_waitq); } void simple_lmk_stop_reclaim(void) { - WRITE_ONCE(needs_reclaim, false); + atomic_set(&needs_reclaim, 0); } void simple_lmk_mm_freed(struct mm_struct *mm) @@ -303,10 +303,10 @@ void simple_lmk_mm_freed(struct mm_struct *mm) /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) { - static bool init_done; + static atomic_t init_done = ATOMIC_INIT(0); struct task_struct *thread; - if (cmpxchg(&init_done, false, true)) + if (atomic_cmpxchg(&init_done, 0, 1)) return 0; 
thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); From e98a5c008330b6b424fe18c03be437c6c5402f4e Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 4 Nov 2019 10:56:15 -0800 Subject: [PATCH 005/439] simple_lmk: Fix broken multicopy atomicity for victims_to_kill When the reclaim thread writes to victims_to_kill on one CPU, it expects the updated value to be immediately reflected on all CPUs in order for simple_lmk_mm_freed() to work correctly. Due to the lack of memory barriers to guarantee multicopy atomicity, simple_lmk_mm_freed() can be given a victim's mm without knowing the correct victims_to_kill value, which can cause the reclaim thread to remain stuck waiting forever for all victims to be freed. This scenario, despite being rare, has been observed. Fix this by using proper atomic helpers with memory barriers. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index d1bc94027e5d..079281fdcb57 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -67,7 +67,7 @@ static const short adj_prio[] = { static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); -static int victims_to_kill; +static atomic_t victims_to_kill = ATOMIC_INIT(0); static atomic_t needs_reclaim = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) @@ -206,7 +206,7 @@ static void scan_and_kill(unsigned long pages_needed) nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); /* Kill the victims */ - WRITE_ONCE(victims_to_kill, nr_to_kill); + atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; struct task_struct *vtsk = victim->tsk; @@ -287,11 +287,11 @@ void simple_lmk_mm_freed(struct mm_struct *mm) static atomic_t 
nr_killed = ATOMIC_INIT(0); int i, nr_to_kill; - nr_to_kill = READ_ONCE(victims_to_kill); + nr_to_kill = atomic_read_acquire(&victims_to_kill); for (i = 0; i < nr_to_kill; i++) { if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { if (atomic_inc_return(&nr_killed) == nr_to_kill) { - WRITE_ONCE(victims_to_kill, 0); + atomic_set(&victims_to_kill, 0); nr_killed = (atomic_t)ATOMIC_INIT(0); complete(&reclaim_done); } From 882830acf86700e10ba2afbd8f17d4adb94fb2cd Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 4 Nov 2019 11:06:13 -0800 Subject: [PATCH 006/439] simple_lmk: Make reclaim deterministic The 20 ms delay in the reclaim thread is a hacky fudge factor that can cause Simple LMK to behave wildly differently depending on the circumstances of when it is invoked. When kswapd doesn't get enough CPU time to finish up and go back to sleep within 20 ms, Simple LMK performs superfluous reclaims. This is suboptimal, so make Simple LMK more deterministic by eliminating the delay and instead queuing up reclaim requests from kswapd. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 38 +++++++++++++----------------------- include/linux/simple_lmk.h | 4 ---- mm/vmscan.c | 2 -- 3 files changed, 14 insertions(+), 30 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 079281fdcb57..bce8cf651b81 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -5,7 +5,6 @@ #define pr_fmt(fmt) "simple_lmk: " fmt -#include #include #include #include @@ -250,19 +249,8 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_read(&needs_reclaim)); - - /* - * Kill a batch of processes and wait for their memory to be - * freed. After their memory is freed, sleep for 20 ms to give - * OOM'd allocations a chance to scavenge for the newly-freed - * pages. 
Rinse and repeat while there are still OOM'd - * allocations. - */ - do { - scan_and_kill(MIN_FREE_PAGES); - msleep(20); - } while (atomic_read(&needs_reclaim)); + wait_event(oom_waitq, atomic_add_unless(&needs_reclaim, -1, 0)); + scan_and_kill(MIN_FREE_PAGES); } return 0; @@ -270,16 +258,18 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { - if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) - return; - - if (!atomic_cmpxchg(&needs_reclaim, 0, 1)) - wake_up(&oom_waitq); -} - -void simple_lmk_stop_reclaim(void) -{ - atomic_set(&needs_reclaim, 0); + if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) { + int v, v1; + + for (v = 0;; v = v1) { + v1 = atomic_cmpxchg(&needs_reclaim, v, v + 1); + if (likely(v1 == v)) { + if (!v) + wake_up(&oom_waitq); + break; + } + } + } } void simple_lmk_mm_freed(struct mm_struct *mm) diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index b0c247f2f2a5..46cdb389be51 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -9,15 +9,11 @@ struct mm_struct; #ifdef CONFIG_ANDROID_SIMPLE_LMK void simple_lmk_decide_reclaim(int kswapd_priority); -void simple_lmk_stop_reclaim(void); void simple_lmk_mm_freed(struct mm_struct *mm); #else static inline void simple_lmk_decide_reclaim(int kswapd_priority) { } -static inline void simple_lmk_stop_reclaim(void) -{ -} static inline void simple_lmk_mm_freed(struct mm_struct *mm) { } diff --git a/mm/vmscan.c b/mm/vmscan.c index 1fa8a9d117c9..f8e0ea574421 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3789,7 +3789,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * succeed. */ if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { - simple_lmk_stop_reclaim(); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. 
@@ -3826,7 +3825,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o */ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { - simple_lmk_stop_reclaim(); trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* From 4e39ed12c6827910dea9f400112fb0b7e9a7ae0f Mon Sep 17 00:00:00 2001 From: Petr Vorel Date: Mon, 29 Oct 2018 22:10:58 +0100 Subject: [PATCH 007/439] merge_config.sh: Allow to define config prefix with CONFIG_ environment variable. merge_config.sh uses CONFIG_ which is used in kernel and other projects. There are some projects which use kconfig with different prefixes (e.g. buildroot: BR2_ prefix). CONFIG_ variable is already used for this purpose in kconfig binary (scripts/kconfig/lkc.h), let's use the same rule for in merge_config.sh. Suggested-by: Masahiro Yamada Signed-off-by: Petr Vorel Signed-off-by: Masahiro Yamada --- scripts/kconfig/merge_config.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 67d131447631..da66e7742282 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -33,12 +33,15 @@ usage() { echo " -n use allnoconfig instead of alldefconfig" echo " -r list redundant entries when merging fragments" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." + echo + echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." } RUNMAKE=true ALLTARGET=alldefconfig WARNREDUN=false OUTPUT=. +CONFIG_PREFIX=${CONFIG_-CONFIG_} while true; do case $1 in @@ -99,7 +102,8 @@ if [ ! 
-r "$INITFILE" ]; then fi MERGE_LIST=$* -SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p" +SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" + TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" From 8559d81850865d486605e3442f8fff9430788f32 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 4 Nov 2019 11:27:29 -0800 Subject: [PATCH 008/439] simple_lmk: Clean up some code style nitpicks Using a parameter to pass around a unmodified pointer to a global variable is crufty; just use the `victims` variable directly instead. Also, compress the code in simple_lmk_init_set() a bit to make it look cleaner. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 45 ++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index bce8cf651b81..44f7319defc0 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -77,21 +77,19 @@ static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) return rhs->size - lhs->size; } -static bool vtsk_is_duplicate(struct victim_info *varr, int vlen, - struct task_struct *vtsk) +static bool vtsk_is_duplicate(int vlen, struct task_struct *vtsk) { int i; for (i = 0; i < vlen; i++) { - if (same_thread_group(varr[i].tsk, vtsk)) + if (same_thread_group(victims[i].tsk, vtsk)) return true; } return false; } -static unsigned long find_victims(struct victim_info *varr, int *vindex, - int vmaxlen, short target_adj) +static unsigned long find_victims(int *vindex, short target_adj) { unsigned long pages_found = 0; int old_vindex = *vindex; @@ -110,7 +108,7 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, * trying to lock a task that we locked earlier. 
*/ if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || - vtsk_is_duplicate(varr, *vindex, tsk)) + vtsk_is_duplicate(*vindex, tsk)) continue; vtsk = find_lock_task_mm(tsk); @@ -118,15 +116,15 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, continue; /* Store this potential victim away for later */ - varr[*vindex].tsk = vtsk; - varr[*vindex].mm = vtsk->mm; - varr[*vindex].size = get_mm_rss(vtsk->mm); + victims[*vindex].tsk = vtsk; + victims[*vindex].mm = vtsk->mm; + victims[*vindex].size = get_mm_rss(vtsk->mm); /* Keep track of the number of pages that have been found */ - pages_found += varr[*vindex].size; + pages_found += victims[*vindex].size; /* Make sure there's space left in the victim array */ - if (++*vindex == vmaxlen) + if (++*vindex == MAX_VICTIMS) break; } @@ -135,14 +133,13 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, * the larger ones first. */ if (pages_found) - sort(&varr[old_vindex], *vindex - old_vindex, sizeof(*varr), - victim_size_cmp, NULL); + sort(&victims[old_vindex], *vindex - old_vindex, + sizeof(*victims), victim_size_cmp, NULL); return pages_found; } -static int process_victims(struct victim_info *varr, int vlen, - unsigned long pages_needed) +static int process_victims(int vlen, unsigned long pages_needed) { unsigned long pages_found = 0; int i, nr_to_kill = 0; @@ -180,8 +177,7 @@ static void scan_and_kill(unsigned long pages_needed) */ read_lock(&tasklist_lock); for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { - pages_found += find_victims(victims, &nr_victims, MAX_VICTIMS, - adj_prio[i]); + pages_found += find_victims(&nr_victims, adj_prio[i]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } @@ -192,7 +188,7 @@ static void scan_and_kill(unsigned long pages_needed) return; /* First round of victim processing to weed out unneeded victims */ - nr_to_kill = process_victims(victims, nr_victims, pages_needed); + nr_to_kill = process_victims(nr_victims, 
pages_needed); /* * Try to kill as few of the chosen victims as possible by sorting the @@ -202,7 +198,7 @@ static void scan_and_kill(unsigned long pages_needed) sort(victims, nr_to_kill, sizeof(*victims), victim_size_cmp, NULL); /* Second round of victim processing to finally select the victims */ - nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); + nr_to_kill = process_victims(nr_to_kill, pages_needed); /* Kill the victims */ atomic_set_release(&victims_to_kill, nr_to_kill); @@ -296,12 +292,11 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) static atomic_t init_done = ATOMIC_INIT(0); struct task_struct *thread; - if (atomic_cmpxchg(&init_done, 0, 1)) - return 0; - - thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); - BUG_ON(IS_ERR(thread)); - + if (!atomic_cmpxchg(&init_done, 0, 1)) { + thread = kthread_run(simple_lmk_reclaim_thread, NULL, + "simple_lmkd"); + BUG_ON(IS_ERR(thread)); + } return 0; } From c596471f26ef7b652e7fc31bdcecf658bef3e8f2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 5 Nov 2018 17:19:36 +0900 Subject: [PATCH 009/439] kconfig: merge_config: avoid false positive matches from comment lines The current SED_CONFIG_EXP could match to comment lines in config fragment files, especially when CONFIG_PREFIX_ is empty. For example, Buildroot uses empty prefixing; starting symbols with BR2_ is just convention. Make the sed expression more robust against false positives from comment lines. The new sed expression matches to only valid patterns. Signed-off-by: Masahiro Yamada Reviewed-by: Petr Vorel Reviewed-by: Arnout Vandecappelle (Essensium/Mind) --- scripts/kconfig/merge_config.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index da66e7742282..0ef906499646 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -102,7 +102,8 @@ if [ ! 
-r "$INITFILE" ]; then fi MERGE_LIST=$* -SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" +SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p" +SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p" TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) @@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2 exit 1 fi - CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE) + CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE) for CFG in $CFG_LIST ; do grep -q -w $CFG $TMP_FILE || continue @@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET # Check all specified config values took (might have missed-dependency issues) -for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do +for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") From f1222e1d692e187ac3c23e8823073367bd4c3750 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 6 Nov 2019 10:02:57 -0800 Subject: [PATCH 010/439] simple_lmk: Increase default minfree value After commit "simple_lmk: Make reclaim deterministic", Simple LMK's behavior changed and thus requires some slight re-tuning to make it work well again. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index b572f685374f..f126cf569529 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -80,7 +80,7 @@ config ANDROID_SIMPLE_LMK_AGGRESSION config ANDROID_SIMPLE_LMK_MINFREE int "Minimum MiB of memory to free per reclaim" range 8 512 - default 100 + default 128 help Simple LMK will try to free at least this much memory per reclaim. 
From 9f01f647ab120383fe0b44c6ab27d996a20b4c17 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 12 Nov 2018 09:38:55 +0100 Subject: [PATCH 011/439] scripts/kconfig/merge_config: don't redefine 'y' to 'm' In today's merge_config.sh the order of the config fragment files dictates the output of a config option. With this approach we will get different .config files depending on the order of the config fragment files. So doing something like: $ ./merge/kconfig/merge_config.sh selftest.config drm.config Where selftest.config defines DRM=y and drm.config defines DRM=m, the result will be "DRM=m". Rework to add a switch to get builtin '=y' precedence over modules '=m', this will result in "DRM=y". If we do something like this: $ ./merge/kconfig/merge_config.sh -y selftest.config drm.config Suggested-by: Arnd Bergmann Signed-off-by: Anders Roxell Signed-off-by: Masahiro Yamada --- scripts/kconfig/merge_config.sh | 37 ++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 0ef906499646..9b89791b202c 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -22,6 +22,7 @@ clean_up() { rm -f $TMP_FILE + rm -f $MERGE_FILE exit } trap clean_up HUP INT TERM @@ -32,6 +33,7 @@ usage() { echo " -m only merge the fragments, do not execute the make command" echo " -n use allnoconfig instead of alldefconfig" echo " -r list redundant entries when merging fragments" + echo " -y make builtin have precedence over modules" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." echo echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." @@ -40,6 +42,7 @@ usage() { RUNMAKE=true ALLTARGET=alldefconfig WARNREDUN=false +BUILTIN=false OUTPUT=. 
CONFIG_PREFIX=${CONFIG_-CONFIG_} @@ -64,6 +67,11 @@ while true; do shift continue ;; + "-y") + BUILTIN=true + shift + continue + ;; "-O") if [ -d $2 ];then OUTPUT=$(echo $2 | sed 's/\/*$//') @@ -106,32 +114,45 @@ SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p" SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p" TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) +MERGE_FILE=$(mktemp ./.merge_tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" cat $INITFILE > $TMP_FILE # Merge files, printing warnings on overridden values -for MERGE_FILE in $MERGE_LIST ; do - echo "Merging $MERGE_FILE" - if [ ! -r "$MERGE_FILE" ]; then - echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2 +for ORIG_MERGE_FILE in $MERGE_LIST ; do + echo "Merging $ORIG_MERGE_FILE" + if [ ! -r "$ORIG_MERGE_FILE" ]; then + echo "The merge file '$ORIG_MERGE_FILE' does not exist. Exit." >&2 exit 1 fi + cat $ORIG_MERGE_FILE > $MERGE_FILE CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE) for CFG in $CFG_LIST ; do grep -q -w $CFG $TMP_FILE || continue PREV_VAL=$(grep -w $CFG $TMP_FILE) NEW_VAL=$(grep -w $CFG $MERGE_FILE) - if [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then - echo Value of $CFG is redefined by fragment $MERGE_FILE: + BUILTIN_FLAG=false + if [ "$BUILTIN" = "true" ] && [ "${NEW_VAL#CONFIG_*=}" = "m" ] && [ "${PREV_VAL#CONFIG_*=}" = "y" ]; then + echo Previous value: $PREV_VAL + echo New value: $NEW_VAL + echo -y passed, will not demote y to m + echo + BUILTIN_FLAG=true + elif [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then + echo Value of $CFG is redefined by fragment $ORIG_MERGE_FILE: echo Previous value: $PREV_VAL echo New value: $NEW_VAL echo elif [ "$WARNREDUN" = "true" ]; then - echo Value of $CFG is redundant by fragment $MERGE_FILE: + echo Value of $CFG is redundant by fragment $ORIG_MERGE_FILE: + fi + if [ "$BUILTIN_FLAG" = "false" ]; then + sed -i "/$CFG[ =]/d" $TMP_FILE + else + sed -i "/$CFG[ =]/d" $MERGE_FILE fi - sed -i "/$CFG[ =]/d" 
$TMP_FILE done cat $MERGE_FILE >> $TMP_FILE done From 8a04ab5528b5b194154ae1f37d23d394b4d6e87f Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 20 Jan 2020 16:03:44 -0800 Subject: [PATCH 012/439] simple_lmk: Don't queue up new reclaim requests during reclaim Queuing up reclaim requests while a reclaim is in progress doesn't make sense, since the additional reclaims may not be needed after the existing reclaim completes. This would cause Simple LMK to go berserk during periods of high memory pressure where kswapd would fire off reclaim requests nonstop. Make Simple LMK ignore new reclaim requests until an existing reclaim is finished to prevent a slaughter-fest. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 44f7319defc0..28b808a40a5a 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -245,8 +245,9 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_add_unless(&needs_reclaim, -1, 0)); + wait_event(oom_waitq, atomic_read_acquire(&needs_reclaim)); scan_and_kill(MIN_FREE_PAGES); + atomic_set_release(&needs_reclaim, 0); } return 0; @@ -254,18 +255,9 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { - if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) { - int v, v1; - - for (v = 0;; v = v1) { - v1 = atomic_cmpxchg(&needs_reclaim, v, v + 1); - if (likely(v1 == v)) { - if (!v) - wake_up(&oom_waitq); - break; - } - } - } + if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && + !atomic_cmpxchg(&needs_reclaim, 0, 1)) + wake_up(&oom_waitq); } void simple_lmk_mm_freed(struct mm_struct *mm) From 9f4c0761c421b55a1bddba6133a6f6845e4b5667 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 19 
Aug 2019 21:06:50 +0100 Subject: [PATCH 013/439] merge_config.sh: Check error codes from make When we execute make after merging the configurations we ignore any errors it produces causing whatever is running merge_config.sh to be unaware of any failures. This issue was noticed by Guillaume Tucker while looking at problems with testing of clang only builds in KernelCI which caused Kbuild to be unable to find a working host compiler. This implementation was suggested by Yamada-san. Suggested-by: Masahiro Yamada Reported-by: Guillaume Tucker Signed-off-by: Mark Brown Signed-off-by: Masahiro Yamada --- scripts/kconfig/merge_config.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 9b89791b202c..784db50a894e 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -20,12 +20,12 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. 
+set -e + clean_up() { rm -f $TMP_FILE rm -f $MERGE_FILE - exit } -trap clean_up HUP INT TERM usage() { echo "Usage: $0 [OPTIONS] [CONFIG [...]]" @@ -117,6 +117,9 @@ TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) MERGE_FILE=$(mktemp ./.merge_tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" + +trap clean_up EXIT + cat $INITFILE > $TMP_FILE # Merge files, printing warnings on overridden values @@ -162,7 +165,6 @@ if [ "$RUNMAKE" = "false" ]; then echo "#" echo "# merged configuration written to $KCONFIG_CONFIG (needs make)" echo "#" - clean_up exit fi @@ -192,5 +194,3 @@ for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do echo "" fi done - -clean_up From a6a2fe610a7b1d6678448dc39dfd1869e910781d Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 6 Feb 2020 20:57:53 -0800 Subject: [PATCH 014/439] simple_lmk: Update copyright to 2020 Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 2 +- include/linux/simple_lmk.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 28b808a40a5a..2884030276ea 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2019 Sultan Alsawaf . + * Copyright (C) 2019-2020 Sultan Alsawaf . */ #define pr_fmt(fmt) "simple_lmk: " fmt diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index 46cdb389be51..28103c1b1d4c 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Sultan Alsawaf . + * Copyright (C) 2019-2020 Sultan Alsawaf . 
*/ #ifndef _SIMPLE_LMK_H_ #define _SIMPLE_LMK_H_ From 27f77511f3aa2c74d03ad9ee022230dbed110cc4 Mon Sep 17 00:00:00 2001 From: Guillaume Tucker Date: Mon, 2 Sep 2019 16:18:36 +0100 Subject: [PATCH 015/439] merge_config.sh: ignore unwanted grep errors The merge_config.sh script verifies that all the config options have their expected value in the resulting file and prints any issues as warnings. These checks aren't intended to be treated as errors given the current implementation. However, since "set -e" was added, if the grep command to look for a config option does not find it the script will then abort prematurely. Handle the case where the grep exit status is non-zero by setting ACTUAL_VAL to an empty string to restore previous functionality. Fixes: cdfca821571d ("merge_config.sh: Check error codes from make") Signed-off-by: Guillaume Tucker Acked-by: Jon Hunter Tested-by: Jon Hunter Signed-off-by: Masahiro Yamada --- scripts/kconfig/merge_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 784db50a894e..20a776cb4dfe 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -186,7 +186,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) - ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") + ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG" || true) if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then echo "Value requested for $CFG not in final .config" echo "Requested value: $REQUESTED_VAL" From 0f7200fca9c25528c65d62c050fd6d51a9bea3ee Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 6 Feb 2020 20:59:22 -0800 Subject: [PATCH 016/439] simple_lmk: Remove compat cruft not specific to 4.14 Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 
deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 2884030276ea..3816f8bf3946 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -10,26 +10,7 @@ #include #include #include -#include - -/* The sched_param struct is located elsewhere in newer kernels */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) #include -#endif - -/* SEND_SIG_FORCED isn't present in newer kernels */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) -#define SIG_INFO_TYPE SEND_SIG_FORCED -#else -#define SIG_INFO_TYPE SEND_SIG_PRIV -#endif - -/* The group argument to do_send_sig_info is different in newer kernels */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) -#define KILL_GROUP_TYPE true -#else -#define KILL_GROUP_TYPE PIDTYPE_TGID -#endif /* The minimum number of pages to free per reclaim */ #define MIN_FREE_PAGES (CONFIG_ANDROID_SIMPLE_LMK_MINFREE * SZ_1M / PAGE_SIZE) @@ -211,7 +192,7 @@ static void scan_and_kill(unsigned long pages_needed) victim->size << (PAGE_SHIFT - 10)); /* Accelerate the victim's death by forcing the kill signal */ - do_send_sig_info(SIGKILL, SIG_INFO_TYPE, vtsk, KILL_GROUP_TYPE); + do_send_sig_info(SIGKILL, SEND_SIG_FORCED, vtsk, true); /* Grab a reference to the victim for later before unlocking */ get_task_struct(vtsk); From 57cb1066be6a416afb27239076359eda72573021 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 6 Feb 2020 21:03:24 -0800 Subject: [PATCH 017/439] simple_lmk: Print a message when there are no processes to kill Makes it clear that Simple LMK tried its best but there was nothing it could do. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 3816f8bf3946..76f40e99b80d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -165,8 +165,10 @@ static void scan_and_kill(unsigned long pages_needed) read_unlock(&tasklist_lock); /* Pretty unlikely but it can happen */ - if (unlikely(!nr_victims)) + if (unlikely(!nr_victims)) { + pr_err("No processes available to kill!\n"); return; + } /* First round of victim processing to weed out unneeded victims */ nr_to_kill = process_victims(nr_victims, pages_needed); From 4c4c1baaa76db6d126201b3be3d66b95f7646110 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Fri, 7 Feb 2020 23:36:58 -0800 Subject: [PATCH 018/439] simple_lmk: Disable OOM killer when Simple LMK is enabled The OOM killer only serves to be a liability when Simple LMK is used. Signed-off-by: Sultan Alsawaf --- mm/oom_kill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index cb2c8f527e67..05375bc8135e 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1061,7 +1061,7 @@ bool out_of_memory(struct oom_control *oc) unsigned long freed = 0; enum oom_constraint constraint = CONSTRAINT_NONE; - if (oom_killer_disabled) + if (oom_killer_disabled || IS_ENABLED(CONFIG_ANDROID_SIMPLE_LMK)) return false; if (!is_memcg_oom(oc)) { From 555c6e1ebf33c782327f82ae0a5ce073d6451617 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 8 Feb 2020 17:03:35 -0800 Subject: [PATCH 019/439] simple_lmk: Mark victim thread group with TIF_MEMDIE The OOM killer sets the TIF_MEMDIE thread flag for its victims to alert other kernel code that the current process was killed due to memory pressure, and needs to finish whatever it's doing quickly. In the page allocator this allows victim processes to quickly allocate memory using emergency reserves. 
This is especially important when memory pressure is high; if all processes are taking a while to allocate memory, then our victim processes will face the same problem and can potentially get stuck in the page allocator for a while rather than die expeditiously. To ensure that victim processes die quickly, set TIF_MEMDIE for the entire victim thread group. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 8 +++++++- kernel/exit.c | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 76f40e99b80d..77172da82701 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -187,7 +187,7 @@ static void scan_and_kill(unsigned long pages_needed) atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; - struct task_struct *vtsk = victim->tsk; + struct task_struct *t, *vtsk = victim->tsk; pr_info("Killing %s with adj %d to free %lu KiB\n", vtsk->comm, vtsk->signal->oom_score_adj, @@ -196,6 +196,12 @@ static void scan_and_kill(unsigned long pages_needed) /* Accelerate the victim's death by forcing the kill signal */ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, vtsk, true); + /* Mark the thread group dead so that other kernel code knows */ + rcu_read_lock(); + for_each_thread(vtsk, t) + set_tsk_thread_flag(t, TIF_MEMDIE); + rcu_read_unlock(); + /* Grab a reference to the victim for later before unlocking */ get_task_struct(vtsk); task_unlock(vtsk); diff --git a/kernel/exit.c b/kernel/exit.c index 8ebc7dfc244a..bc46817e96c0 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -549,8 +549,12 @@ static void exit_mm(void) task_unlock(current); mm_update_next_owner(mm); mmput(mm); +#ifdef CONFIG_ANDROID_SIMPLE_LMK + clear_thread_flag(TIF_MEMDIE); +#else if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); +#endif } static struct task_struct *find_alive_thread(struct task_struct *p) From 
dd2c2e1508d624aac644dc12ff8b99c2babcc6ab Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sun, 9 Feb 2020 16:24:29 -0800 Subject: [PATCH 020/439] simple_lmk: Report mm as freed as soon as exit_mmap() finishes exit_mmap() is responsible for freeing the vast majority of an mm's memory; in order to unblock Simple LMK faster, report an mm as freed as soon as exit_mmap() finishes. Signed-off-by: Sultan Alsawaf --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index 11a1af672858..0e06ac6c9c39 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -933,6 +933,7 @@ static inline void __mmput(struct mm_struct *mm) ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); + simple_lmk_mm_freed(mm); mm_put_huge_zero_page(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { @@ -942,7 +943,6 @@ static inline void __mmput(struct mm_struct *mm) } if (mm->binfmt) module_put(mm->binfmt->module); - simple_lmk_mm_freed(mm); mmdrop(mm); } From a748debfe80178c43d078e4782bcbfb50809f6ae Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 8 Feb 2020 00:00:48 -0800 Subject: [PATCH 021/439] simple_lmk: Simplify tricks used to speed up the death process set_user_nice() doesn't schedule, and although set_cpus_allowed_ptr() can schedule, it will only do so when the specified task cannot run on the new set of allowed CPUs. Since cpu_all_mask is used, set_cpus_allowed_ptr() will never schedule. Therefore, both the priority elevation and cpus_allowed change can be moved to inside the task lock to simplify and speed things up. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 77172da82701..224299997dd4 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -202,23 +202,14 @@ static void scan_and_kill(unsigned long pages_needed) set_tsk_thread_flag(t, TIF_MEMDIE); rcu_read_unlock(); - /* Grab a reference to the victim for later before unlocking */ - get_task_struct(vtsk); - task_unlock(vtsk); - } - - /* Try to speed up the death process now that we can schedule again */ - for (i = 0; i < nr_to_kill; i++) { - struct task_struct *vtsk = victims[i].tsk; - /* Increase the victim's priority to make it die faster */ set_user_nice(vtsk, MIN_NICE); - /* Allow the victim to run on any CPU */ + /* Allow the victim to run on any CPU. This won't schedule. */ set_cpus_allowed_ptr(vtsk, cpu_all_mask); - /* Finally release the victim reference acquired earlier */ - put_task_struct(vtsk); + /* Finally release the victim's task lock acquired earlier */ + task_unlock(vtsk); } /* Wait until all the victims die */ From e46a701b542a80e8f519021370edc2671cc28a58 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 8 Feb 2020 03:21:01 -0800 Subject: [PATCH 022/439] simple_lmk: Ignore tasks that won't free memory Dying processes aren't going to help free memory, so ignore them. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 224299997dd4..77695a763a05 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -77,18 +77,22 @@ static unsigned long find_victims(int *vindex, short target_adj) struct task_struct *tsk; for_each_process(tsk) { + struct signal_struct *sig; struct task_struct *vtsk; /* - * Search for tasks with the targeted importance (adj). 
Since - * only tasks with a positive adj can be targeted, that + * Search for suitable tasks with the targeted importance (adj). + * Since only tasks with a positive adj can be targeted, that * naturally excludes tasks which shouldn't be killed, like init * and kthreads. Although oom_score_adj can still be changed * while this code runs, it doesn't really matter. We just need * to make sure that if the adj changes, we won't deadlock * trying to lock a task that we locked earlier. */ - if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || + sig = tsk->signal; + if (READ_ONCE(sig->oom_score_adj) != target_adj || + sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP) || + (thread_group_empty(tsk) && tsk->flags & PF_EXITING) || vtsk_is_duplicate(*vindex, tsk)) continue; From eab51fd9f73901a14c29a4766d1de4b140e33741 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 8 Feb 2020 03:22:44 -0800 Subject: [PATCH 023/439] simple_lmk: Add a timeout to stop waiting for victims to die Simple LMK tries to wait until all of the victims it kills have their memory freed; however, sometimes victims can take a while to die, which can block Simple LMK from killing more processes in time when needed. After the specified timeout elapses, Simple LMK will stop waiting and make itself available to kill more processes. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 11 ++++++++++ drivers/android/simple_lmk.c | 42 +++++++++++++++++++++++++----------- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index f126cf569529..7f65391e5e73 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -84,6 +84,17 @@ config ANDROID_SIMPLE_LMK_MINFREE help Simple LMK will try to free at least this much memory per reclaim. 
+config ANDROID_SIMPLE_LMK_TIMEOUT_MSEC + int "Reclaim timeout in milliseconds" + range 50 1000 + default 200 + help + Simple LMK tries to wait until all of the victims it kills have their + memory freed; however, sometimes victims can take a while to die, + which can block Simple LMK from killing more processes in time when + needed. After the specified timeout elapses, Simple LMK will stop + waiting and make itself available to kill more processes. + endif endif # if ANDROID diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 77695a763a05..21895409ff8f 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -18,6 +18,9 @@ /* Kill up to this many victims per reclaim */ #define MAX_VICTIMS 1024 +/* Timeout in jiffies for each reclaim */ +#define RECLAIM_EXPIRES msecs_to_jiffies(CONFIG_ANDROID_SIMPLE_LMK_TIMEOUT_MSEC) + struct victim_info { struct task_struct *tsk; struct mm_struct *mm; @@ -47,8 +50,10 @@ static const short adj_prio[] = { static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); -static atomic_t victims_to_kill = ATOMIC_INIT(0); +static DEFINE_RWLOCK(mm_free_lock); +static int victims_to_kill; static atomic_t needs_reclaim = ATOMIC_INIT(0); +static atomic_t nr_killed = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) { @@ -152,7 +157,7 @@ static int process_victims(int vlen, unsigned long pages_needed) static void scan_and_kill(unsigned long pages_needed) { - int i, nr_to_kill = 0, nr_victims = 0; + int i, nr_to_kill = 0, nr_victims = 0, ret; unsigned long pages_found = 0; /* @@ -187,8 +192,12 @@ static void scan_and_kill(unsigned long pages_needed) /* Second round of victim processing to finally select the victims */ nr_to_kill = process_victims(nr_to_kill, pages_needed); + /* Store the final number of victims for simple_lmk_mm_freed() */ + write_lock(&mm_free_lock); + victims_to_kill = 
nr_to_kill; + write_unlock(&mm_free_lock); + /* Kill the victims */ - atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; struct task_struct *t, *vtsk = victim->tsk; @@ -216,8 +225,18 @@ static void scan_and_kill(unsigned long pages_needed) task_unlock(vtsk); } - /* Wait until all the victims die */ - wait_for_completion(&reclaim_done); + /* Wait until all the victims die or until the timeout is reached */ + ret = wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + write_lock(&mm_free_lock); + if (!ret) { + /* Extra clean-up is needed when the timeout is hit */ + reinit_completion(&reclaim_done); + for (i = 0; i < nr_to_kill; i++) + victims[i].mm = NULL; + } + victims_to_kill = 0; + nr_killed = (atomic_t)ATOMIC_INIT(0); + write_unlock(&mm_free_lock); } static int simple_lmk_reclaim_thread(void *data) @@ -246,20 +265,17 @@ void simple_lmk_decide_reclaim(int kswapd_priority) void simple_lmk_mm_freed(struct mm_struct *mm) { - static atomic_t nr_killed = ATOMIC_INIT(0); - int i, nr_to_kill; + int i; - nr_to_kill = atomic_read_acquire(&victims_to_kill); - for (i = 0; i < nr_to_kill; i++) { + read_lock(&mm_free_lock); + for (i = 0; i < victims_to_kill; i++) { if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { - if (atomic_inc_return(&nr_killed) == nr_to_kill) { - atomic_set(&victims_to_kill, 0); - nr_killed = (atomic_t)ATOMIC_INIT(0); + if (atomic_inc_return(&nr_killed) == victims_to_kill) complete(&reclaim_done); - } break; } } + read_unlock(&mm_free_lock); } /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ From 5c6877dd0fd789fdef095868b95ab8d76d1d771f Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 8 Feb 2020 14:26:31 -0800 Subject: [PATCH 024/439] simple_lmk: Place victims onto SCHED_RR Just increasing the victim's priority to the maximum niceness isn't enough to make it totally preempt everything in SCHED_FAIR, which is important to make sure 
victims die quickly. Resource-wise, this isn't very burdensome since the RT priority is just set to zero, and because dying victims don't have much to do: they only need to finish whatever they're doing quickly. SCHED_RR is used over SCHED_FIFO so that CPU time between the victims is divided evenly to help them all finish at around the same time, as fast as possible. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 21895409ff8f..ce2d1872c2d3 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -199,6 +199,7 @@ static void scan_and_kill(unsigned long pages_needed) /* Kill the victims */ for (i = 0; i < nr_to_kill; i++) { + static const struct sched_param sched_zero_prio; struct victim_info *victim = &victims[i]; struct task_struct *t, *vtsk = victim->tsk; @@ -215,8 +216,8 @@ static void scan_and_kill(unsigned long pages_needed) set_tsk_thread_flag(t, TIF_MEMDIE); rcu_read_unlock(); - /* Increase the victim's priority to make it die faster */ - set_user_nice(vtsk, MIN_NICE); + /* Elevate the victim to SCHED_RR with zero RT priority */ + sched_setscheduler_nocheck(vtsk, SCHED_RR, &sched_zero_prio); /* Allow the victim to run on any CPU. This won't schedule. */ set_cpus_allowed_ptr(vtsk, cpu_all_mask); From 34b03051c7d241a93d70374ce010e539269a02b9 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Tue, 18 Feb 2020 22:37:48 -0800 Subject: [PATCH 025/439] simple_lmk: Relax memory barriers and clean up some styling wake_up() executes a full memory barrier when waking a process up, so there's no need for the acquire in the wait event. Additionally, because of this, the atomic_cmpxchg() only needs a read barrier. The cmpxchg() in simple_lmk_mm_freed() is atomic when it doesn't need to be, so replace it with an extra line of code. 
The atomic_inc_return() in simple_lmk_mm_freed() lies within a lock, so it doesn't need explicit memory barriers. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index ce2d1872c2d3..3372fe21962d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -145,11 +145,10 @@ static int process_victims(int vlen, unsigned long pages_needed) /* The victim's mm lock is taken in find_victims; release it */ if (pages_found >= pages_needed) { task_unlock(vtsk); - continue; + } else { + pages_found += victim->size; + nr_to_kill++; } - - pages_found += victim->size; - nr_to_kill++; } return nr_to_kill; @@ -249,7 +248,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_read_acquire(&needs_reclaim)); + wait_event(oom_waitq, atomic_read(&needs_reclaim)); scan_and_kill(MIN_FREE_PAGES); atomic_set_release(&needs_reclaim, 0); } @@ -260,7 +259,7 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && - !atomic_cmpxchg(&needs_reclaim, 0, 1)) + !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) wake_up(&oom_waitq); } @@ -270,11 +269,13 @@ void simple_lmk_mm_freed(struct mm_struct *mm) read_lock(&mm_free_lock); for (i = 0; i < victims_to_kill; i++) { - if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { - if (atomic_inc_return(&nr_killed) == victims_to_kill) - complete(&reclaim_done); - break; - } + if (victims[i].mm != mm) + continue; + + victims[i].mm = NULL; + if (atomic_inc_return_relaxed(&nr_killed) == victims_to_kill) + complete(&reclaim_done); + break; } read_unlock(&mm_free_lock); } From 60d0969f243e8730a102d285f24cc4e202806701 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: 
Tue, 18 Feb 2020 22:39:41 -0800 Subject: [PATCH 026/439] simple_lmk: Include swap memory usage in the size of victims Swap memory usage is important when determining what to kill, so include it in the victim size calculation. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 3372fe21962d..215ee674d82d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -75,6 +75,17 @@ static bool vtsk_is_duplicate(int vlen, struct task_struct *vtsk) return false; } +static unsigned long get_total_mm_pages(struct mm_struct *mm) +{ + unsigned long pages = 0; + int i; + + for (i = 0; i < NR_MM_COUNTERS; i++) + pages += get_mm_counter(mm, i); + + return pages; +} + static unsigned long find_victims(int *vindex, short target_adj) { unsigned long pages_found = 0; @@ -108,7 +119,7 @@ static unsigned long find_victims(int *vindex, short target_adj) /* Store this potential victim away for later */ victims[*vindex].tsk = vtsk; victims[*vindex].mm = vtsk->mm; - victims[*vindex].size = get_mm_rss(vtsk->mm); + victims[*vindex].size = get_total_mm_pages(vtsk->mm); /* Keep track of the number of pages that have been found */ pages_found += victims[*vindex].size; From b27d5504c8e15c8588dc23d2d913f60b22ef8dd2 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 19 Feb 2020 14:47:13 -0800 Subject: [PATCH 027/439] mm: Stop kswapd early when nothing's waiting for it to free pages Keeping kswapd running when all the failed allocations that invoked it are satisfied incurs a high overhead due to unnecessary page eviction and writeback, as well as spurious VM pressure events to various registered shrinkers. When kswapd doesn't need to work to make an allocation succeed anymore, stop it prematurely to save resources. 
Signed-off-by: Sultan Alsawaf --- include/linux/mmzone.h | 1 + mm/page_alloc.c | 17 ++++++++++++++--- mm/vmscan.c | 3 ++- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index cecf0a58cf66..619ccfdf47d7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -656,6 +656,7 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; + atomic_t kswapd_waiters; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5de05dc023a5..1018bc303bdf 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3913,6 +3913,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int reserve_flags; unsigned long pages_reclaimed = 0; int retry_loop_count = 0; + pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat; + bool woke_kswapd = false; unsigned long jiffies_s = jiffies; u64 utime, stime_s, stime_e, stime_d; @@ -3950,8 +3952,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (!ac->preferred_zoneref->zone) goto nopage; - if (gfp_mask & __GFP_KSWAPD_RECLAIM) + if (gfp_mask & __GFP_KSWAPD_RECLAIM) { + if (!woke_kswapd) { + atomic_inc(&pgdat->kswapd_waiters); + woke_kswapd = true; + } wake_all_kswapds(order, ac); + } /* * The adjusted alloc_flags might result in immediate success, so try @@ -4147,9 +4154,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, goto retry; } fail: - warn_alloc(gfp_mask, ac->nodemask, - "page allocation failure: order:%u", order); got_pg: + if (woke_kswapd) + atomic_dec(&pgdat->kswapd_waiters); + if (!page) + warn_alloc(gfp_mask, ac->nodemask, + "page allocation failure: order:%u", order); task_cputime(current, &utime, &stime_e); stime_d = stime_e - stime_s; if (stime_d / NSEC_PER_MSEC > 256) { @@ -6119,6 +6129,7 @@ static void __paginginit 
free_area_init_core(struct pglist_data *pgdat) pgdat_page_ext_init(pgdat); spin_lock_init(&pgdat->lru_lock); lruvec_init(node_lruvec(pgdat)); + pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0); pgdat->per_cpu_nodestats = &boot_nodestats; diff --git a/mm/vmscan.c b/mm/vmscan.c index f8e0ea574421..3d2ee7d76f90 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3728,7 +3728,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) wake_up_all(&pgdat->pfmemalloc_wait); /* Check if kswapd should be suspending */ - if (try_to_freeze() || kthread_should_stop()) + if (try_to_freeze() || kthread_should_stop() || + !atomic_read(&pgdat->kswapd_waiters)) break; /* From 737f9611d2631644385fe79c398bad0396ef19b6 Mon Sep 17 00:00:00 2001 From: David Ng Date: Mon, 26 Mar 2018 12:46:49 -0700 Subject: [PATCH 028/439] mm, vmpressure: int cast vmpressure level/model for -1 comparison Resolve -Wenum-compare issue when comparing vmpressure level/model against -1 (invalid state). Change-Id: I1c76667ee8390e2d396c96e5ed73f30d0700ffa8 Signed-off-by: David Ng --- mm/vmpressure.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index f5ae7ed17a5d..5a881c46ac4e 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -401,7 +401,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg, /* Find required level */ token = strsep(&spec, ","); level = str_to_level(token); - if (level == -1) { + if ((int)level == -1) { ret = -EINVAL; goto out; } @@ -410,7 +410,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg, token = strsep(&spec, ","); if (token) { mode = str_to_mode(token); - if (mode == -1) { + if ((int)mode == -1) { ret = -EINVAL; goto out; } From 19acd3ff38c94fb61db4aca9b48ad927b07916c8 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Wed, 4 Mar 2015 16:38:28 +0530 Subject: [PATCH 029/439] mm: vmpressure: allow in-kernel clients to subscribe for events Currently, vmpressure is tied to memcg and its events are 
available only to userspace clients. This patch removes the dependency on CONFIG_MEMCG and adds a mechanism for in-kernel clients to subscribe for vmpressure events (in fact raw vmpressure values are delivered instead of vmpressure levels, to provide clients more flexibility to take actions on custom pressure levels which are not currently defined by vmpressure module). Change-Id: I38010f166546e8d7f12f5f355b5dbfd6ba04d587 Signed-off-by: Vinayak Menon --- include/linux/vmpressure.h | 12 ++-- mm/Makefile | 4 +- mm/vmpressure.c | 138 ++++++++++++++++++++++++++++++------- 3 files changed, 121 insertions(+), 33 deletions(-) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index ed471d595891..5719c9437458 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -30,11 +30,13 @@ struct vmpressure { struct mem_cgroup; -#ifdef CONFIG_MEMCG +extern int vmpressure_notifier_register(struct notifier_block *nb); +extern int vmpressure_notifier_unregister(struct notifier_block *nb); extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); +#ifdef CONFIG_MEMCG extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); @@ -45,9 +47,9 @@ extern int vmpressure_register_event(struct mem_cgroup *memcg, extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else -static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) {} -static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, - int prio) {} +static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) +{ + return NULL; +} #endif /* CONFIG_MEMCG */ #endif /* __LINUX_VMPRESSURE_H */ diff --git 
a/mm/Makefile b/mm/Makefile index 28644484d033..1b890be7d4f1 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o vmacache.o swap_slots.o \ interval_tree.o list_lru.o workingset.o \ - debug.o $(mmu-y) showmem_extra.o + debug.o $(mmu-y) showmem_extra.o vmpressure.o obj-y += init-mm.o @@ -80,7 +80,7 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG) += memcontrol.o obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 5a881c46ac4e..ee6515e9a4e5 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include /* @@ -49,6 +51,24 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; static const unsigned int vmpressure_level_med = CONFIG_VMPRESSURE_LEVEL_MED; static const unsigned int vmpressure_level_critical = 95; +static struct vmpressure global_vmpressure; +static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); + +int vmpressure_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmpressure_notifier, nb); +} + +int vmpressure_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmpressure_notifier, nb); +} + +static void vmpressure_notify(unsigned long pressure) +{ + blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL); +} + /* * When there are too little pages left to scan, vmpressure() may miss the * critical pressure as number of pages will be less than "window size". 
@@ -75,6 +95,7 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work) return container_of(work, struct vmpressure, work); } +#ifdef CONFIG_MEMCG static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) { struct cgroup_subsys_state *css = vmpressure_to_css(vmpr); @@ -85,6 +106,12 @@ static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) return NULL; return memcg_to_vmpressure(memcg); } +#else +static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) +{ + return NULL; +} +#endif enum vmpressure_levels { VMPRESSURE_LOW = 0, @@ -121,7 +148,7 @@ static enum vmpressure_levels vmpressure_level(unsigned long pressure) return VMPRESSURE_LOW; } -static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, +static unsigned long vmpressure_calc_pressure(unsigned long scanned, unsigned long reclaimed, struct vmpressure *vmpr) { unsigned long scale = scanned + reclaimed; @@ -149,7 +176,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, scanned, reclaimed); - return vmpressure_level(pressure); + return pressure; } struct vmpressure_event { @@ -187,6 +214,7 @@ static void vmpressure_work_fn(struct work_struct *work) struct vmpressure *vmpr = work_to_vmpressure(work); unsigned long scanned; unsigned long reclaimed; + unsigned long pressure; enum vmpressure_levels level; bool ancestor = false; bool signalled = false; @@ -211,7 +239,8 @@ static void vmpressure_work_fn(struct work_struct *work) vmpr->tree_reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed, vmpr); + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + level = vmpressure_level(pressure); do { if (vmpressure_event(vmpr, level, ancestor, signalled)) @@ -220,28 +249,8 @@ static void vmpressure_work_fn(struct work_struct *work) } while ((vmpr = vmpressure_parent(vmpr))); } -/** - * vmpressure() - Account memory 
pressure through scanned/reclaimed ratio - * @gfp: reclaimer's gfp mask - * @memcg: cgroup memory controller handle - * @tree: legacy subtree mode - * @scanned: number of pages scanned - * @reclaimed: number of pages reclaimed - * - * This function should be called from the vmscan reclaim path to account - * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw - * pressure index is then further refined and averaged over time. - * - * If @tree is set, vmpressure is in traditional userspace reporting - * mode: @memcg is considered the pressure root and userspace is - * notified of the entire subtree's reclaim efficiency. - * - * If @tree is not set, reclaim efficiency is recorded for @memcg, and - * only in-kernel users are notified. - * - * This function does not return any value. - */ -void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, +#ifdef CONFIG_MEMCG +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); @@ -282,6 +291,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, schedule_work(&vmpr->work); } else { enum vmpressure_levels level; + unsigned long pressure; /* For now, no users for root-level efficiency */ if (!memcg || memcg == root_mem_cgroup) @@ -297,7 +307,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, vmpr->scanned = vmpr->reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed, vmpr); + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + level = vmpressure_level(pressure); if (level > VMPRESSURE_LOW) { /* @@ -312,6 +323,74 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } } } +#else +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ +} +#endif + +static void vmpressure_global(gfp_t gfp, unsigned long scanned, + 
unsigned long reclaimed) +{ + struct vmpressure *vmpr = &global_vmpressure; + unsigned long pressure; + + if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) + return; + + if (!scanned) + return; + + spin_lock(&vmpr->sr_lock); + vmpr->scanned += scanned; + vmpr->reclaimed += reclaimed; + scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + spin_unlock(&vmpr->sr_lock); + + if (scanned < vmpressure_win) + return; + + spin_lock(&vmpr->sr_lock); + vmpr->scanned = 0; + vmpr->reclaimed = 0; + spin_unlock(&vmpr->sr_lock); + + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + vmpressure_notify(pressure); +} + +/** + * vmpressure() - Account memory pressure through scanned/reclaimed ratio + * @gfp: reclaimer's gfp mask + * @memcg: cgroup memory controller handle + * @tree: legacy subtree mode + * @scanned: number of pages scanned + * @reclaimed: number of pages reclaimed + * + * This function should be called from the vmscan reclaim path to account + * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw + * pressure index is then further refined and averaged over time. + * + * If @tree is set, vmpressure is in traditional userspace reporting + * mode: @memcg is considered the pressure root and userspace is + * notified of the entire subtree's reclaim efficiency. + * + * If @tree is not set, reclaim efficiency is recorded for @memcg, and + * only in-kernel users are notified. + * + * This function does not return any value. 
+ */ +void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ + if (!memcg && tree) + vmpressure_global(gfp, scanned, reclaimed); + + if (IS_ENABLED(CONFIG_MEMCG)) + vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed); +} /** * vmpressure_prio() - Account memory pressure through reclaimer priority level @@ -492,3 +571,10 @@ void vmpressure_cleanup(struct vmpressure *vmpr) */ flush_work(&vmpr->work); } + +static int vmpressure_global_init(void) +{ + vmpressure_init(&global_vmpressure); + return 0; +} +late_initcall(vmpressure_global_init); From 9701af1254a8395a028894d922dc4bd993867ec8 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Tue, 31 Mar 2015 11:06:29 +0530 Subject: [PATCH 030/439] mm: vmpressure: scale pressure based on reclaim context The existing calculation of vmpressure takes into account only the ratio of reclaimed to scanned pages, but not the time spent or the difficulty in reclaiming those pages. For e.g. when there are quite a number of file pages in the system, an allocation request can be satisfied by reclaiming the file pages alone. If such a reclaim is successful, the vmpressure value will remain low irrespective of the time spent by the reclaim code to free up the file pages. With a feature like lowmemorykiller, killing a task can be faster than reclaiming the file pages alone. So if the vmpressure values reflect the reclaim difficulty level, clients can make a decision based on that, for e.g. to kill a task early. This patch monitors the number of pages scanned in the direct reclaim path and scales the vmpressure level according to that. 
Signed-off-by: Vinayak Menon Change-Id: I6e643d29a9a1aa0814309253a8b690ad86ec0b13 --- include/linux/vmpressure.h | 1 + mm/vmpressure.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 5719c9437458..8f33ef96dd5d 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -17,6 +17,7 @@ struct vmpressure { unsigned long tree_scanned; unsigned long tree_reclaimed; + unsigned long stall; /* The lock is used to keep the scanned/reclaimed above in sync. */ struct spinlock sr_lock; diff --git a/mm/vmpressure.c b/mm/vmpressure.c index ee6515e9a4e5..1d085bb6c2a7 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -51,6 +52,10 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; static const unsigned int vmpressure_level_med = CONFIG_VMPRESSURE_LEVEL_MED; static const unsigned int vmpressure_level_critical = 95; +static unsigned long vmpressure_scale_max = 100; +module_param_named(vmpressure_scale_max, vmpressure_scale_max, + ulong, 0644); + static struct vmpressure global_vmpressure; static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); @@ -179,6 +184,15 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned, return pressure; } +static unsigned long vmpressure_account_stall(unsigned long pressure, + unsigned long stall, unsigned long scanned) +{ + unsigned long scale = + ((vmpressure_scale_max - pressure) * stall) / scanned; + + return pressure + scale; +} + struct vmpressure_event { struct eventfd_ctx *efd; enum vmpressure_levels level; @@ -335,6 +349,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, { struct vmpressure *vmpr = &global_vmpressure; unsigned long pressure; + unsigned long stall; if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) return; @@ -345,6 +360,11 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, 
spin_lock(&vmpr->sr_lock); vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; + + if (!current_is_kswapd()) + vmpr->stall += scanned; + + stall = vmpr->stall; scanned = vmpr->scanned; reclaimed = vmpr->reclaimed; spin_unlock(&vmpr->sr_lock); @@ -355,9 +375,11 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, spin_lock(&vmpr->sr_lock); vmpr->scanned = 0; vmpr->reclaimed = 0; + vmpr->stall = 0; spin_unlock(&vmpr->sr_lock); pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + pressure = vmpressure_account_stall(pressure, stall, scanned); vmpressure_notify(pressure); } From c50c2cb2823c42d8ddece05d4e91af22377099fd Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Wed, 19 Aug 2015 16:16:39 +0530 Subject: [PATCH 031/439] mm: vmpressure: account allocstalls only on higher pressures At present any vmpressure value is scaled up if the pages are reclaimed through direct reclaim. This can result in false vmpressure values. Consider a case where a device is booted up and most of the memory is occupied by file pages. kswapd will make sure that high watermark is maintained. Now when a sudden huge allocation request comes in, the system will definitely have to get into direct reclaims. The vmpressures can be very low, but because of allocstall accounting logic even these low values will be scaled to values nearing 100. This can result in unnecessary LMK kills for example. So define a tunable threshold for vmpressure above which the allocstalls will be accounted. 
Change-Id: Idd7c6724264ac89f1f68f2e9d70a32390ffca3e5 Signed-off-by: Vinayak Menon --- mm/vmpressure.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 1d085bb6c2a7..36fd0b53aa1b 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -56,6 +56,11 @@ static unsigned long vmpressure_scale_max = 100; module_param_named(vmpressure_scale_max, vmpressure_scale_max, ulong, 0644); +/* vmpressure values >= this will be scaled based on allocstalls */ +static unsigned long allocstall_threshold = 70; +module_param_named(allocstall_threshold, allocstall_threshold, + ulong, 0644); + static struct vmpressure global_vmpressure; static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); @@ -187,8 +192,12 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned, static unsigned long vmpressure_account_stall(unsigned long pressure, unsigned long stall, unsigned long scanned) { - unsigned long scale = - ((vmpressure_scale_max - pressure) * stall) / scanned; + unsigned long scale; + + if (pressure < allocstall_threshold) + return pressure; + + scale = ((vmpressure_scale_max - pressure) * stall) / scanned; return pressure + scale; } From 765e6092df592ede3882126033baa23e30fbc2b7 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Mon, 19 Sep 2016 12:44:15 +0530 Subject: [PATCH 032/439] mm: vmpressure: make vmpressure window variable Right now the vmpressure window is of constant size 2MB, which works well with the following exceptions. 1) False vmpressure triggers are seen when the RAM size is greater than 3GB. This results in lowmemorykiller, which uses vmpressure events, killing tasks unnecessarily. 2) Vmpressure events are received late under memory pressure. This behaviour is seen prominently in <=2GB RAM targets. This results in lowmemorykiller kicking in late to kill tasks resulting in avoidable page cache reclaim. 
The problem analysis shows that the issue is with the constant size of the vmpressure window which does not adapt to the varying memory conditions. This patch recalculates the vmpressure window size at the end of each window. The chosen window size is proportional to the total of free and cached memory at that point. Change-Id: I7e9ef4ddd82e2c2dd04ce09ec8d58a8829cfb64d Signed-off-by: Vinayak Menon --- mm/vmpressure.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 36fd0b53aa1b..16ed04650270 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -41,7 +41,7 @@ * TODO: Make the window size depend on machine size, as we do for vmstat * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). */ -static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; +static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; /* * These thresholds are used when we account memory pressure through @@ -353,6 +353,29 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } #endif +static void calculate_vmpressure_win(void) +{ + long x; + + x = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages() + + global_zone_page_state(NR_FREE_PAGES); + if (x < 1) + x = 1; + /* + * For low (free + cached), vmpressure window should be + * small, and high for higher values of (free + cached). + * But it should not be linear as well. This ensures + * timely vmpressure notifications when system is under + * memory pressure, and optimal number of events when + * cached is high. The sqaure root function is empirically + * found to serve the purpose. 
+ */ + x = int_sqrt(x); + vmpressure_win = x; +} + static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long reclaimed) { @@ -367,6 +390,9 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, return; spin_lock(&vmpr->sr_lock); + if (!vmpr->scanned) + calculate_vmpressure_win(); + vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; From f1c0630205bf19d27e22eb1899c8f8b642cb04a9 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Mon, 24 Feb 2020 19:03:04 -0800 Subject: [PATCH 033/439] simple_lmk: Use vmpressure notifier to trigger kills Using kswapd's scan depth to trigger task kills is inconsistent and unreliable. When memory pressure quickly spikes, the kswapd scan depth trigger fails to kick off Simple LMK fast enough, causing severe lag. Additionally, kswapd could stop scanning prematurely before reaching the desired scan depth to trigger Simple LMK, which could also cause stalls. To remedy this, use the vmpressure framework instead, since it provides more consistent and accurate readings on memory pressure. This is not very tunable though, so remove CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION. Triggering Simple LMK to kill when the reported memory pressure is 100 should yield good results on all setups. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 22 ---------------------- drivers/android/simple_lmk.c | 24 +++++++++++++++++------- include/linux/simple_lmk.h | 4 ---- mm/vmscan.c | 2 -- 4 files changed, 17 insertions(+), 35 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 7f65391e5e73..6a87d1298c5e 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -55,28 +55,6 @@ config ANDROID_SIMPLE_LMK if ANDROID_SIMPLE_LMK -config ANDROID_SIMPLE_LMK_AGGRESSION - int "Reclaim frequency selection" - range 1 3 - default 1 - help - This value determines how frequently Simple LMK will perform memory - reclaims. 
A lower value corresponds to less frequent reclaims, which - maximizes memory usage. The range of values has a logarithmic - correlation; 2 is twice as aggressive as 1, and 3 is twice as - aggressive as 2, which makes 3 four times as aggressive as 1. - - The aggression is set as a factor of kswapd's scan depth. This means - that a system with more memory will have a more expensive aggression - factor compared to a system with less memory. For example, setting an - aggression factor of 1 with 4 GiB of memory would be like setting a - factor of 2 with 8 GiB of memory; the more memory a system has, the - more expensive it is to use a lower value. - - Choosing a value of 1 here works well with systems that have 4 GiB of - memory. If the default doesn't work well, then this value should be - tweaked based on empirical results using different values. - config ANDROID_SIMPLE_LMK_MINFREE int "Minimum MiB of memory to free per reclaim" range 8 512 diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 215ee674d82d..2a3316100c79 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -10,6 +10,7 @@ #include #include #include +#include #include /* The minimum number of pages to free per reclaim */ @@ -267,13 +268,6 @@ static int simple_lmk_reclaim_thread(void *data) return 0; } -void simple_lmk_decide_reclaim(int kswapd_priority) -{ - if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && - !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) - wake_up(&oom_waitq); -} - void simple_lmk_mm_freed(struct mm_struct *mm) { int i; @@ -291,6 +285,20 @@ void simple_lmk_mm_freed(struct mm_struct *mm) read_unlock(&mm_free_lock); } +static int simple_lmk_vmpressure_cb(struct notifier_block *nb, + unsigned long pressure, void *data) +{ + if (pressure == 100 && !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) + wake_up(&oom_waitq); + + return NOTIFY_OK; +} + +static struct notifier_block vmpressure_notif = { + .notifier_call = 
simple_lmk_vmpressure_cb, + .priority = INT_MAX +}; + /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) { @@ -301,7 +309,9 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); BUG_ON(IS_ERR(thread)); + BUG_ON(vmpressure_notifier_register(&vmpressure_notif)); } + return 0; } diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index 28103c1b1d4c..b02d1bec9731 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -8,12 +8,8 @@ struct mm_struct; #ifdef CONFIG_ANDROID_SIMPLE_LMK -void simple_lmk_decide_reclaim(int kswapd_priority); void simple_lmk_mm_freed(struct mm_struct *mm); #else -static inline void simple_lmk_decide_reclaim(int kswapd_priority) -{ -} static inline void simple_lmk_mm_freed(struct mm_struct *mm) { } diff --git a/mm/vmscan.c b/mm/vmscan.c index 3d2ee7d76f90..d1cb6dfa3800 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include @@ -3656,7 +3655,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; - simple_lmk_decide_reclaim(sc.priority); sc.reclaim_idx = classzone_idx; /* From e75687a445db6c573daa3f383c8eb6bc85984493 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 26 Feb 2020 10:14:18 -0800 Subject: [PATCH 034/439] simple_lmk: Update adj targeting for Android 10 Android 10 changed its adj assignments. Update Simple LMK to use the new adjs, which also requires looking at each pair of adjs as a range. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 43 ++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 2a3316100c79..f502eb5da8d7 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -29,23 +29,21 @@ struct victim_info { }; /* Pulled from the Android framework. Lower adj means higher priority. */ -static const short adj_prio[] = { - 906, /* CACHED_APP_MAX_ADJ */ - 905, /* Cached app */ - 904, /* Cached app */ - 903, /* Cached app */ - 902, /* Cached app */ - 901, /* Cached app */ - 900, /* CACHED_APP_MIN_ADJ */ - 800, /* SERVICE_B_ADJ */ - 700, /* PREVIOUS_APP_ADJ */ - 600, /* HOME_APP_ADJ */ - 500, /* SERVICE_ADJ */ - 400, /* HEAVY_WEIGHT_APP_ADJ */ - 300, /* BACKUP_APP_ADJ */ - 200, /* PERCEPTIBLE_APP_ADJ */ - 100, /* VISIBLE_APP_ADJ */ - 0 /* FOREGROUND_APP_ADJ */ +static const short adjs[] = { + 1000, /* CACHED_APP_MAX_ADJ + 1 */ + 950, /* CACHED_APP_LMK_FIRST_ADJ */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 250, /* PERCEPTIBLE_LOW_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ }; static struct victim_info victims[MAX_VICTIMS]; @@ -87,7 +85,8 @@ static unsigned long get_total_mm_pages(struct mm_struct *mm) return pages; } -static unsigned long find_victims(int *vindex, short target_adj) +static unsigned long find_victims(int *vindex, short target_adj_min, + short target_adj_max) { unsigned long pages_found = 0; int old_vindex = *vindex; @@ -96,6 +95,7 @@ static unsigned long find_victims(int *vindex, short target_adj) for_each_process(tsk) { struct signal_struct *sig; struct task_struct *vtsk; + short adj; /* * Search for suitable tasks with 
the targeted importance (adj). @@ -107,7 +107,8 @@ static unsigned long find_victims(int *vindex, short target_adj) * trying to lock a task that we locked earlier. */ sig = tsk->signal; - if (READ_ONCE(sig->oom_score_adj) != target_adj || + adj = READ_ONCE(sig->oom_score_adj); + if (adj < target_adj_min || adj > target_adj_max - 1 || sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP) || (thread_group_empty(tsk) && tsk->flags & PF_EXITING) || vtsk_is_duplicate(*vindex, tsk)) @@ -177,8 +178,8 @@ static void scan_and_kill(unsigned long pages_needed) * is guaranteed to be up to date. */ read_lock(&tasklist_lock); - for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { - pages_found += find_victims(&nr_victims, adj_prio[i]); + for (i = 1; i < ARRAY_SIZE(adjs); i++) { + pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } From 61379f12a0ab25a2fe71f3e92689c4de8edc66c6 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Fri, 28 Feb 2020 12:43:54 -0800 Subject: [PATCH 035/439] mm: vmpressure: Don't exclude any allocation types Although userspace processes can't directly help with kernel memory pressure, killing userspace processes can relieve kernel memory if they are responsible for that pressure in the first place. It doesn't make sense to exclude any allocation types knowing that userspace can indeed affect all memory pressure, so don't exclude any allocation types from the pressure calculations. Signed-off-by: Sultan Alsawaf --- mm/vmpressure.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 16ed04650270..63bf9c7fb754 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -278,20 +278,6 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); - /* - * Here we only want to account pressure that userland is able to - * help us with. 
For example, suppose that DMA zone is under - * pressure; if we notify userland about that kind of pressure, - * then it will be mostly a waste as it will trigger unnecessary - * freeing of memory by userland (since userland is more likely to - * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That - * is why we include only movable, highmem and FS/IO pages. - * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so - * we account it too. - */ - if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) - return; - /* * If we got here with no pages scanned, then that is an indicator * that reclaimer was unable to find any shrinkable LRUs at the @@ -383,9 +369,6 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long pressure; unsigned long stall; - if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) - return; - if (!scanned) return; From 59dd1d7257659088ddeb9424e9a0474542915062 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Fri, 28 Feb 2020 12:38:10 -0800 Subject: [PATCH 036/439] mm: vmpressure: Interpret zero scanned pages as 100% pressure When no pages are scanned, it usually means no zones were reclaimable and nothing could be done. In this case, the reported pressure should be 100 to elicit help from any listeners. This fixes the vmpressure framework not working when memory pressure is very high. 
Signed-off-by: Sultan Alsawaf --- mm/vmpressure.c | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 63bf9c7fb754..fb486f79f55a 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -369,26 +369,25 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long pressure; unsigned long stall; - if (!scanned) - return; - - spin_lock(&vmpr->sr_lock); - if (!vmpr->scanned) - calculate_vmpressure_win(); + if (scanned) { + spin_lock(&vmpr->sr_lock); + if (!vmpr->scanned) + calculate_vmpressure_win(); - vmpr->scanned += scanned; - vmpr->reclaimed += reclaimed; + vmpr->scanned += scanned; + vmpr->reclaimed += reclaimed; - if (!current_is_kswapd()) - vmpr->stall += scanned; + if (!current_is_kswapd()) + vmpr->stall += scanned; - stall = vmpr->stall; - scanned = vmpr->scanned; - reclaimed = vmpr->reclaimed; - spin_unlock(&vmpr->sr_lock); + stall = vmpr->stall; + scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) - return; + if (scanned < vmpressure_win) + return; + } spin_lock(&vmpr->sr_lock); vmpr->scanned = 0; @@ -396,8 +395,12 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, vmpr->stall = 0; spin_unlock(&vmpr->sr_lock); - pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); - pressure = vmpressure_account_stall(pressure, stall, scanned); + if (scanned) { + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + pressure = vmpressure_account_stall(pressure, stall, scanned); + } else { + pressure = 100; + } vmpressure_notify(pressure); } From 2e661928134a16f5725687a97e16fd97bcb02111 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Fri, 28 Feb 2020 12:57:20 -0800 Subject: [PATCH 037/439] mm: vmpressure: Don't cache the window size Caching the window size can result in delayed or inaccurate pressure reports. 
Since calculating a fresh window size is cheap, do so all the time instead of relying on a stale, cached value. Signed-off-by: Sultan Alsawaf --- mm/vmpressure.c | 112 ++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index fb486f79f55a..58a8ad031c5e 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -27,22 +27,6 @@ #include #include -/* - * The window size (vmpressure_win) is the number of scanned pages before - * we try to analyze scanned/reclaimed ratio. So the window is used as a - * rate-limit tunable for the "low" level notification, and also for - * averaging the ratio for medium/critical levels. Using small window - * sizes can cause lot of false positives, but too big window size will - * delay the notifications. - * - * As the vmscan reclaimer logic works with chunks which are multiple of - * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well. - * - * TODO: Make the window size depend on machine size, as we do for vmstat - * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). - */ -static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; - /* * These thresholds are used when we account memory pressure through * scanned/reclaimed ratio. The current values were chosen empirically. In @@ -272,9 +256,32 @@ static void vmpressure_work_fn(struct work_struct *work) } while ((vmpr = vmpressure_parent(vmpr))); } +static unsigned long calculate_vmpressure_win(void) +{ + long x; + + x = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages() + + global_zone_page_state(NR_FREE_PAGES); + if (x < 1) + return 1; + /* + * For low (free + cached), vmpressure window should be + * small, and high for higher values of (free + cached). + * But it should not be linear as well. 
This ensures + * timely vmpressure notifications when system is under + * memory pressure, and optimal number of events when + * cached is high. The sqaure root function is empirically + * found to serve the purpose. + */ + return int_sqrt(x); +} + #ifdef CONFIG_MEMCG -static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); @@ -286,7 +293,9 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, * (scanning depth) goes too high (deep), we will be notified * through vmpressure_prio(). But so far, keep calm. */ - if (!scanned) + if (critical) + scanned = calculate_vmpressure_win(); + else if (!scanned) return; if (tree) { @@ -295,7 +304,7 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, vmpr->tree_reclaimed += reclaimed; spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) + if (!critical && scanned < calculate_vmpressure_win()) return; schedule_work(&vmpr->work); } else { @@ -309,7 +318,7 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, spin_lock(&vmpr->sr_lock); scanned = vmpr->scanned += scanned; reclaimed = vmpr->reclaimed += reclaimed; - if (scanned < vmpressure_win) { + if (!critical && scanned < calculate_vmpressure_win()) { spin_unlock(&vmpr->sr_lock); return; } @@ -333,47 +342,23 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } } #else -static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) -{ -} +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) { } #endif -static void calculate_vmpressure_win(void) -{ 
- long x; - - x = global_node_page_state(NR_FILE_PAGES) - - global_node_page_state(NR_SHMEM) - - total_swapcache_pages() + - global_zone_page_state(NR_FREE_PAGES); - if (x < 1) - x = 1; - /* - * For low (free + cached), vmpressure window should be - * small, and high for higher values of (free + cached). - * But it should not be linear as well. This ensures - * timely vmpressure notifications when system is under - * memory pressure, and optimal number of events when - * cached is high. The sqaure root function is empirically - * found to serve the purpose. - */ - x = int_sqrt(x); - vmpressure_win = x; -} - -static void vmpressure_global(gfp_t gfp, unsigned long scanned, - unsigned long reclaimed) +static void vmpressure_global(gfp_t gfp, unsigned long scanned, bool critical, + unsigned long reclaimed) { struct vmpressure *vmpr = &global_vmpressure; unsigned long pressure; unsigned long stall; + if (critical) + scanned = calculate_vmpressure_win(); + if (scanned) { spin_lock(&vmpr->sr_lock); - if (!vmpr->scanned) - calculate_vmpressure_win(); - vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; @@ -385,7 +370,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, reclaimed = vmpr->reclaimed; spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) + if (!critical && scanned < calculate_vmpressure_win()) return; } @@ -404,6 +389,17 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, vmpressure_notify(pressure); } +static void __vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) +{ + if (!memcg && tree) + vmpressure_global(gfp, scanned, critical, reclaimed); + + if (IS_ENABLED(CONFIG_MEMCG)) + vmpressure_memcg(gfp, memcg, critical, tree, scanned, reclaimed); +} + /** * vmpressure() - Account memory pressure through scanned/reclaimed ratio * @gfp: reclaimer's gfp mask @@ -428,11 +424,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, void 
vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { - if (!memcg && tree) - vmpressure_global(gfp, scanned, reclaimed); - - if (IS_ENABLED(CONFIG_MEMCG)) - vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed); + __vmpressure(gfp, memcg, false, tree, scanned, reclaimed); } /** @@ -462,7 +454,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) * to the vmpressure() basically means that we signal 'critical' * level. */ - vmpressure(gfp, memcg, true, vmpressure_win, 0); + __vmpressure(gfp, memcg, true, true, 0, 0); } static enum vmpressure_levels str_to_level(const char *arg) From 80ebd554789467f591a1afe0f6bf6a265c659785 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 26 Mar 2020 21:42:01 -0700 Subject: [PATCH 038/439] mm: Adjust tsk_is_oom_victim() for Simple LMK The page allocator uses tsk_is_oom_victim() to determine when to fast-path memory allocations in order to get an allocating process out of the page allocator and into do_exit() quickly. Unfortunately, tsk_is_oom_victim()'s check to see if a process is killed for OOM purposes is to look for the presence of an OOM reaper artifact that only the OOM killer sets. This means that for processes killed by Simple LMK, there is no fast-pathing done in the page allocator to get them to die faster. Remedy this by changing tsk_is_oom_victim() to look for the existence of the TIF_MEMDIE flag, which Simple LMK sets for its victims.
Signed-off-by: Sultan Alsawaf --- include/linux/oom.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/oom.h b/include/linux/oom.h index 9502b0b057a2..c4d85b912941 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -63,7 +63,11 @@ static inline bool oom_task_origin(const struct task_struct *p) static inline bool tsk_is_oom_victim(struct task_struct * tsk) { +#ifdef CONFIG_ANDROID_SIMPLE_LMK + return test_ti_thread_flag(task_thread_info(tsk), TIF_MEMDIE); +#else return tsk->signal->oom_mm; +#endif } /* From 77fb279a7800f25cd531b932ae072e17b1049baa Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 26 Mar 2020 21:44:28 -0700 Subject: [PATCH 039/439] mm: Don't warn on page allocation failures for OOM-killed processes It can be normal for a dying process to have its page allocation request fail when it has an OOM or LMK kill pending. In this case, it's actually detrimental to print out a massive allocation failure message because this means the running process needs to die quickly and release its memory, which is slowed down slightly by the massive kmsg splat. The allocation failure message is also a false positive in this case, since the failure is intentional rather than being the result of an inability to allocate memory. Suppress the allocation failure warning for processes that are killed to release memory in order to expedite their death and remedy the kmsg confusion from seeing spurious allocation failure messages.
Signed-off-by: Sultan Alsawaf --- mm/page_alloc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1018bc303bdf..2dce6a289597 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4099,8 +4099,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && (alloc_flags == ALLOC_OOM || - (gfp_mask & __GFP_NOMEMALLOC))) + (gfp_mask & __GFP_NOMEMALLOC))) { + gfp_mask |= __GFP_NOWARN; goto nopage; + } /* Retry as long as the OOM killer is making progress */ if (did_some_progress) { From d19175bd4c5545ec28a00c7a40fb09353c51b8d5 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 4 Apr 2020 17:48:40 -0700 Subject: [PATCH 040/439] mm: vmpressure: Ignore allocation orders above PAGE_ALLOC_COSTLY_ORDER PAGE_ALLOC_COSTLY_ORDER allocations can cause vmpressure to incorrectly think that memory pressure is high, when it's really just that the allocation's high order is difficult to satisfy. When this rare scenario occurs, ignore the input to vmpressure to avoid sending out a spurious high-pressure signal. 
Signed-off-by: Sultan Alsawaf --- include/linux/vmpressure.h | 3 ++- mm/vmpressure.c | 5 ++++- mm/vmscan.c | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 8f33ef96dd5d..c8b5cf51d652 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -34,7 +34,8 @@ struct mem_cgroup; extern int vmpressure_notifier_register(struct notifier_block *nb); extern int vmpressure_notifier_unregister(struct notifier_block *nb); extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed); + unsigned long scanned, unsigned long reclaimed, + int order); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); #ifdef CONFIG_MEMCG diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 58a8ad031c5e..15c95c01bb44 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -422,8 +422,11 @@ static void __vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool critical, * This function does not return any value.
*/ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) + unsigned long scanned, unsigned long reclaimed, int order) { + if (order > PAGE_ALLOC_COSTLY_ORDER) + return; + __vmpressure(gfp, memcg, false, tree, scanned, reclaimed); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d1cb6dfa3800..2a53e3240d04 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2942,7 +2942,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) /* Record the group's reclaim efficiency */ vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, - sc->nr_reclaimed - reclaimed); + sc->nr_reclaimed - reclaimed, sc->order); /* * Direct reclaim and kswapd have to scan all memory @@ -2978,7 +2978,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) /* Record the subtree's reclaim efficiency */ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, sc->nr_scanned - nr_scanned, - sc->nr_reclaimed - nr_reclaimed); + sc->nr_reclaimed - nr_reclaimed, sc->order); if (sc->nr_reclaimed - nr_reclaimed) reclaimable = true; From 20e5bebc56702a4e238011568ee311a5a62073bb Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 14 May 2020 15:16:48 -0700 Subject: [PATCH 041/439] simple_lmk: Consider all positive adjs when finding victims We are allowed to kill any process with a positive adj, so we shouldn't exclude any processes with adjs greater than 999. This would present a problem with quirky applications that set their own adj score, such as stress-ng. In the case of stress-ng, it would set its adj score to 1000 and thus exempt itself from being killed by Simple LMK. This shouldn't be allowed; any process with a positive adj, up to the highest positive adj possible (32767) should be killable. 
Reported-by: Danny Lin Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index f502eb5da8d7..d89e5b1ce363 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -29,21 +29,21 @@ struct victim_info { }; /* Pulled from the Android framework. Lower adj means higher priority. */ -static const short adjs[] = { - 1000, /* CACHED_APP_MAX_ADJ + 1 */ - 950, /* CACHED_APP_LMK_FIRST_ADJ */ - 900, /* CACHED_APP_MIN_ADJ */ - 800, /* SERVICE_B_ADJ */ - 700, /* PREVIOUS_APP_ADJ */ - 600, /* HOME_APP_ADJ */ - 500, /* SERVICE_ADJ */ - 400, /* HEAVY_WEIGHT_APP_ADJ */ - 300, /* BACKUP_APP_ADJ */ - 250, /* PERCEPTIBLE_LOW_APP_ADJ */ - 200, /* PERCEPTIBLE_APP_ADJ */ - 100, /* VISIBLE_APP_ADJ */ - 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ - 0 /* FOREGROUND_APP_ADJ */ +static const unsigned short adjs[] = { + SHRT_MAX + 1, /* Include all positive adjs in the final range */ + 950, /* CACHED_APP_LMK_FIRST_ADJ */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 250, /* PERCEPTIBLE_LOW_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ }; static struct victim_info victims[MAX_VICTIMS]; @@ -85,8 +85,8 @@ static unsigned long get_total_mm_pages(struct mm_struct *mm) return pages; } -static unsigned long find_victims(int *vindex, short target_adj_min, - short target_adj_max) +static unsigned long find_victims(int *vindex, unsigned short target_adj_min, + unsigned short target_adj_max) { unsigned long pages_found = 0; int old_vindex = *vindex; From 856aa5f27cf66cccc5f68a6c3a0711114b0294c7 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: 
Wed, 20 May 2020 09:55:17 -0700 Subject: [PATCH 042/439] mm: Don't stop kswapd on a per-node basis when there are no waiters The page allocator wakes all kswapds in an allocation context's allowed nodemask in the slow path, so it doesn't make sense to have the kswapd- waiter count per each NUMA node. Instead, it should be a global counter to stop all kswapds when there are no failed allocation requests. Signed-off-by: Sultan Alsawaf --- include/linux/mmzone.h | 1 - mm/internal.h | 1 + mm/page_alloc.c | 8 ++++---- mm/vmscan.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 619ccfdf47d7..cecf0a58cf66 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -656,7 +656,6 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; - atomic_t kswapd_waiters; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by diff --git a/mm/internal.h b/mm/internal.h index b0302094d4bb..9284225c09f0 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -167,6 +167,7 @@ extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern int user_min_free_kbytes; +extern atomic_long_t kswapd_waiters; #if defined CONFIG_COMPACTION || defined CONFIG_CMA diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2dce6a289597..5fb2136b90b8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -75,6 +75,8 @@ #include #include "internal.h" +atomic_long_t kswapd_waiters = ATOMIC_LONG_INIT(0); + /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) @@ -3913,7 +3915,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int reserve_flags; unsigned long pages_reclaimed = 0; int 
retry_loop_count = 0; - pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat; bool woke_kswapd = false; unsigned long jiffies_s = jiffies; u64 utime, stime_s, stime_e, stime_d; @@ -3954,7 +3955,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (gfp_mask & __GFP_KSWAPD_RECLAIM) { if (!woke_kswapd) { - atomic_inc(&pgdat->kswapd_waiters); + atomic_long_inc(&kswapd_waiters); woke_kswapd = true; } wake_all_kswapds(order, ac); @@ -4158,7 +4159,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, fail: got_pg: if (woke_kswapd) - atomic_dec(&pgdat->kswapd_waiters); + atomic_long_dec(&kswapd_waiters); if (!page) warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); @@ -6131,7 +6132,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) pgdat_page_ext_init(pgdat); spin_lock_init(&pgdat->lru_lock); lruvec_init(node_lruvec(pgdat)); - pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0); pgdat->per_cpu_nodestats = &boot_nodestats; diff --git a/mm/vmscan.c b/mm/vmscan.c index 2a53e3240d04..3ccf3b12ab4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3727,7 +3727,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) /* Check if kswapd should be suspending */ if (try_to_freeze() || kthread_should_stop() || - !atomic_read(&pgdat->kswapd_waiters)) + !atomic_long_read(&kswapd_waiters)) break; /* From b0a69bfef33c8bbe6267d330bda41e6ae3a66a21 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 20 May 2020 19:52:05 -0700 Subject: [PATCH 043/439] simple_lmk: Hold an RCU read lock instead of the tasklist read lock We already check to see if each eligible process isn't already dying, so an RCU read lock can be used to speed things up instead of holding the tasklist read lock. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index d89e5b1ce363..a08287e4fcfe 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -172,18 +172,14 @@ static void scan_and_kill(unsigned long pages_needed) int i, nr_to_kill = 0, nr_victims = 0, ret; unsigned long pages_found = 0; - /* - * Hold the tasklist lock so tasks don't disappear while scanning. This - * is preferred to holding an RCU read lock so that the list of tasks - * is guaranteed to be up to date. - */ - read_lock(&tasklist_lock); + /* Hold an RCU read lock while traversing the global process list */ + rcu_read_lock(); for (i = 1; i < ARRAY_SIZE(adjs); i++) { pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } - read_unlock(&tasklist_lock); + rcu_read_unlock(); /* Pretty unlikely but it can happen */ if (unlikely(!nr_victims)) { From 71862b8c108b8bc844af689a7c46c9e9dbd919a0 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 10 Jun 2020 17:55:04 -0700 Subject: [PATCH 044/439] simple_lmk: Remove unnecessary clean-up when timeout is reached Zeroing out the mm struct pointers when the timeout is hit isn't needed because mm_free_lock prevents any readers from accessing the mm struct pointers while clean-up occurs, and since the simple_lmk_mm_freed() loop bound is set to zero during clean-up, there is no possibility of dying processes ever reading stale mm struct pointers. Therefore, it is unnecessary to clear out the mm struct pointers when the timeout is reached. Now the only step to do when the timeout is reached is to re-init the completion, but since reinit_completion() just sets a struct member to zero, call reinit_completion() unconditionally as it is faster than encapsulating it within a conditional statement. 
Also take this opportunity to rename some variables and tidy up some code indentation. Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 40 +++++++++++++++--------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index a08287e4fcfe..b0bffb991aa3 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -50,7 +50,7 @@ static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); static DEFINE_RWLOCK(mm_free_lock); -static int victims_to_kill; +static int nr_victims; static atomic_t needs_reclaim = ATOMIC_INIT(0); static atomic_t nr_killed = ATOMIC_INIT(0); @@ -169,26 +169,26 @@ static int process_victims(int vlen, unsigned long pages_needed) static void scan_and_kill(unsigned long pages_needed) { - int i, nr_to_kill = 0, nr_victims = 0, ret; + int i, nr_to_kill = 0, nr_found = 0; unsigned long pages_found = 0; /* Hold an RCU read lock while traversing the global process list */ rcu_read_lock(); for (i = 1; i < ARRAY_SIZE(adjs); i++) { - pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); - if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) + pages_found += find_victims(&nr_found, adjs[i], adjs[i - 1]); + if (pages_found >= pages_needed || nr_found == MAX_VICTIMS) break; } rcu_read_unlock(); /* Pretty unlikely but it can happen */ - if (unlikely(!nr_victims)) { + if (unlikely(!nr_found)) { pr_err("No processes available to kill!\n"); return; } /* First round of victim processing to weed out unneeded victims */ - nr_to_kill = process_victims(nr_victims, pages_needed); + nr_to_kill = process_victims(nr_found, pages_needed); /* * Try to kill as few of the chosen victims as possible by sorting the @@ -202,7 +202,7 @@ static void scan_and_kill(unsigned long pages_needed) /* Store the final number of victims for simple_lmk_mm_freed() */ 
write_lock(&mm_free_lock); - victims_to_kill = nr_to_kill; + nr_victims = nr_to_kill; write_unlock(&mm_free_lock); /* Kill the victims */ @@ -235,15 +235,10 @@ static void scan_and_kill(unsigned long pages_needed) } /* Wait until all the victims die or until the timeout is reached */ - ret = wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); write_lock(&mm_free_lock); - if (!ret) { - /* Extra clean-up is needed when the timeout is hit */ - reinit_completion(&reclaim_done); - for (i = 0; i < nr_to_kill; i++) - victims[i].mm = NULL; - } - victims_to_kill = 0; + reinit_completion(&reclaim_done); + nr_victims = 0; nr_killed = (atomic_t)ATOMIC_INIT(0); write_unlock(&mm_free_lock); } @@ -270,14 +265,13 @@ void simple_lmk_mm_freed(struct mm_struct *mm) int i; read_lock(&mm_free_lock); - for (i = 0; i < victims_to_kill; i++) { - if (victims[i].mm != mm) - continue; - - victims[i].mm = NULL; - if (atomic_inc_return_relaxed(&nr_killed) == victims_to_kill) - complete(&reclaim_done); - break; + for (i = 0; i < nr_victims; i++) { + if (victims[i].mm == mm) { + victims[i].mm = NULL; + if (atomic_inc_return_relaxed(&nr_killed) == nr_victims) + complete(&reclaim_done); + break; + } } read_unlock(&mm_free_lock); } From d73f245f71c3424b69b1a18ee524c2868ceccd99 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 30 Nov 2018 10:33:18 +1100 Subject: [PATCH 045/439] VFS: use synchronize_rcu_expedited() in namespace_unlock() The synchronize_rcu() in namespace_unlock() is called every time a filesystem is unmounted. If a great many filesystems are mounted, this can cause a noticable slow-down in, for example, system shutdown. The sequence: mkdir -p /tmp/Mtest/{0..5000} time for i in /tmp/Mtest/*; do mount -t tmpfs tmpfs $i ; done time umount /tmp/Mtest/* on a 4-cpu VM can report 8 seconds to mount the tmpfs filesystems, and 100 seconds to unmount them. 
Boot the same VM with 1 CPU and it takes 18 seconds to mount the tmpfs filesystems, but only 36 to unmount. If we change the synchronize_rcu() to synchronize_rcu_expedited() the umount time on a 4-cpu VM drop to 0.6 seconds I think this 200-fold speed up is worth the slightly high system impact of using synchronize_rcu_expedited(). Acked-by: Paul E. McKenney (from general rcu perspective) Signed-off-by: NeilBrown Signed-off-by: Al Viro --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/namespace.c b/fs/namespace.c index eaa138384686..c06a0954a9d2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1842,7 +1842,7 @@ static void namespace_unlock(void) if (likely(hlist_empty(&head))) return; - synchronize_rcu(); + synchronize_rcu_expedited(); group_pin_kill(&head); } From c541f328dd71a2eebbe5ff75dcb6ea6c10a6403c Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 10 Sep 2020 15:41:57 -0700 Subject: [PATCH 046/439] simple_lmk: Print a message when the timeout is reached This aids in selecting an adequate timeout. If the timeout is hit often and Simple LMK is killing too much, then the timeout should be lengthened. If the timeout is rarely hit and Simple LMK is not killing fast enough under pressure, then the timeout should be shortened. 
Signed-off-by: Sultan Alsawaf --- drivers/android/simple_lmk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index b0bffb991aa3..0ce3bb924220 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -235,7 +235,10 @@ static void scan_and_kill(unsigned long pages_needed) } /* Wait until all the victims die or until the timeout is reached */ - wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + if (!wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES)) + pr_info("Timeout hit waiting for victims to die, proceeding\n"); + + /* Clean up for future reclaim invocations */ write_lock(&mm_free_lock); reinit_completion(&reclaim_done); nr_victims = 0; From 38ecea091d20a7d5a4aedd210b087af44dc6f5d1 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 10 Sep 2020 15:43:46 -0700 Subject: [PATCH 047/439] simple_lmk: Add !PSI dependency When PSI is enabled, lmkd in userspace will use PSI notifications to perform low memory kills. Therefore, to ensure that Simple LMK is the only active LMK implementation, add a !PSI dependency. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 6a87d1298c5e..6e3fce05d861 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -44,7 +44,7 @@ config ANDROID_BINDER_IPC_SELFTEST config ANDROID_SIMPLE_LMK bool "Simple Android Low Memory Killer" - depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG + depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG && !PSI ---help--- This is a complete low memory killer solution for Android that is small and simple. 
Processes are killed according to the priorities From c467e48e5eb7bec685fd285eb56f9e7074548d1a Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Thu, 10 Sep 2020 15:51:17 -0700 Subject: [PATCH 048/439] simple_lmk: Update Kconfig description for VM pressure change Simple LMK uses VM pressure now, not a kswapd hook like before. Update the Kconfig description to reflect such. Signed-off-by: Sultan Alsawaf --- drivers/android/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 6e3fce05d861..c46fbd79e7fa 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -50,8 +50,8 @@ config ANDROID_SIMPLE_LMK small and simple. Processes are killed according to the priorities that Android gives them, so that the least important processes are always killed first. Processes are killed until memory deficits are - satisfied, as observed from kswapd struggling to free up pages. Simple - LMK stops killing processes when kswapd finally goes back to sleep. + satisfied, as observed from direct reclaim and kswapd reclaim + struggling to free up pages, via VM pressure notifications. if ANDROID_SIMPLE_LMK From 31b82bde65cee6acc66e3a6da21b66e6031c4cf3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 12 Feb 2020 19:43:01 +0300 Subject: [PATCH 049/439] Makefile: ignore toolchain directory Signed-off-by: Denis Efremov --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 6f8e18d17152..294d53d3bc88 100644 --- a/Makefile +++ b/Makefile @@ -460,10 +460,12 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_ve # Files to ignore in find ... 
statements export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \ - -name CVS -o -name .pc -o -name .hg -o -name .git \) \ + -name CVS -o -name .pc -o -name .hg -o -name .git -o \ + -name toolchain \) \ -prune -o export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ - --exclude CVS --exclude .pc --exclude .hg --exclude .git + --exclude CVS --exclude .pc --exclude .hg --exclude .git \ + --exclude toolchain # =========================================================================== # Rules shared between *config targets and build targets From 59a9cb399ea97405fc37a377aa6ebe8d365e463f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 10 Sep 2020 19:12:14 +0300 Subject: [PATCH 050/439] Makefile: revert back to original compiler variables Signed-off-by: Denis Efremov --- Makefile | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 294d53d3bc88..ba827944994e 100644 --- a/Makefile +++ b/Makefile @@ -310,10 +310,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ # "make" in the configured kernel build directory always uses that. 
# Default value for CROSS_COMPILE is not to prefix executables # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile -#ARCH ?= $(SUBARCH) -#CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) -ARCH ?= arm64 -CROSS_COMPILE ?= $(srctree)/toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android- +ARCH ?= $(SUBARCH) +CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) # Architecture as present in compile.h UTS_MACHINE := $(ARCH) @@ -375,14 +373,14 @@ HOST_LOADLIBES := $(HOST_LFS_LIBS) AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold -#CC = $(CROSS_COMPILE)gcc -CC = $(srctree)/toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/clang +CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +READELF = $(CROSS_COMPILE)readelf AWK = awk GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel @@ -391,11 +389,6 @@ PERL = perl PYTHON = python CHECK = sparse -ifeq ($(CONFIG_EXYNOS_FMP_FIPS),) -READELF = $(CROSS_COMPILE)readelf -export READELF -endif - CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void $(CF) NOSTDINC_FLAGS = @@ -440,7 +433,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds GCC_PLUGINS_CFLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES +export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS @@ -513,8 +506,7 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -#CLANG_TRIPLE ?= $(CROSS_COMPILE) -CLANG_TRIPLE ?= $(srctree)/toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/aarch64-linux-gnu- +CLANG_TRIPLE ?= $(CROSS_COMPILE) CLANG_FLAGS := 
--target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) From 5926334efdfaaee1ae64f6b73b1938340b0ffde3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 20 Oct 2020 01:01:46 +0300 Subject: [PATCH 051/439] firmware/Makefile: fix out-of-tree build Signed-off-by: Denis Efremov --- firmware/Makefile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/firmware/Makefile b/firmware/Makefile index 5bcb1eb32452..84711ec93fe4 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -8,7 +8,7 @@ CONFIG_EXTRA_FIRMWARE_DIR="firmware" # Create $(fwabs) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a # leading /, it's relative to $(srctree). fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) -fwabs := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) +fwabs := $(filter-out /%,$(fwdir))$(filter /%,$(fwdir)) fw-external-y := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)) @@ -209,13 +209,13 @@ wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \ firmware/Makefile) $(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps) - $(call cmd,fwbin,$(fwabs)/$(patsubst $(obj)/%.gen.S,%,$@)) + $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) $(patsubst %,$(obj)/%.gen.S, $(fw-external-y)): %: $(wordsize_deps) - $(call cmd,fwbin,$(fwabs)/$(patsubst $(obj)/%.gen.S,%,$@)) + $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) # The .o files depend on the binaries directly; the .S files don't. -$(patsubst %,$(obj)/%.gen.o, $(fw-shipped-y)): $(obj)/%.gen.o: $(fwdir)/% -$(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): $(obj)/%.gen.o: $(fwdir)/% +$(patsubst %,$(obj)/%.gen.o, $(fw-shipped-y)): %.gen.o: % +$(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): %.gen.o: % # .ihex is used just as a simple way to hold binary files in a source tree # where binaries are frowned upon. They are directly converted with objcopy. 
From 207602be6d27b7b1e401be0845c93ce789c70c49 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 18:48:58 +0300 Subject: [PATCH 052/439] wholetree: .gitignore update Signed-off-by: Denis Efremov --- .gitignore | 6 ++++++ firmware/five/.gitignore | 1 + lib/.gitignore | 1 + scripts/rkp_cfp/.gitignore | 1 + security/proca/.gitignore | 2 ++ security/samsung/defex_lsm/.gitignore | 3 +++ 6 files changed, 14 insertions(+) create mode 100644 firmware/five/.gitignore create mode 100644 scripts/rkp_cfp/.gitignore create mode 100644 security/proca/.gitignore create mode 100644 security/samsung/defex_lsm/.gitignore diff --git a/.gitignore b/.gitignore index be92dfa89957..880734d33f8e 100644 --- a/.gitignore +++ b/.gitignore @@ -125,3 +125,9 @@ all.config # fetched Android config fragments kernel/configs/android-*.cfg + +# samsung +*.dtbo +*.reverse.dts +__pycache__/ +*.pyc diff --git a/firmware/five/.gitignore b/firmware/five/.gitignore new file mode 100644 index 000000000000..0994479aa88a --- /dev/null +++ b/firmware/five/.gitignore @@ -0,0 +1 @@ +*.tlbin diff --git a/lib/.gitignore b/lib/.gitignore index 09aae85418ab..5d4f9603e477 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -4,3 +4,4 @@ gen_crc32table crc32table.h oid_registry_data.c +libdss.c diff --git a/scripts/rkp_cfp/.gitignore b/scripts/rkp_cfp/.gitignore new file mode 100644 index 000000000000..82520ca123af --- /dev/null +++ b/scripts/rkp_cfp/.gitignore @@ -0,0 +1 @@ +/tmp/ diff --git a/security/proca/.gitignore b/security/proca/.gitignore new file mode 100644 index 000000000000..d95557d705a3 --- /dev/null +++ b/security/proca/.gitignore @@ -0,0 +1,2 @@ +proca_certificate-asn1.h +proca_certificate-asn1.c diff --git a/security/samsung/defex_lsm/.gitignore b/security/samsung/defex_lsm/.gitignore new file mode 100644 index 000000000000..16ed5d7821b2 --- /dev/null +++ b/security/samsung/defex_lsm/.gitignore @@ -0,0 +1,3 @@ +*.der +defex_packed_rules.inc +pack_rules From 
d0b59b275cc1709285208a613adac990c185b32d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 20:43:04 +0300 Subject: [PATCH 053/439] kernel: Add CC_WERROR config to turn warnings into errors Add configuration option CONFIG_CC_WERROR to prevent warnings from creeping in. Signed-off-by: Chris Fries Signed-off-by: Nathan Chancellor Signed-off-by: Denis Efremov --- Makefile | 6 +++++- lib/Kconfig.debug | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ba827944994e..cce28dd381d6 100644 --- a/Makefile +++ b/Makefile @@ -421,9 +421,9 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -Werror \ -Xassembler -march=armv8-a+lse \ -std=gnu89 + KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := @@ -647,6 +647,10 @@ else include/config/auto.conf: ; endif # $(dot-config) +ifdef CONFIG_CC_WERROR + KBUILD_CFLAGS += -Werror +endif + # For the kernel to actually contain only the needed exported symbols, # we have to build modules as well to determine what those symbols are. # (this can be evaluated only once include/config/auto.conf has been included) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 85b2e9c7aa23..ccd0e952f182 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1960,6 +1960,14 @@ config BUG_ON_DATA_CORRUPTION If unsure, say N. +config CC_WERROR + bool "Treat all compile warnings as errors" + default y + help + Select this option to set compiler warnings as errors, + to prevent easily-fixable problems from creeping into + the codebase. 
+ source "samples/Kconfig" source "lib/Kconfig.kgdb" From a7a69eed82718b99512d934ae7b56980233c3293 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:19:33 +0300 Subject: [PATCH 054/439] bcmdhd_101_16: add __linux__ define Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index 8eba343dde17..c311e7a849c8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -24,7 +24,8 @@ ##################### DHDCFLAGS += -DBCMUTILS_ERR_CODES -DUSE_NEW_RSPEC_DEFS -DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -Dlinux -DLINUX -DBCMDRIVER \ +DHDCFLAGS += -Dlinux -D__linux__ -DLINUX +DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -DBCMDRIVER \ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ -DDHDTHREAD -DDHD_BCMEVENTS -DSHOW_EVENTS -DWLP2P \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DKEEP_ALIVE -DCSCAN \ From 44d1bb6c64672e808a699c527bcab1661ac87e5c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 27 Mar 2020 01:40:39 +0300 Subject: [PATCH 055/439] rkp_cfp: fix python version Signed-off-by: Denis Efremov --- scripts/rkp_cfp/instrument.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/rkp_cfp/instrument.py b/scripts/rkp_cfp/instrument.py index 1895ba96b8bd..d19ea7e01031 100755 --- a/scripts/rkp_cfp/instrument.py +++ b/scripts/rkp_cfp/instrument.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # Copyright (c) 2016 Samsung Electronics Co., Ltd. 
# Authors: James Gleeson From 3623d60fa4670e3213ccf2b3e029ffe04ecf251a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 9 Sep 2020 18:32:37 +0300 Subject: [PATCH 056/439] lib/libdss-build.sh: fix python version Signed-off-by: Denis Efremov --- lib/libdss-build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/libdss-build.sh b/lib/libdss-build.sh index 30d3685c53f1..60c71ab9aa4e 100755 --- a/lib/libdss-build.sh +++ b/lib/libdss-build.sh @@ -3,7 +3,7 @@ if [[ ${CC} = *"clang" ]]; then CC_DIR=$(dirname "${CC}") export PATH=$PATH:${CC_DIR} rm -rf lib/libdss.c -python lib/make_libdss.py &> lib/libdss.c +python2 lib/make_libdss.py &> lib/libdss.c ${CC} \ --target=aarch64-linux-gnu \ -Ilib/libdss-include \ From caa2c718da569107c745986f90dede39b562f6ce Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 28 Sep 2020 01:10:39 +0300 Subject: [PATCH 057/439] security/samsung/five: fix include paths Signed-off-by: Denis Efremov --- security/samsung/five/Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/security/samsung/five/Makefile b/security/samsung/five/Makefile index 8c548f526579..74e6d169463d 100644 --- a/security/samsung/five/Makefile +++ b/security/samsung/five/Makefile @@ -2,10 +2,10 @@ obj-$(CONFIG_FIVE) += five.o obj-$(CONFIG_FIVE_PA_FEATURE) += five_pa.o -EXTRA_CFLAGS += -I$(src) -asflags-y += -Isecurity/integrity/five -asflags-y += -Isecurity/samsung/five -ccflags-y += -I$(srctree) +asflags-y += -Wa,-I$(srctree)/security/integrity/five +asflags-y += -Wa,-I$(srctree)/$(src) +ccflags-y += -I$(srctree)/ +ccflags-y += -I$(srctree)/$(src) ccflags-y += -Wformat five-y := five_lv.o five_cert.o five_keyring.o five_init.o \ From 52ce8b7918f50da3da4976d7004eb54265746bcb Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 28 Sep 2020 01:11:20 +0300 Subject: [PATCH 058/439] drivers/misc/tzdev/tz_deploy_tzar: fix startup.tzar inclusion Signed-off-by: Denis Efremov --- drivers/misc/tzdev/Makefile | 6 +++--- 
drivers/misc/tzdev/tz_deploy_tzar.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/misc/tzdev/Makefile b/drivers/misc/tzdev/Makefile index d0f764b404f4..ac199989c8bc 100644 --- a/drivers/misc/tzdev/Makefile +++ b/drivers/misc/tzdev/Makefile @@ -1,6 +1,6 @@ -subdir-ccflags-y += -I$(srctree)/drivers/misc/tzdev/include -subdir-ccflags-y += -I$(srctree)/drivers/misc/tzdev -ccflags-$(CONFIG_TZDEV_DEPLOY_TZAR) += -D"_STR(s)=\#s" -D"KBUILD_SRC=_STR($(KBUILD_SRC))" +subdir-ccflags-y += -I$(srctree)/$(src)/include +subdir-ccflags-y += -I$(srctree)/$(src) +ccflags-$(CONFIG_TZDEV_DEPLOY_TZAR) += -Wa,-I$(srctree)/$(src) obj-$(CONFIG_TZDEV) += lib/ obj-$(CONFIG_TZDEV) += teec/ diff --git a/drivers/misc/tzdev/tz_deploy_tzar.c b/drivers/misc/tzdev/tz_deploy_tzar.c index fc16e191cddf..5a280e4ad0fc 100644 --- a/drivers/misc/tzdev/tz_deploy_tzar.c +++ b/drivers/misc/tzdev/tz_deploy_tzar.c @@ -33,7 +33,7 @@ __asm__ ( ".section .init.data,\"aw\"\n" "tzdev_tzar_begin:\n" - ".incbin \"" KBUILD_SRC "/drivers/misc/tzdev/startup.tzar\"\n" + ".incbin \"startup.tzar\"\n" "tzdev_tzar_end:\n" ".previous\n" ); From d66ce0afcf73a440808c754e636446bc39c07c52 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 16:33:20 +0300 Subject: [PATCH 059/439] drivers: leds: fix double definition of LEDS_KTD2692 Signed-off-by: Denis Efremov --- drivers/leds/Kconfig | 6 ------ drivers/leds/Makefile | 1 - 2 files changed, 7 deletions(-) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 2ff5ee5ef6b9..6c4a2adf6eac 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -731,12 +731,6 @@ config LEDS_S2MPB02 help This option enables support for the LEDs on the S2MPB02. -config LEDS_KTD2692 - bool "LED support for the KTD2692" - help - If you say yes here you will get support for - for the KTD2692 FLASH led chip. 
- config LEDS_S2MPB02_MULTI_TORCH_REAR2 bool "LED support for second multi flash" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 5f19157ef445..76a3f8a3addb 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -78,7 +78,6 @@ obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o obj-$(CONFIG_LEDS_S2MPB02) += leds-s2mpb02.o -obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o From 00dd251c91e99e4ce40150f1488ad75e481b915c Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Sun, 24 Feb 2019 21:55:28 -0300 Subject: [PATCH 060/439] selinux: use kernel linux/socket.h for genheaders and mdp commit dfbd199a7cfe3e3cd8531e1353cdbd7175bfbc5e upstream. When compiling genheaders and mdp from a newer host kernel, the following error happens: In file included from scripts/selinux/genheaders/genheaders.c:18: ./security/selinux/include/classmap.h:238:2: error: #error New address family defined, please update secclass_map. #error New address family defined, please update secclass_map. ^~~~~ make[3]: *** [scripts/Makefile.host:107: scripts/selinux/genheaders/genheaders] Error 1 make[2]: *** [scripts/Makefile.build:599: scripts/selinux/genheaders] Error 2 make[1]: *** [scripts/Makefile.build:599: scripts/selinux] Error 2 make[1]: *** Waiting for unfinished jobs.... Instead of relying on the host definition, include linux/socket.h in classmap.h to have PF_MAX. 
Cc: stable@vger.kernel.org Signed-off-by: Paulo Alcantara Acked-by: Stephen Smalley [PM: manually merge in mdp.c, subject line tweaks] Signed-off-by: Paul Moore Signed-off-by: Greg Kroah-Hartman Signed-off-by: Denis Efremov --- scripts/selinux/genheaders/genheaders.c | 1 - scripts/selinux/mdp/mdp.c | 1 - security/selinux/include/classmap.h | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c index fa48fabcb330..3cc4893d98cc 100644 --- a/scripts/selinux/genheaders/genheaders.c +++ b/scripts/selinux/genheaders/genheaders.c @@ -9,7 +9,6 @@ #include #include #include -#include struct security_class_mapping { const char *name; diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c index ffe8179f5d41..c29fa4a6228d 100644 --- a/scripts/selinux/mdp/mdp.c +++ b/scripts/selinux/mdp/mdp.c @@ -32,7 +32,6 @@ #include #include #include -#include static void usage(char *name) { diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 05ecb689f8e4..21e523cc32ce 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include +#include #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" From 9bca23f6aa363616934a3468d3343f77be43a1fb Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 16:59:03 +0300 Subject: [PATCH 061/439] drivers/samsung/misc/Kconfig: remove leading whitespace Signed-off-by: Denis Efremov --- drivers/samsung/misc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/misc/Kconfig b/drivers/samsung/misc/Kconfig index f0fa141f3d06..6bc5d951efb9 100644 --- a/drivers/samsung/misc/Kconfig +++ b/drivers/samsung/misc/Kconfig @@ -107,7 +107,7 @@ config SEC_NAD_C Samsung TN NAD Feature, Enable NAD C 
config SEC_NAD_BPS_CLASSIFIER - bool " Samsung TN NAD BPS Classifier Feature" + bool "Samsung TN NAD BPS Classifier Feature" depends on SEC_NAD default n help From b777e6fa395d8e4c9ff916ac032a62da8c63007d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 14 Nov 2018 18:11:18 -0800 Subject: [PATCH 062/439] scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks [ Upstream commit ff64dd4857303dd5550faed9fd598ac90f0f2238 ] git-diff-index does not refresh the index for you, so using it for a "-dirty" check can give misleading results. Commit 6147b1cf19651 ("scripts/setlocalversion: git: Make -dirty check more robust") tried to fix this by switching to git-status, but it overlooked the fact that git-status also writes to the .git directory of the source tree, which is definitely not kosher for an out-of-tree (O=) build. That is getting reverted. Fortunately, git-status now supports avoiding writing to the index via the --no-optional-locks flag, as of git 2.14. It still calculates an up-to-date index, but it avoids writing it out to the .git directory. So, let's retry the solution from commit 6147b1cf19651 using this new flag first, and if it fails, we assume this is an older version of git and just use the old git-diff-index method. It's hairy to get the 'grep -vq' (inverted matching) correct by stashing the output of git-status (you have to be careful about the difference betwen "empty stdin" and "blank line on stdin"), so just pipe the output directly to grep and use a regex that's good enough for both the git-status and git-diff-index version. 
Cc: Christian Kujau Cc: Guenter Roeck Suggested-by: Alexander Kapshuk Signed-off-by: Brian Norris Tested-by: Genki Sky Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin Signed-off-by: Denis Efremov --- scripts/setlocalversion | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 71f39410691b..365b3c2b8f43 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -73,8 +73,16 @@ scm_version() printf -- '-svn%s' "`git svn find-rev $head`" fi - # Check for uncommitted changes - if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then + # Check for uncommitted changes. + # First, with git-status, but --no-optional-locks is only + # supported in git >= 2.14, so fall back to git-diff-index if + # it fails. Note that git-diff-index does not refresh the + # index, so it may give misleading results. See + # git-update-index(1), git-diff-index(1), and git-status(1). + if { + git --no-optional-locks status -uno --porcelain 2>/dev/null || + git diff-index --name-only HEAD + } | grep -qvE '^(.. )?scripts/package'; then printf '%s' -dirty fi From 9df6885172a21ac9fd5a017bab3ce7fa9ce35d9d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 19:35:22 +0300 Subject: [PATCH 063/439] vmlinux.lds.S: fix undefined reference to `idmap_pg_dir' Signed-off-by: Denis Efremov --- arch/arm64/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e0b071913f44..ca8ceaac4730 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -217,7 +217,7 @@ SECTIONS BSS_SECTION(0, 0, 0) . = ALIGN(PAGE_SIZE); -#ifndef CONFIG_UH +#ifndef CONFIG_UH_RKP idmap_pg_dir = .; . 
+= IDMAP_DIR_SIZE; swapper_pg_dir = .; From ed6501fd709e4dfac93dc4a51170ab2fbcddba57 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 18:07:53 +0300 Subject: [PATCH 064/439] exynos/fimc-is2: fix section mismatch for fimc_is_resourcemgr_probe() Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c | 2 +- drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c index a594b5dee75b..519d81fdd441 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c @@ -1260,7 +1260,7 @@ static struct notifier_block notify_reboot_block = { }; #endif -int fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, +int __init fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev) { int ret = 0; diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h index 709561a0bfea..2284fccbb30a 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h @@ -158,7 +158,7 @@ struct fimc_is_resourcemgr { u32 streaming_cnt; }; -int fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev); +int __init fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev); int fimc_is_resource_open(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type, void **device); int fimc_is_resource_get(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type); int fimc_is_resource_put(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type); From 
1a3dd9da3a91ec1c1e9d9b5d089140e5e90d2569 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 28 Feb 2020 22:05:29 +0300 Subject: [PATCH 065/439] modem_v1: fix section mismatch warning Signed-off-by: Denis Efremov --- drivers/misc/modem_v1/boot_device_spi.c | 2 +- drivers/misc/modem_v1_dual/boot_device_spi.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/misc/modem_v1/boot_device_spi.c b/drivers/misc/modem_v1/boot_device_spi.c index 64c872599e96..4de02d55c90c 100644 --- a/drivers/misc/modem_v1/boot_device_spi.c +++ b/drivers/misc/modem_v1/boot_device_spi.c @@ -231,7 +231,7 @@ static const struct file_operations modem_spi_boot_fops = { .unlocked_ioctl = spi_boot_ioctl, }; -static int __init modem_spi_boot_probe(struct spi_device *spi) +static int modem_spi_boot_probe(struct spi_device *spi) { int ret; struct device *dev = &spi->dev; diff --git a/drivers/misc/modem_v1_dual/boot_device_spi.c b/drivers/misc/modem_v1_dual/boot_device_spi.c index 48f82b761c0c..dac41c30a17e 100644 --- a/drivers/misc/modem_v1_dual/boot_device_spi.c +++ b/drivers/misc/modem_v1_dual/boot_device_spi.c @@ -232,7 +232,7 @@ static const struct file_operations modem_spi_boot_fops = { .unlocked_ioctl = spi_boot_ioctl, }; -static int __init modem_spi_boot_probe(struct spi_device *spi) +static int modem_spi_boot_probe(struct spi_device *spi) { int ret; struct device *dev = &spi->dev; From c3673ab11291eeed74a13c878489b0dc5b5bce64 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 10 Sep 2020 17:44:16 +0300 Subject: [PATCH 066/439] drivers: fingerprint: unexport fpsensor_goto_suspend() Signed-off-by: Denis Efremov --- drivers/fingerprint/fingerprint.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/fingerprint/fingerprint.h b/drivers/fingerprint/fingerprint.h index 42cc3cf72f17..13c764c1e0c9 100644 --- a/drivers/fingerprint/fingerprint.h +++ b/drivers/fingerprint/fingerprint.h @@ -73,7 +73,6 @@ EXPORT_SYMBOL(fp_lockscreen_mode); #endif extern int 
fpsensor_goto_suspend; -EXPORT_SYMBOL(fpsensor_goto_suspend); #endif #endif From 63c076b4af254ea9741282f6028efcef672c779e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 10 Sep 2020 18:56:07 +0300 Subject: [PATCH 067/439] spu-verify: add stub for spu_firmware_signature_verify() Signed-off-by: Denis Efremov --- drivers/spu_verify/Makefile | 2 +- include/linux/spu-verify.h | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/spu_verify/Makefile b/drivers/spu_verify/Makefile index ce595a158d33..09fd2a27cece 100644 --- a/drivers/spu_verify/Makefile +++ b/drivers/spu_verify/Makefile @@ -1 +1 @@ -obj-y += spu-sign-verify.o \ No newline at end of file +obj-$(CONFIG_SPU_VERIFY) += spu-sign-verify.o diff --git a/include/linux/spu-verify.h b/include/linux/spu-verify.h index 3fac19d8d558..cada9ad77987 100644 --- a/include/linux/spu-verify.h +++ b/include/linux/spu-verify.h @@ -26,6 +26,40 @@ /* TOTAL METADATA SIZE */ #define SPU_METADATA_SIZE(FW) ( (TAG_LEN(FW)) + (DIGEST_LEN) + (SIGN_LEN) ) +#ifdef CONFIG_SPU_VERIFY extern long spu_firmware_signature_verify(const char* fw_name, const u8* fw_data, const long fw_size); +#else +static inline long spu_firmware_signature_verify(const char* fw_name, const u8* fw_data, const long fw_size) { + const static struct { + const char *tag; + int len; + int metadata_size; + } tags[] = { + { TSP_TAG, TAG_LEN(TSP), SPU_METADATA_SIZE(TSP) }, + { MFC_TAG, TAG_LEN(MFC), SPU_METADATA_SIZE(MFC) }, + { WACOM_TAG, TAG_LEN(WACOM), SPU_METADATA_SIZE(WACOM) }, + { PDIC_TAG, TAG_LEN(PDIC), SPU_METADATA_SIZE(PDIC) }, + { SENSORHUB_TAG, TAG_LEN(SENSORHUB), SPU_METADATA_SIZE(SENSORHUB) }, + }; + int i; -#endif //end _SPU_VERIFY_H_ \ No newline at end of file + if (!fw_name || !fw_data || fw_size < 0) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(tags); ++i) { + if(!strncmp(fw_name, tags[i].tag, tags[i].len)) { + long offset = fw_size - tags[i].metadata_size; + if 
(!strncmp(fw_name, fw_data + offset, tags[i].len)) { + return offset; + } else { + return -EINVAL; + } + } + } + + return -EINVAL; +} +#endif + +#endif //end _SPU_VERIFY_H_ From 298cb1a4d48b11bbf74cdb167ca6f25d2aa1f3f6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 21:05:53 +0300 Subject: [PATCH 068/439] samsung: debug_test: fix linking error for simulate_WRITE_RO() Signed-off-by: Denis Efremov --- drivers/samsung/debug/sec_debug_test.c | 2 +- drivers/soc/samsung/debug/exynos-debug-test.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 3168d1f764a3..552449bcb020 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ b/drivers/samsung/debug/sec_debug_test.c @@ -768,7 +768,7 @@ static void simulate_WRITE_RO(char *arg) #ifdef CONFIG_RKP_CFP_JOPP ptr = (unsigned long *)__start_rodata; #else - ptr = (unsigned long *)simulate_WRITE_RO; + ptr = NULL; #endif *ptr ^= 0x12345678; } diff --git a/drivers/soc/samsung/debug/exynos-debug-test.c b/drivers/soc/samsung/debug/exynos-debug-test.c index 2dd874fd0479..f90bfc164193 100644 --- a/drivers/soc/samsung/debug/exynos-debug-test.c +++ b/drivers/soc/samsung/debug/exynos-debug-test.c @@ -787,7 +787,7 @@ static void simulate_WRITE_RO(char *arg) #ifdef CONFIG_RKP_CFP_JOPP ptr = (unsigned long *)__start_rodata; #else - ptr = (unsigned long *)simulate_WRITE_RO; + ptr = NULL; #endif *ptr ^= 0x12345678; } From 85a7303d91c46bc276dd8d5bb4fbd2baa0e3aa94 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 18:43:13 +0300 Subject: [PATCH 069/439] security: samsung: defex: DEFEX_KERNEL_ONLY=y by default Signed-off-by: Denis Efremov --- security/samsung/defex_lsm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/samsung/defex_lsm/Kconfig b/security/samsung/defex_lsm/Kconfig index 1ed27b1f0a23..f2638458f598 100644 --- a/security/samsung/defex_lsm/Kconfig +++ 
b/security/samsung/defex_lsm/Kconfig @@ -16,7 +16,7 @@ config SECURITY_DEFEX config DEFEX_KERNEL_ONLY bool "Defex Kernel Only" depends on SECURITY_DEFEX - default n + default y help This lets defex know whether kernel-only build or not. Default value will be set to "y" if the build is kernel-only. From 2e64a9fa862f6bd32a8061da26cfd9e0729f7077 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 17:18:51 +0300 Subject: [PATCH 070/439] security: selinux: make audit optional Signed-off-by: Denis Efremov --- security/selinux/Kconfig | 2 +- security/selinux/avc.c | 46 +++++++++++++++++++++++----------- security/selinux/hooks.c | 29 +++++++++++++-------- security/selinux/include/avc.h | 2 ++ security/selinux/ss/services.c | 19 ++++++++++++-- 5 files changed, 71 insertions(+), 27 deletions(-) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 8af7a690eb40..8609ed8f5fc5 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -1,6 +1,6 @@ config SECURITY_SELINUX bool "NSA SELinux Support" - depends on SECURITY_NETWORK && AUDIT && NET && INET + depends on SECURITY_NETWORK && NET && INET select NETWORK_SECMARK default n help diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 7134ac3af173..da4a3d7f56d5 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -107,6 +107,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } +#ifdef CONFIG_AUDIT /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class @@ -174,6 +175,7 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } +#endif /** * avc_init - Initialize the AVC. 
@@ -438,6 +440,7 @@ static int avc_xperms_populate(struct avc_node *node, } +#ifdef CONFIG_AUDIT static inline u32 avc_xperms_audit_required(u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, @@ -483,6 +486,7 @@ static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass, return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied, result, ad, 0); } +#endif static void avc_node_free(struct rcu_head *rhead) { @@ -708,6 +712,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, return node; } +#ifdef CONFIG_AUDIT /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code @@ -781,6 +786,7 @@ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } +#endif /** * avc_add_callback - Register a callback for security events. @@ -1079,7 +1085,7 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct extended_perms_data dontaudit; struct avc_xperms_node local_xp_node; struct avc_xperms_node *xp_node; - int rc = 0, rc2; + int rc = 0; xp_node = &local_xp_node; BUG_ON(!requested); @@ -1133,10 +1139,14 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, rcu_read_unlock(); - rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, - &avd, xpd, xperm, rc, ad); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, + &avd, xpd, xperm, rc, ad); + if (rc2) + return rc2; + } +#endif return rc; } @@ -1208,13 +1218,17 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata) { struct av_decision avd; - int rc, rc2; + int rc; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); - rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_audit(ssid, tsid, tclass, 
requested, &avd, rc, auditdata, 0); + if (rc2) + return rc2; + } +#endif return rc; } @@ -1223,14 +1237,18 @@ int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, int flags) { struct av_decision avd; - int rc, rc2; + int rc; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); - rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, - auditdata, flags); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, + auditdata, flags); + if (rc2) + return rc2; + } +#endif return rc; } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 40c7112e5875..634f718d6e39 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1897,11 +1897,13 @@ static int cred_has_capability(const struct cred *cred, } rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); +#ifdef CONFIG_AUDIT if (audit == SECURITY_CAP_AUDIT) { int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); if (rc2) return rc2; } +#endif return rc; } @@ -3213,6 +3215,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode, rcu ? 
MAY_NOT_BLOCK : 0); } +#ifdef CONFIG_AUDIT static noinline int audit_inode_permission(struct inode *inode, u32 perms, u32 audited, u32 denied, int result, @@ -3231,6 +3234,7 @@ static noinline int audit_inode_permission(struct inode *inode, return rc; return 0; } +#endif static int selinux_inode_permission(struct inode *inode, int mask) { @@ -3241,8 +3245,7 @@ static int selinux_inode_permission(struct inode *inode, int mask) struct inode_security_struct *isec; u32 sid; struct av_decision avd; - int rc, rc2; - u32 audited, denied; + int rc; from_access = mask & MAY_ACCESS; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); @@ -3282,15 +3285,21 @@ static int selinux_inode_permission(struct inode *inode, int mask) // ] SEC_SELINUX_PORTING_COMMON rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd); - audited = avc_audit_required(perms, &avd, rc, - from_access ? FILE__AUDIT_ACCESS : 0, - &denied); - if (likely(!audited)) - return rc; +#ifdef CONFIG_AUDIT + { + int rc2; + u32 audited, denied; + audited = avc_audit_required(perms, &avd, rc, + from_access ? FILE__AUDIT_ACCESS : 0, + &denied); + if (likely(!audited)) + return rc; - rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); - if (rc2) - return rc2; + rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); + if (rc2) + return rc2; + } +#endif return rc; } diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index 57d61cf36500..f5bc76c36b53 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -66,6 +66,7 @@ struct selinux_audit_data { void __init avc_init(void); +#ifdef CONFIG_AUDIT static inline u32 avc_audit_required(u32 requested, struct av_decision *avd, int result, @@ -142,6 +143,7 @@ static inline int avc_audit(u32 ssid, u32 tsid, requested, audited, denied, result, a, flags); } +#endif #define AVC_STRICT 1 /* Ignore permissive mode. 
*/ #define AVC_EXTENDED_PERMS 2 /* update extended permissions */ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 21301d6248c9..8e923c76950c 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -467,6 +467,7 @@ static int constraint_expr_eval(struct context *scontext, return s[0]; } +#ifdef CONFIG_AUDIT /* * security_dump_masked_av - dumps masked permissions during * security_compute_av due to RBAC, MLS/Constraint and Type bounds. @@ -556,6 +557,7 @@ static void security_dump_masked_av(struct context *scontext, return; } +#endif /* * security_boundary_permission - drops violated permissions @@ -609,9 +611,11 @@ static void type_attribute_bounds_av(struct context *scontext, /* mask violated permissions */ avd->allowed &= ~masked; +#ifdef CONFIG_AUDIT /* audit masked permissions */ security_dump_masked_av(scontext, tcontext, tclass, masked, "bounds"); +#endif } /* @@ -753,6 +757,7 @@ static int security_validtrans_handle_fail(struct context *ocontext, struct context *tcontext, u16 tclass) { +#ifdef CONFIG_AUDIT char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; @@ -770,6 +775,7 @@ static int security_validtrans_handle_fail(struct context *ocontext, kfree(o); kfree(n); kfree(t); +#endif // [ SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE @@ -930,6 +936,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) index = type->bounds; } +#ifdef CONFIG_AUDIT if (rc) { char *old_name = NULL; char *new_name = NULL; @@ -949,6 +956,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) kfree(new_name); kfree(old_name); } +#endif out: read_unlock(&policy_rwlock); @@ -1542,6 +1550,7 @@ static int compute_sid_handle_invalid_context( u16 tclass, struct context *newcontext) { +#ifdef CONFIG_AUDIT char *s = NULL, *t = NULL, *n = NULL; u32 slen, tlen, nlen; @@ -1561,6 +1570,7 @@ static int compute_sid_handle_invalid_context( kfree(s); kfree(t); kfree(n); +#endif // [ 
SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE @@ -2886,8 +2896,6 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) struct context *context1; struct context *context2; struct context newcon; - char *s; - u32 len; int rc; rc = 0; @@ -2927,6 +2935,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) if (!policydb_context_isvalid(&policydb, &newcon)) { rc = convert_context_handle_invalid_context(&newcon); if (rc) { +#ifdef CONFIG_AUDIT + char *s; + u32 len; + if (!context_struct_to_string(&newcon, &s, &len)) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, @@ -2934,6 +2946,7 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) "invalid_context=%s", s); kfree(s); } +#endif goto out_unlock; } } @@ -3156,6 +3169,7 @@ int security_policycap_supported(unsigned int req_cap) return rc; } +#ifdef CONFIG_AUDIT struct selinux_audit_rule { u32 au_seqno; struct context au_ctxt; @@ -3422,6 +3436,7 @@ static int __init aurule_init(void) return err; } __initcall(aurule_init); +#endif #ifdef CONFIG_NETLABEL /** From 09e9e1220ee327ae78039188840cc9278155f70c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 19:03:23 +0300 Subject: [PATCH 071/439] security: selinux: Add CONFIG_ALWAYS_ENFORCE Signed-off-by: Denis Efremov --- security/selinux/Kconfig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 8609ed8f5fc5..c529054fe423 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -8,6 +8,23 @@ config SECURITY_SELINUX You will also need a policy configuration and a labeled filesystem. If you are unsure how to answer this question, answer N. +choice + prompt "NSA SELinux mode" + default ALWAYS_ENFORCE + depends on SECURITY_SELINUX + +config SECURITY_SELINUX_SWITCH + bool "Dynamically switch between permissive and enforcing" + help + Allow to switch dynamically between permissive and enforcing modes. 
+ +config ALWAYS_ENFORCE + bool "Always Enforce mode" + help + Pin enforcing mode. + +endchoice + config SECURITY_SELINUX_BOOTPARAM bool "NSA SELinux boot parameter" depends on SECURITY_SELINUX From fb45e955a58f97ac3d7b239f881bbfd264e466e1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 25 Mar 2020 00:30:06 +0300 Subject: [PATCH 072/439] security: selinux: Add CONFIG_ALWAYS_PERMIT Signed-off-by: Denis Efremov --- security/selinux/Kconfig | 5 +++++ security/selinux/selinuxfs.c | 3 +++ 2 files changed, 8 insertions(+) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index c529054fe423..9b278a40e4ee 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -23,6 +23,11 @@ config ALWAYS_ENFORCE help Pin enforcing mode. +config ALWAYS_PERMIT + bool "Always Permit mode" + help + Pin permissive mode. + endchoice config SECURITY_SELINUX_BOOTPARAM diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 72abcdab314b..934da25e52aa 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -135,6 +135,9 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; +#ifdef CONFIG_ALWAYS_PERMIT + new_value = 0; +#endif // [ SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE From 2496bd8779abf04f403542dbf67d22f9f478264c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 00:04:54 +0300 Subject: [PATCH 073/439] drivers: kperfmon: suppress missing files warnings Signed-off-by: Denis Efremov --- drivers/kperfmon/Makefile | 30 ++++++++---------------------- include/linux/ologk.h | 8 +++----- 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/drivers/kperfmon/Makefile b/drivers/kperfmon/Makefile index e5abca82ff2b..057bf4e92de4 100644 --- a/drivers/kperfmon/Makefile +++ b/drivers/kperfmon/Makefile @@ -5,40 +5,26 @@ # Rewritten to use lists instead of if-statements. 
# -FLAG=1 - -#$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/include/linux/olog.pb.h $(srctree)/drivers/kperfmon/)") - -ifneq ($(CONFIG_KPERFMON), y) FLAG=0 -$(info kperfmon_DUMMY="CONFIG_KPERFMON is off.") -endif - -ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist) -$(info kperfmon_DUMMY="olog.pb.h file is missing... retrying") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../frameworks/base/proto/src/olog.proto $(srctree)/drivers/kperfmon/)") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../vendor/samsung/system/libperflog/aprotoc $(srctree)/drivers/kperfmon/)") -$(info kperfmon_DUMMY="$(shell $(srctree)/drivers/kperfmon/aprotoc --perflog_out=./ $(srctree)/drivers/kperfmon/olog.proto)") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/olog.pb.h $(srctree)/include/linux/)") -#$(info kperfmon_DUMMY="$(shell ls $(srctree)/drivers/kperfmon/*)") -#$(info kperfmon_DUMMY="$(shell ls $(srctree)/include/linux/olog*)") +ifeq ($(CONFIG_KPERFMON), y) +FLAG=1 +endif +ifeq ($(FLAG), 1) ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist) -$(info kperfmon_DUMMY="olog.pb.h file is missing... 
again") FLAG=0 -endif +$(warning "kperfmon: olog.pb.h file is missing.") endif -ifneq ($(shell [ -e $(srctree)/../../system/core/liblog/include/log/perflog.h ] && echo exist), exist) +ifneq ($(shell [ -e $(srctree)/include/linux/perflog.h ] && echo exist), exist) FLAG=0 -$(info kperfmon_DUMMY="perflog.h file is missing.") +$(warning "kperfmon: perflog.h file is missing.") +endif endif -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../system/core/liblog/include/log/perflog.h $(srctree)/include/linux/)") ifeq ($(FLAG), 1) obj-y += kperfmon.o else obj-y += ologk.o -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/ologk.h $(srctree)/include/linux/)") endif diff --git a/include/linux/ologk.h b/include/linux/ologk.h index c3c1b8be0b1a..58456d98d3a1 100644 --- a/include/linux/ologk.h +++ b/include/linux/ologk.h @@ -2,13 +2,11 @@ #define _OLOG_KERNEL_H_ #include -#include "olog.pb.h" #define OLOG_CPU_FREQ_FILTER 1500000 -#define ologk(...) _perflog(PERFLOG_LOG, PERFLOG_UNKNOWN, __VA_ARGS__) -#define perflog(...) _perflog(PERFLOG_LOG, __VA_ARGS__) -extern void _perflog(int type, int logid, const char *fmt, ...); -extern void perflog_evt(int logid, int arg1); +#define ologk(...) +#define perflog(...) +#define perflog_evt(...) 
#endif From 65e5619a80b2928e88cbb6e4588bd2be31d2ff65 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 15:04:22 +0300 Subject: [PATCH 074/439] net/sch_generic.h: fix "unused" qcb variable warning Signed-off-by: Denis Efremov --- include/net/sch_generic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f59acacaa265..cf44dc4d4b7e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -277,7 +277,7 @@ struct tcf_block { static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { - struct qdisc_skb_cb *qcb; + struct qdisc_skb_cb *qcb __maybe_unused; BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); BUILD_BUG_ON(sizeof(qcb->data) < sz); From 5d97895592dcc746472ef7dac0c7b54d7434a691 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 23:45:11 +0300 Subject: [PATCH 075/439] block: blk-crypt: fix blk_crypt_initialize() defn Signed-off-by: Denis Efremov --- block/blk-crypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-crypt.c b/block/blk-crypt.c index dd783891555f..399135cfbe03 100644 --- a/block/blk-crypt.c +++ b/block/blk-crypt.c @@ -210,7 +210,7 @@ void blk_crypt_put_context(blk_crypt_t *bc_ctx) } /* H/W algorithm APIs */ -static int blk_crypt_initialize() +static int blk_crypt_initialize(void) { if (likely(blk_crypt_cachep)) return 0; From ac8f0a78f9b48a2d417a882f8e798e410694e0c2 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 23:46:40 +0300 Subject: [PATCH 076/439] samsung: debug: fix sec_platform_watchdog_start_timer() defn Signed-off-by: Denis Efremov --- drivers/samsung/debug/sec_debug_platform_watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_platform_watchdog.c b/drivers/samsung/debug/sec_debug_platform_watchdog.c index 6eaaaae91929..e0de88d72b8b 100644 --- 
a/drivers/samsung/debug/sec_debug_platform_watchdog.c +++ b/drivers/samsung/debug/sec_debug_platform_watchdog.c @@ -169,7 +169,7 @@ static void sec_platform_watchdog_timer_fn(unsigned long data) mod_timer(&sec_platform_watchdog_timer, jiffies + sample_period * HZ); } -static void sec_platform_watchdog_start_timer() +static void sec_platform_watchdog_start_timer(void) { del_timer_sync(&sec_platform_watchdog_timer); mod_timer(&sec_platform_watchdog_timer, jiffies + sample_period * HZ); From 35f3dde713cb49b7048e6aacb80ef029c857b2be Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 23:52:14 +0300 Subject: [PATCH 077/439] drivers: exynos: fimc-is2: fix loop initial declarations Signed-off-by: Denis Efremov --- .../exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c index 24c102d16680..5923832296cd 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c @@ -2952,20 +2952,21 @@ int fimc_is_sec_sensor_find_rear_tof_uid(struct fimc_is_core *core, char *buf) #ifdef CAMERA_REAR_TOF struct fimc_is_vender_specific *specific = core->vender.private_data; struct fimc_is_rom_info *finfo = NULL; + int i; fimc_is_sec_get_sysfs_finfo(&finfo, REAR_TOF_ROM_ID); if (finfo->cal_map_ver[3] >= REAR_TOF_CHECK_MAP_VERSION) { char uid_list[256] = {0, }; char uid_temp[10] = {0, }; - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->rear_tof_uid[i] = *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[i]]); sprintf(uid_temp, "0x%x ", specific->rear_tof_uid[i]); strcat(uid_list, uid_temp); } info("rear_tof_uid: %s\n", uid_list); } else { - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; 
i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->rear_tof_uid[i] = REAR_TOF_DEFAULT_UID; } info("rear_tof_uid: 0x%x, use default 0x%x", *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[0]]), @@ -2980,20 +2981,21 @@ int fimc_is_sec_sensor_find_front_tof_uid(struct fimc_is_core *core, char *buf) #ifdef CAMERA_FRONT_TOF struct fimc_is_vender_specific *specific = core->vender.private_data; struct fimc_is_rom_info *finfo = NULL; + int i; fimc_is_sec_get_sysfs_finfo(&finfo, FRONT_TOF_ROM_ID); if (finfo->cal_map_ver[3] >= FRONT_TOF_CHECK_MAP_VERSION) { char uid_list[256] = {0, }; char uid_temp[10] = {0, }; - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->front_tof_uid[i] = *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[i]]); sprintf(uid_temp, "0x%x ", specific->front_tof_uid[i]); strcat(uid_list, uid_temp); } info("front_tof_uid: %s\n", uid_list); } else { - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->front_tof_uid[i] = FRONT_TOF_DEFAULT_UID; } info("front_tof_uid: 0x%x, use default 0x%x", *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[0]]), From e3763230dc096e930131e510263e5677a64e42ce Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 15:58:52 +0300 Subject: [PATCH 078/439] npu: generated: fix update_ver_info.sh script Signed-off-by: Denis Efremov --- drivers/vision/npu/generated/update_ver_info.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vision/npu/generated/update_ver_info.sh b/drivers/vision/npu/generated/update_ver_info.sh index e93245cd45d3..11548bc1f5a1 100755 --- a/drivers/vision/npu/generated/update_ver_info.sh +++ b/drivers/vision/npu/generated/update_ver_info.sh @@ -9,17 +9,17 @@ if [ -z "$NPU_GIT_LOCAL_CHANGE" ] then NPU_GIT_LOCAL_CHANGE="No local change" fi -STASH_DEPTH=`git stash list | wc -l` -USER_INFO=whoami|sed 
's/\\/\-/g' +STASH_DEPTH="$(git stash list | wc -l)" +USER_INFO="$(whoami | sed 's/\\/\-/g')" # Error checking -if [ ( -z $NPU_GIT_LOG ) -o ( -z $NPU_GIT_HASH ) -o ( -z $USER_INFO ) -o ( -z $NPU_GIT_LOCAL_CHANGE ) -o ( -z $STASH_DEPTH ) ] +if [ \( -z "$NPU_GIT_LOG" \) -o \( -z "$NPU_GIT_HASH" \) -o \( -z "$USER_INFO" \) -o \( -z "$NPU_GIT_LOCAL_CHANGE" \) -o \( -z "$STASH_DEPTH" \) ] then echo "An error occured during build info gathering." >&2 exit 16 fi -BUILD_INFO="$(USER_INFO)@$(hostname) / Build on $(date --rfc-3339='seconds')" +BUILD_INFO="$USER_INFO@$(hostname) / Build on $(date --rfc-3339='seconds')" cat > $TARGET_FILE << ENDL const char *npu_git_log_str = From e8cd8530f48120950a3672113bce757ddd51b078 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 2 Oct 2020 21:51:26 +0300 Subject: [PATCH 079/439] arch/arm64/Makefile: don't gzip kernel Image file Signed-off-by: Denis Efremov --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 145408d9df99..a221b9f8f98c 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -161,7 +161,7 @@ endif KBUILD_DTBS := dtbs -all: Image.gz $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) +all: Image $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) Image: vmlinux From 2443f25a4b3e163c66d9f1622b7a609fe98a6b33 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 15:17:36 +0300 Subject: [PATCH 080/439] drivers/of/Kconfig: unbind OF_FLATTREE from CONFIG_DTC Signed-off-by: Denis Efremov --- drivers/of/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index ba7b034b2b91..a6539f152951 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -1,5 +1,5 @@ config DTC - bool + bool "Build DTC compiler" menuconfig OF bool "Device Tree and Open Firmware support" @@ -35,7 +35,6 @@ config OF_ALL_DTBS config 
OF_FLATTREE bool - select DTC select LIBFDT select CRC32 From 75c12d2afcb267738d23de63c32efd069cda74c7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 24 Jan 2020 18:29:50 +0300 Subject: [PATCH 081/439] include/linux/nmi: fix sl_softirq_entry() definition Signed-off-by: Denis Efremov --- include/linux/nmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 892cf046ca33..69d4e1acfde9 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -123,7 +123,7 @@ extern void sl_softirq_entry(const char *, void *); extern void sl_softirq_exit(void); unsigned long long get_dss_softlockup_thresh(void); #else -static inline void void sl_softirq_entry(const char *, void *) { } +static inline void sl_softirq_entry(const char *softirq_type, void *fn) { } static inline void sl_softirq_exit(void) { } #endif From 16fbc4c1b6cc1bd8cbdcbdc9db208f8648118450 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 24 Jan 2020 18:42:22 +0300 Subject: [PATCH 082/439] wholetree: fix sec_debug missing declarations Signed-off-by: Denis Efremov --- include/linux/debug-snapshot.h | 7 ++++++- include/linux/sec_debug.h | 3 +++ kernel/power/process.c | 2 +- kernel/printk/printk.c | 4 ++-- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/include/linux/debug-snapshot.h b/include/linux/debug-snapshot.h index 227371b68d20..d09c89a20dc0 100644 --- a/include/linux/debug-snapshot.h +++ b/include/linux/debug-snapshot.h @@ -206,6 +206,7 @@ extern void dbg_snapshot_get_softlockup_info(unsigned int cpu, void *info); #define dbg_snapshot_printk(...) 
do { } while(0) #define dbg_snapshot_printkl(a,b) do { } while(0) #define dbg_snapshot_save_context(a) do { } while(0) +#define dbg_snapshot_print_notifier_call(a,b,c) do { } while(0) #define dbg_snapshot_try_enable(a,b) do { } while(0) #define dbg_snapshot_set_enable(a,b) do { } while(0) #define dbg_snapshot_get_enable(a) do { } while(0) @@ -255,12 +256,16 @@ static inline bool dbg_snapshot_dumper_one(void *v_dumper, { return false; } -static int dbg_snapshot_add_bl_item_info(const char *name, +static inline int dbg_snapshot_add_bl_item_info(const char *name, unsigned int paddr, unsigned int size) { return 0; } +static inline void dbg_snapshot_save_log(int cpu, unsigned long where) +{ +} + #endif /* CONFIG_DEBUG_SNAPSHOT */ extern void dbg_snapshot_soc_helper_init(void); diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index 8b7a8d00b7b5..401bbe2a78d9 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -521,6 +521,7 @@ extern void sec_debug_set_extra_info_epd(char *str); #define sec_debug_set_extra_info_panic(a) do { } while (0) #define sec_debug_set_extra_info_backtrace(a) do { } while (0) #define sec_debug_set_extra_info_backtrace_cpu(a, b) do { } while (0) +#define sec_debug_set_extra_info_backtrace_task(a) do { } while (0) #define sec_debug_set_extra_info_evt_version() do { } while (0) #define sec_debug_set_extra_info_sysmmu(a) do { } while (0) #define sec_debug_set_extra_info_busmon(a) do { } while (0) @@ -660,7 +661,9 @@ struct sec_debug_next { struct sec_debug_spinlock_info rlock; struct sec_debug_kernel_data kernd; +#ifdef CONFIG_SEC_DEBUG_AUTO_COMMENT struct sec_debug_auto_comment auto_comment; +#endif struct sec_debug_shared_buffer extra_info; }; diff --git a/kernel/power/process.c b/kernel/power/process.c index 98001a7303f7..fd1e7a5c5858 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -44,7 +44,7 @@ static int try_to_freeze_tasks(bool user_only) #ifdef CONFIG_PM_SLEEP char 
suspend_abort[MAX_SUSPEND_ABORT_LEN]; #endif - char *sys_state[SYSTEM_END] = { + char *sys_state[SYSTEM_END] __maybe_unused = { "BOOTING", "SCHEDULING", "RUNNING", diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a5bf81671436..13dc0c6581df 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -658,7 +658,7 @@ void register_hook_logbuf(void (*func)(const char *buf, size_t size)) EXPORT_SYMBOL(register_hook_logbuf); #endif -#if CONFIG_SEC_DEBUG_FIRST_KMSG +#ifdef CONFIG_SEC_DEBUG_FIRST_KMSG static void (*func_hook_first_kmsg)(const char *buf, size_t size); void register_first_kmsg_hook_func(void (*func)(const char *buf, size_t size)) { @@ -801,7 +801,7 @@ static int log_store(int facility, int level, } #endif -#if CONFIG_SEC_DEBUG_FIRST_KMSG +#ifdef CONFIG_SEC_DEBUG_FIRST_KMSG if (func_hook_first_kmsg) func_hook_first_kmsg(hook_text, hook_size); #endif From 4edbf25193869eb5a156169b1b82872dc8013fc6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 02:09:12 +0300 Subject: [PATCH 083/439] sec_debug: fix build errors for SEC_DEBUG_TSP_LOG Signed-off-by: Denis Efremov --- drivers/input/sec_cmd.c | 4 ++++ include/linux/sec_debug.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/input/sec_cmd.c b/drivers/input/sec_cmd.c index 20ff8df8ccde..21e8caf35776 100644 --- a/drivers/input/sec_cmd.c +++ b/drivers/input/sec_cmd.c @@ -310,6 +310,7 @@ static void sec_cmd_store_function(struct sec_cmd_data *data) sec_cmd_ptr->cmd_func(data); +#ifdef SEC_DEBUG_TSP_LOG if (cmd_found && sec_cmd_ptr->cmd_log) { char tbuf[32]; unsigned long long t; @@ -324,6 +325,7 @@ static void sec_cmd_store_function(struct sec_cmd_data *data) sec_debug_tsp_command_history(tbuf); } +#endif } static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devattr, @@ -355,6 +357,7 @@ static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devatt list_for_each_entry(sec_cmd_ptr, &data->cmd_list_head, list) 
{ if (!strncmp(cmd.cmd, sec_cmd_ptr->cmd_name, strlen(sec_cmd_ptr->cmd_name))) { +#ifdef SEC_DEBUG_TSP_LOG if (sec_cmd_ptr->cmd_log) { char task_info[40]; char tbuf[32]; @@ -374,6 +377,7 @@ static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devatt sec_debug_tsp_command_history(tbuf); } +#endif break; } } diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index 401bbe2a78d9..c543dbab0b7a 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -690,7 +690,7 @@ extern void sec_debug_tsp_command_history(char *buf); #define sec_debug_tsp_raw_data(a, ...) do { } while (0) #define sec_debug_tsp_raw_data_msg(a, b, ...) do { } while (0) #define sec_tsp_raw_data_clear() do { } while (0) -#define sec_debug_tsp_command_history() do { } while (0) +#define sec_debug_tsp_command_history(buf) do { } while (0) #endif /* CONFIG_SEC_DEBUG_TSP_LOG */ #ifdef CONFIG_TOUCHSCREEN_DUMP_MODE From 7c622ca347f8443a6ebf74a88143f6b5f70228f8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 10:34:07 +0300 Subject: [PATCH 084/439] sec_debug: fix build error for CONFIG_KALLSYMS Signed-off-by: Denis Efremov --- include/linux/sec_debug.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index c543dbab0b7a..a8a25d01ca01 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -345,7 +345,17 @@ extern void sec_debug_task_sched_log(int cpu, struct task_struct *task); extern void sec_debug_irq_sched_log(unsigned int irq, void *fn, int en); extern void sec_debug_irq_enterexit_log(unsigned int irq, unsigned long long start_time); +#ifdef CONFIG_KALLSYMS extern void sec_debug_set_kallsyms_info(struct sec_debug_ksyms *ksyms, int magic); +#else +static inline void sec_debug_set_kallsyms_info(struct sec_debug_ksyms *ksyms, int magic) +{ + if (ksyms) { + memset(ksyms, 0, sizeof(*ksyms)); + ksyms->magic = magic; + } +} +#endif extern int 
sec_debug_check_sj(void); extern unsigned int sec_debug_get_kevent_paddr(int type); From 0f7d7820201bbb0f88a61f5b794d0c5aab54a4b7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 02:11:24 +0300 Subject: [PATCH 085/439] lib/debug-snapshot-utils.c: fix build errors Signed-off-by: Denis Efremov --- lib/debug-snapshot-utils.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/debug-snapshot-utils.c b/lib/debug-snapshot-utils.c index da8b1249ec7f..d3e380aa57bc 100644 --- a/lib/debug-snapshot-utils.c +++ b/lib/debug-snapshot-utils.c @@ -33,6 +33,8 @@ #include #include +#include + #include "debug-snapshot-local.h" DEFINE_PER_CPU(struct pt_regs *, dss_core_reg); From 1df0e4c5894a667af215f060a577cba1ed51dd5b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 25 Jan 2020 00:04:57 +0300 Subject: [PATCH 086/439] phy-exynos-usbdrd: add argument names to definitions Signed-off-by: Denis Efremov --- drivers/phy/samsung/phy-exynos-debug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/phy/samsung/phy-exynos-debug.h b/drivers/phy/samsung/phy-exynos-debug.h index a04b52ebe4ad..9e587bbbdf05 100644 --- a/drivers/phy/samsung/phy-exynos-debug.h +++ b/drivers/phy/samsung/phy-exynos-debug.h @@ -17,9 +17,9 @@ extern int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *phy_drd); extern int exynos_usbdrd_dp_debugfs_init(struct exynos_usbdrd_phy *phy_drd); extern void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *phy_drd); #else -static inline int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *) +static inline int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *phy_drd) { return 0; } -static inline void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *) +static inline void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *phy_drd) { } #endif From fa17746931f7f3c1d351749f2f42d6e2d782fab8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 25 Jan 2020 00:05:51 +0300 Subject: [PATCH 087/439] s3c2410_wdt: 
move variable declaration Signed-off-by: Denis Efremov --- drivers/watchdog/s3c2410_wdt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 0c885c52d05b..b561edafda56 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -429,7 +429,6 @@ static int s3c2410wdt_keepalive(struct watchdog_device *wdd) { struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd); unsigned long flags, wtcnt = 0; - time64_t sec; s3c2410wdt_multistage_wdt_keepalive(); @@ -442,6 +441,8 @@ static int s3c2410wdt_keepalive(struct watchdog_device *wdd) #ifdef SEC_WATCHDOGD_FOOTPRINT if (wdt->cluster == 0) { + time64_t sec; + wdd_info->last_ping_cpu = raw_smp_processor_id(); wdd_info->last_ping_time = sched_clock(); From ba90808d798c8484751d37e2ad1080fb8d902050 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 25 Jan 2020 00:06:51 +0300 Subject: [PATCH 088/439] f2fs: fix CONFIG_F2FS_STAT_FS compilation Signed-off-by: Denis Efremov --- fs/f2fs/sysfs.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 6755dba3d2ee..53d791de0f62 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -183,7 +183,7 @@ static ssize_t current_reserved_blocks_show(struct f2fs_attr *a, return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks); } -#ifdef CONFIG_F2FS_SEC_BLOCK_OPERATIONS_DEBUG +#if defined(CONFIG_F2FS_SEC_BLOCK_OPERATIONS_DEBUG) && defined(CONFIG_F2FS_STAT_FS) static int f2fs_sec_blockops_dbg(struct f2fs_sb_info *sbi, char *buf, int src_len) { int len = src_len; int i, j; @@ -227,6 +227,7 @@ static int f2fs_sec_blockops_dbg(struct f2fs_sb_info *sbi, char *buf, int src_le } #endif +#ifdef CONFIG_F2FS_STAT_FS /* Copy from debug.c stat_show */ static ssize_t f2fs_sec_stats_show(struct f2fs_sb_info *sbi, char *buf) { @@ -410,6 +411,7 @@ static ssize_t f2fs_sec_stats_show(struct f2fs_sb_info *sbi, char *buf) #endif 
return len; } +#endif static void __sec_bigdata_init_value(struct f2fs_sb_info *sbi, const char *attr_name) @@ -614,8 +616,10 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a, } return len; +#ifdef CONFIG_F2FS_STAT_FS } else if (!strcmp(a->attr.name, "sec_stats")) { return f2fs_sec_stats_show(sbi, buf); +#endif } ui = (unsigned int *)(ptr + a->offset); @@ -908,7 +912,9 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_gc_stat, sec_stat); F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_io_stat, sec_stat); +#ifdef CONFIG_F2FS_STAT_FS F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_stats, stat_info); +#endif F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_fsck_stat, sec_fsck_stat); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_best_extents, s_sec_part_best_extents); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_current_extents, s_sec_part_current_extents); @@ -975,7 +981,9 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(extension_list), ATTR_LIST(sec_gc_stat), ATTR_LIST(sec_io_stat), +#ifdef CONFIG_F2FS_STAT_FS ATTR_LIST(sec_stats), +#endif ATTR_LIST(sec_fsck_stat), ATTR_LIST(sec_part_best_extents), ATTR_LIST(sec_part_current_extents), From 80fc7f7d1d32f8eb791fb090d59922910d7eec1d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 23 Oct 2020 23:46:36 +0300 Subject: [PATCH 089/439] fs: sdfat: add missing sdfat_debug_warn_on() define Signed-off-by: Denis Efremov --- fs/sdfat/sdfat.h | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/sdfat/sdfat.h b/fs/sdfat/sdfat.h index 8824d10ef058..8a18aa5b798e 100644 --- a/fs/sdfat/sdfat.h +++ b/fs/sdfat/sdfat.h @@ -447,6 +447,7 @@ void sdfat_debug_check_clusters(struct inode *inode); #define sdfat_debug_check_clusters(inode) #define sdfat_debug_bug_on(expr) +#define sdfat_debug_warn_on(expr) #endif /* CONFIG_SDFAT_DEBUG */ From 384636c07f18b789943ec245f0a6676d861b5650 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 6 Jan 2022 15:45:04 +0300 
Subject: [PATCH 090/439] drivers/gpu/arm/exynos: fix functions arguments Signed-off-by: Denis Efremov --- .../arm/exynos/backend/gpexbe_qos_internal.h | 2 +- drivers/gpu/arm/exynos/frontend/gpex_clock.c | 16 +++++++------- drivers/gpu/arm/exynos/frontend/gpex_dvfs.c | 22 +++++++++---------- .../gpu/arm/exynos/frontend/gpex_thermal.c | 6 ++--- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h b/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h index a85fedfcc595..89b5b40546f6 100644 --- a/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h +++ b/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h @@ -18,7 +18,7 @@ * http://www.gnu.org/licenses/gpl-2.0.html. */ -static inline pmqos_flag_check(mali_pmqos_flags type, mali_pmqos_flags in) +static inline int pmqos_flag_check(mali_pmqos_flags type, mali_pmqos_flags in) { return (type & in) == in; } diff --git a/drivers/gpu/arm/exynos/frontend/gpex_clock.c b/drivers/gpu/arm/exynos/frontend/gpex_clock.c index 1d021f6ce7e0..a2c700ba56a6 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_clock.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_clock.c @@ -37,31 +37,31 @@ static struct _clock_info clk_info; -int gpex_clock_get_boot_clock() +int gpex_clock_get_boot_clock(void) { return clk_info.boot_clock; } -int gpex_clock_get_max_clock() +int gpex_clock_get_max_clock(void) { return clk_info.gpu_max_clock; } -int gpex_clock_get_max_clock_limit() +int gpex_clock_get_max_clock_limit(void) { return clk_info.gpu_max_clock_limit; } -int gpex_clock_get_min_clock() +int gpex_clock_get_min_clock(void) { return clk_info.gpu_min_clock; } -int gpex_clock_get_cur_clock() +int gpex_clock_get_cur_clock(void) { return clk_info.cur_clock; } -int gpex_clock_get_max_lock() +int gpex_clock_get_max_lock(void) { return clk_info.max_lock; } -int gpex_clock_get_min_lock() +int gpex_clock_get_min_lock(void) { return clk_info.min_lock; } @@ -80,7 +80,7 @@ u64 gpex_clock_get_time_busy(int 
level) /******************************************* * static helper functions ******************************************/ -static int gpex_clock_update_config_data_from_dt() +static int gpex_clock_update_config_data_from_dt(void) { int ret = 0; struct freq_volt *fv_array; diff --git a/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c b/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c index 60c911fcbef9..ea36eb872189 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c @@ -85,7 +85,7 @@ static void gpex_dvfs_context_init(struct device **dev) dvfs.polling_speed = gpexbe_devicetree_get_int(gpu_dvfs_polling_time); } -static int gpu_dvfs_calculate_env_data() +static int gpu_dvfs_calculate_env_data(void) { unsigned long flags; static int polling_period; @@ -172,12 +172,12 @@ static void gpu_dvfs_timer_control(bool timer_state) spin_unlock_irqrestore(&dvfs.spinlock, flags); } -void gpex_dvfs_start() +void gpex_dvfs_start(void) { gpu_dvfs_timer_control(true); } -void gpex_dvfs_stop() +void gpex_dvfs_stop(void) { gpu_dvfs_timer_control(false); } @@ -220,17 +220,17 @@ static int gpu_dvfs_on_off(bool enable) return 0; } -int gpex_dvfs_enable() +int gpex_dvfs_enable(void) { return gpu_dvfs_on_off(true); } -int gpex_dvfs_disable() +int gpex_dvfs_disable(void) { return gpu_dvfs_on_off(false); } -static int gpu_dvfs_handler_init() +static int gpu_dvfs_handler_init(void) { if (!dvfs.status) dvfs.status = true; @@ -243,7 +243,7 @@ static int gpu_dvfs_handler_init() return 0; } -static int gpu_dvfs_handler_deinit() +static int gpu_dvfs_handler_deinit(void) { if (dvfs.status) dvfs.status = false; @@ -254,7 +254,7 @@ static int gpu_dvfs_handler_deinit() return 0; } -static int gpu_pm_metrics_init() +static int gpu_pm_metrics_init(void) { INIT_DELAYED_WORK(&dvfs.dvfs_work, dvfs_callback); dvfs.dvfs_wq = create_workqueue("g3d_dvfs"); @@ -265,7 +265,7 @@ static int gpu_pm_metrics_init() return 0; } -static void gpu_pm_metrics_term() +static void 
gpu_pm_metrics_term(void) { cancel_delayed_work(&dvfs.dvfs_work); flush_workqueue(dvfs.dvfs_wq); @@ -293,7 +293,7 @@ int gpex_dvfs_init(struct device **dev) return 0; } -void gpex_dvfs_term() +void gpex_dvfs_term(void) { /* DVFS stuff */ gpu_pm_metrics_term(); @@ -301,7 +301,7 @@ void gpex_dvfs_term() dvfs.kbdev = NULL; } -int gpex_dvfs_get_status() +int gpex_dvfs_get_status(void) { return dvfs.status; } diff --git a/drivers/gpu/arm/exynos/frontend/gpex_thermal.c b/drivers/gpu/arm/exynos/frontend/gpex_thermal.c index 62f701bf17c0..7fcf6a55fd5a 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_thermal.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_thermal.c @@ -149,7 +149,7 @@ static ssize_t show_kernel_sysfs_gpu_temp(char *buf) } CREATE_SYSFS_KOBJECT_READ_FUNCTION(show_kernel_sysfs_gpu_temp); -static void gpex_thermal_create_sysfs_file() +static void gpex_thermal_create_sysfs_file(void) { GPEX_UTILS_SYSFS_DEVICE_FILE_ADD(tmu, show_tmu, set_tmu_control); GPEX_UTILS_SYSFS_KOBJECT_FILE_ADD_RO(gpu_tmu, show_kernel_sysfs_gpu_temp); @@ -158,14 +158,14 @@ static void gpex_thermal_create_sysfs_file() /*********************************************************************** * INIT, TERM FUNCTIONS ***********************************************************************/ -int gpex_thermal_init() +int gpex_thermal_init(void) { gpex_thermal_create_sysfs_file(); return 0; } -void gpex_thermal_term() +void gpex_thermal_term(void) { thermal.tmu_enabled = false; From 583d87342ed1663c8ac41e1c3f96eaabc4fff4f4 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 26 Nov 2019 15:22:48 +0300 Subject: [PATCH 091/439] drivers/gpu/exynos/g2d/g2d_task: fix kcalloc check Signed-off-by: Denis Efremov --- drivers/gpu/exynos/g2d/g2d_task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/exynos/g2d/g2d_task.c b/drivers/gpu/exynos/g2d/g2d_task.c index b0ccfbe699f7..5008d9f89c50 100644 --- a/drivers/gpu/exynos/g2d/g2d_task.c +++ b/drivers/gpu/exynos/g2d/g2d_task.c @@ 
-465,7 +465,7 @@ static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev, int id) task->source = kcalloc(g2d_dev->max_layers, sizeof(*task->source), GFP_KERNEL); - if (!task) + if (!task->source) goto err_alloc; INIT_LIST_HEAD(&task->node); From c485995bfdbe4b823852b6f5e150a2394d470698 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 15:51:44 +0300 Subject: [PATCH 092/439] drivers/video/fbdev/exynos/panel/aod/aod_drv: add return type int Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/aod/aod_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/aod/aod_drv.c b/drivers/video/fbdev/exynos/panel/aod/aod_drv.c index cda91fd33ce9..52591fe3321c 100644 --- a/drivers/video/fbdev/exynos/panel/aod/aod_drv.c +++ b/drivers/video/fbdev/exynos/panel/aod/aod_drv.c @@ -836,7 +836,7 @@ static int __aod_ioctl_set_digital_clk(struct aod_dev_info *aod, unsigned long a } -static __aod_ictl_set_parial_scan(struct aod_dev_info *aod, unsigned long arg) +static int __aod_ictl_set_parial_scan(struct aod_dev_info *aod, unsigned long arg) { int ret = 0; From 6bb210102114c3001f941529a7367fe8ced1ce44 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Nov 2022 20:55:01 +0400 Subject: [PATCH 093/439] drivers/soc/samsung/exynos-cpuhp.c: fix toupper definition Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-cpuhp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-cpuhp.c b/drivers/soc/samsung/exynos-cpuhp.c index 09da3bfc1438..66eb93310af1 100644 --- a/drivers/soc/samsung/exynos-cpuhp.c +++ b/drivers/soc/samsung/exynos-cpuhp.c @@ -419,7 +419,7 @@ static int cpuhp_control(bool enable) * #echo mask > /sys/power/cpuhp/set_online_cpu */ #define STR_LEN 6 -static inline toupper(char ch) +static inline int toupper(int ch) { if ('a' <= ch && ch <= 'z') ch += 'A' - 'a'; From b66e2d5fa95ad7311dee2f81d02fffa0f2dcd2bc Mon Sep 17 00:00:00 2001 From: 
Denis Efremov Date: Sat, 23 Nov 2019 13:53:41 +0300 Subject: [PATCH 094/439] drivers/media/platform/exynos/fimc-is2/fimc-is-binary: fix void * dereference Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c index 6e487be65613..ef3ff6c8de85 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c @@ -354,7 +354,7 @@ int carve_binary_version(enum is_bin_type type, unsigned int hint, void *data, s } buf = bin_ver_info[type].get_buf(&bin_ver_info[type], hint); - memcpy(buf, &data[ofs], len); + memcpy(buf, &((char *)data)[ofs], len); buf[len] = '\0'; info("%s version: %s\n", bin_names[bin_ver_info[type].get_name_idx(hint)], From dd68681d8e149625b0f699619e84819d0387a593 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:51:55 +0300 Subject: [PATCH 095/439] drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux: remove unused timeleft variable Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c b/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c index ea5ff1cc27b8..57583fc24cdd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c @@ -2811,15 +2811,13 @@ static int dhd_wait_for_file_dump(dhd_pub_t *dhdp) DHD_OS_WAKE_LOCK(dhdp); /* check for hal started and only then send event if not clear dump state here */ if (wl_cfg80211_is_hal_started(cfg)) { - int timeleft = 0; - DHD_ERROR(("[DUMP] %s: HAL started. 
send urgent event\n", __FUNCTION__)); dhd_dbg_send_urgent_evt(dhdp, NULL, 0); DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n", __FUNCTION__, dhdp->dhd_bus_busy_state)); - timeleft = dhd_os_busbusy_wait_bitmask(dhdp, - &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); + dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) { DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n", __FUNCTION__, dhdp->dhd_bus_busy_state)); From 60798bed420ebeb3c67f9621452afa0a4d3affc8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 07:57:32 +0300 Subject: [PATCH 096/439] drivers/media/platform/exynos/scaler/scaler-core: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/scaler/scaler-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/scaler/scaler-core.c b/drivers/media/platform/exynos/scaler/scaler-core.c index 05eab727dbfe..6ede627f14c1 100644 --- a/drivers/media/platform/exynos/scaler/scaler-core.c +++ b/drivers/media/platform/exynos/scaler/scaler-core.c @@ -3588,7 +3588,7 @@ static int sc_m2m1shot_prepare_buffer(struct m2m1shot_context *m21ctx, &buf_dma->plane[plane], dir, min_size); if (ret) { dev_err(ctx->sc_dev->dev, - "plane%d size %d is smaller than %d\n", + "plane%d size %zu is smaller than %u\n", plane, buf_dma->plane[plane].bytes_used, min_size); m2m1shot_unmap_dma_buf(m21ctx->m21dev->dev, From 9ab4fbb42e0387504dea50361fcc4646ff63c4c4 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 08:10:40 +0300 Subject: [PATCH 097/439] drivers/optics/max86915: fix printk format errors Signed-off-by: Denis Efremov --- drivers/optics/max86915.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/optics/max86915.c b/drivers/optics/max86915.c index e273cb0a4a56..93400af3878e 100644 --- a/drivers/optics/max86915.c +++ 
b/drivers/optics/max86915.c @@ -4658,7 +4658,7 @@ static int max86915_power_ctrl(struct max86915_device_data *data, int onoff) if (data->i2c_1p8 != NULL) { regulator_i2c_1p8 = regulator_get(NULL, data->i2c_1p8); if (IS_ERR(regulator_i2c_1p8) || regulator_i2c_1p8 == NULL) { - HRM_err("%s - get i2c_1p8 regulator failed, %d\n", __func__, PTR_ERR(regulator_i2c_1p8)); + HRM_err("%s - get i2c_1p8 regulator failed, %ld\n", __func__, PTR_ERR(regulator_i2c_1p8)); rc = -EINVAL; regulator_i2c_1p8 = NULL; goto get_i2c_1p8_failed; @@ -4669,7 +4669,7 @@ static int max86915_power_ctrl(struct max86915_device_data *data, int onoff) regulator_vdd_1p8 = regulator_get(&data->client->dev, data->vdd_1p8); if (IS_ERR(regulator_vdd_1p8) || regulator_vdd_1p8 == NULL) { - HRM_dbg("%s - get vdd_1p8 regulator failed, %d\n", __func__, PTR_ERR(regulator_vdd_1p8)); + HRM_dbg("%s - get vdd_1p8 regulator failed, %ld\n", __func__, PTR_ERR(regulator_vdd_1p8)); regulator_vdd_1p8 = NULL; goto get_vdd_1p8_failed; } From 82b95690bf6707a749898ab88c434032098bb7a5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:46:34 +0300 Subject: [PATCH 098/439] drivers/sensorhub/brcm/factory/magnetic_common: fix printk format errors Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/factory/magnetic_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/factory/magnetic_common.c b/drivers/sensorhub/brcm/factory/magnetic_common.c index 94ab8f709bdf..799123258396 100644 --- a/drivers/sensorhub/brcm/factory/magnetic_common.c +++ b/drivers/sensorhub/brcm/factory/magnetic_common.c @@ -821,7 +821,7 @@ int load_magnetic_cal_param_from_nvm(u8 *data, u8 length) cal_filp = filp_open(MAG_CAL_PARAM_FILE_PATH, O_CREAT | O_RDONLY | O_NOFOLLOW | O_NONBLOCK, 0660); if (IS_ERR(cal_filp)) { - pr_err("[SSP] %s: filp_open failed, errno = %d\n", __func__, PTR_ERR(cal_filp)); + pr_err("[SSP] %s: filp_open failed, errno = %ld\n", __func__, PTR_ERR(cal_filp)); 
set_fs(old_fs); iRet = PTR_ERR(cal_filp); From d001d73d4ecebfe622709430116c5a994be37b54 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:48:04 +0300 Subject: [PATCH 099/439] drivers/sensorhub/brcm/sx9330: fix printk format errors Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/sx9330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index 8a1d629eb246..f83bab018699 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -850,7 +850,7 @@ static ssize_t sx9330_avgthresh_show(struct device *dev, (1 << (4 + MAIN_SENSOR)), &avgthresh); avgthresh = (avgthresh & 0x3F000000) >> 24; - return snprintf(buf, PAGE_SIZE, "%ld\n", 16384 * avgthresh); + return snprintf(buf, PAGE_SIZE, "%u\n", 16384 * avgthresh); } static ssize_t sx9330_rawfilt_show(struct device *dev, From 497a6a79096a771d63d6bfbecdc318914f23b92e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 08:11:30 +0300 Subject: [PATCH 100/439] drivers/sensorhub/brcm/ssp_dev: fix printk format errors Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/ssp_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp_dev.c b/drivers/sensorhub/brcm/ssp_dev.c index 61cf2cdeb6bc..80d33ab39108 100644 --- a/drivers/sensorhub/brcm/ssp_dev.c +++ b/drivers/sensorhub/brcm/ssp_dev.c @@ -805,7 +805,7 @@ static int panel_notifier_callback(struct notifier_block *self, unsigned long ev pr_info("[SSP] %s PANEL_EVENT_BL_CHANGED %d %d\n", __func__, evdata->brightness, evdata->aor_ratio); } else { - pr_info("[SSP] %s unknown event %d\n", __func__, event); + pr_info("[SSP] %s unknown event %lu\n", __func__, event); } // store these values for reset From dcbce08985d2ff19e7ed851931f548691713dbd3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 08:11:55 +0300 Subject: [PATCH 101/439] drivers/video/fbdev/exynos/dpu20/decon: fix 
printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/decon.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon.h b/drivers/video/fbdev/exynos/dpu20/decon.h index d7a71fa26f36..da5f47fa244b 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon.h +++ b/drivers/video/fbdev/exynos/dpu20/decon.h @@ -1621,7 +1621,7 @@ static inline int decon_doze_wake_lock(struct decon_device *decon, usleep_range(1000, 1100); if (time_is_before_jiffies(timeout_jiffies)) { - decon_err("%s timeout(elapsed %d msec)\n", + decon_err("%s timeout(elapsed %lu msec)\n", __func__, timeout); } } From 44c2611bb1367cb1334d003b44f2ce5308c53e20 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 07:54:20 +0300 Subject: [PATCH 102/439] drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis: fix printk format errors Signed-off-by: Denis Efremov --- .../exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c b/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c index ccc4ce4242a6..e17503576a8f 100644 --- a/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c +++ b/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c @@ -645,7 +645,7 @@ int sensor_cis_set_initial_exposure(struct v4l2_subdev *subdev) if (cis->use_initial_ae) { cis->init_ae_setting = cis->last_ae_setting; - dbg_sensor(1, "[MOD:D:%d] %s short(exp:%d/again:%d/dgain:%d), long(exp:%d/again:%d/dgain:%d)\n", + dbg_sensor(1, "[MOD:D:%d] %s short(exp:%llu/again:%d/dgain:%d), long(exp:%llu/again:%d/dgain:%d)\n", cis->id, __func__, cis->init_ae_setting.exposure, cis->init_ae_setting.analog_gain, cis->init_ae_setting.digital_gain, cis->init_ae_setting.long_exposure, 
cis->init_ae_setting.long_analog_gain, cis->init_ae_setting.long_digital_gain); From 1bdbe94961b10f4f9682e6c42c771d56bb53024e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 08:13:13 +0300 Subject: [PATCH 103/439] drivers/rtc/class: fix printk format errors Signed-off-by: Denis Efremov --- drivers/rtc/class.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 65a4e9840646..fdd38df38d38 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -142,7 +142,7 @@ static int rtc_resume(struct device *dev) if (sleep_time.tv_sec >= 0) timekeeping_inject_sleeptime64(&sleep_time); else - pm_deferred_pr_dbg("rtc: suspended for 0.000 seconds (%lld)\n", + pm_deferred_pr_dbg("rtc: suspended for 0.000 seconds (%ld)\n", sleep_time.tv_sec); rtc_hctosys_ret = 0; return 0; From 2c95c0d186944565563422fde1a94ad88f9920b1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:17:48 +0300 Subject: [PATCH 104/439] block/blk-crypt: fix printk format errors Signed-off-by: Denis Efremov --- block/blk-crypt-fmp.c | 4 ++-- block/blk-crypt.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/block/blk-crypt-fmp.c b/block/blk-crypt-fmp.c index c4653d8ee861..07032ad19792 100644 --- a/block/blk-crypt-fmp.c +++ b/block/blk-crypt-fmp.c @@ -31,7 +31,7 @@ static void *blk_crypt_fmp_alloc_aes_xts(void) } if (IS_ERR(bctx)) { - pr_debug("error allocating diskciher '%s' transform: %d", + pr_debug("error allocating diskciher '%s' transform: %ld", cipher_str, PTR_ERR(bctx)); return bctx; } @@ -68,7 +68,7 @@ static int __init blk_crypt_alg_fmp_init(void) blk_crypt_handle = blk_crypt_alg_register(NULL, "xts(aes)", BLK_CRYPT_MODE_INLINE_PRIVATE, &fmp_hw_xts_cbs); if (IS_ERR(blk_crypt_handle)) { - pr_err("%s: failed to register alg(xts(aes)), err:%d\n", + pr_err("%s: failed to register alg(xts(aes)), err:%ld\n", __func__, PTR_ERR(blk_crypt_handle) ); blk_crypt_handle = NULL; } diff --git 
a/block/blk-crypt.c b/block/blk-crypt.c index 399135cfbe03..94fd159a88a7 100644 --- a/block/blk-crypt.c +++ b/block/blk-crypt.c @@ -163,7 +163,7 @@ blk_crypt_t *blk_crypt_get_context(struct block_device *bdev, const char *cipher struct blk_crypt_t *bctx; bctx = blk_crypt_alloc_context(bdev, cipher_str); if (IS_ERR(bctx)) { - pr_debug("error allocating diskciher '%s' err: %d", + pr_debug("error allocating diskciher '%s' err: %ld", cipher_str, PTR_ERR(bctx)); return bctx; } From f76a9437b6f793dc3996662815eaee6130a086e3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:20:01 +0300 Subject: [PATCH 105/439] fs/crypto/keyinfo: fix printk format errors Signed-off-by: Denis Efremov --- fs/crypto/keyinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 7279c9ef582b..fd763579eb21 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -319,7 +319,7 @@ static int prepare_inline_encryption(struct super_block *sb, struct fscrypt_info cipher_str = mode->cipher_str + INLINE_PREFIX_LEN; bctx = blk_crypt_get_context(bdev, cipher_str); if (IS_ERR(bctx)) { - pr_err("%s : failed to get blk_crypt context (transform: %s, err: %d)", + pr_err("%s : failed to get blk_crypt context (transform: %s, err: %ld)", __func__, cipher_str, PTR_ERR(bctx)); return PTR_ERR(bctx); } From f13cb058a20bfa1c315d5d70012df1eb37a783fd Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:21:23 +0300 Subject: [PATCH 106/439] security/sdp/dek: fix printk format errors Signed-off-by: Denis Efremov --- security/sdp/dek.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/security/sdp/dek.c b/security/sdp/dek.c index ceb619511d08..b5bc7429e148 100644 --- a/security/sdp/dek.c +++ b/security/sdp/dek.c @@ -1093,7 +1093,7 @@ static long dek_do_ioctl_req(unsigned int minor, unsigned int cmd, memset(tempPlain_dek->buf, 0, DEK_MAXLEN); if (ret < 0) { - DEK_LOGE("DEK_ENCRYPT_DEK: failed to 
encrypt dek! (err:%d)\n", ret); + DEK_LOGE("DEK_ENCRYPT_DEK: failed to encrypt dek! (err:%ld)\n", ret); zero_out((char *)&req, sizeof(dek_arg_encrypt_dek)); kzfree(tempPlain_dek); kzfree(tempEnc_dek); @@ -1162,7 +1162,7 @@ static long dek_do_ioctl_req(unsigned int minor, unsigned int cmd, tempEnc_dek, tempPlain_dek); if (ret < 0) { - DEK_LOGE("DEK_DECRYPT_DEK: failed to decrypt dek! (err:%d)\n", ret); + DEK_LOGE("DEK_DECRYPT_DEK: failed to decrypt dek! (err:%ld)\n", ret); zero_out((char *)&req, sizeof(dek_arg_decrypt_dek)); kzfree(tempPlain_dek); kzfree(tempEnc_dek); From 9fd2ff3fed43a8268bc6d1fc9ea8dc2befe48b26 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:21:51 +0300 Subject: [PATCH 107/439] security/sdp/dd_kernel_crypto: fix printk format errors Signed-off-by: Denis Efremov --- security/sdp/dd_kernel_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/sdp/dd_kernel_crypto.c b/security/sdp/dd_kernel_crypto.c index eeea5d21f541..c6aa2a066bbd 100644 --- a/security/sdp/dd_kernel_crypto.c +++ b/security/sdp/dd_kernel_crypto.c @@ -670,6 +670,6 @@ void dd_hex_key_dump(const char* tag, uint8_t *data, size_t data_len) } buf[buf_len - 1] = '\0'; printk(KERN_ERR - "[%s] %s(len=%d) : %s\n", "DEK_DBG", tag, data_len, buf); + "[%s] %s(len=%zu) : %s\n", "DEK_DBG", tag, data_len, buf); kfree(buf); } From 3a9fbb4ccc0c50aa7042f3d4ec88afda4f7d791d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:23:05 +0300 Subject: [PATCH 108/439] fs/crypto/sdp/sdp_dek: fix printk format errors Signed-off-by: Denis Efremov --- fs/crypto/sdp/sdp_dek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/crypto/sdp/sdp_dek.c b/fs/crypto/sdp/sdp_dek.c index f39720526f0d..a3b436cec497 100644 --- a/fs/crypto/sdp/sdp_dek.c +++ b/fs/crypto/sdp/sdp_dek.c @@ -85,7 +85,7 @@ void dump_file_key_hex(const char* tag, uint8_t *data, size_t data_len) } buf[buf_len - 1] = '\0'; printk(KERN_ERR - "[%s] %s(len=%d) : 
%s\n", "DEK_DBG", tag, data_len, buf); + "[%s] %s(len=%zu) : %s\n", "DEK_DBG", tag, data_len, buf); kfree(buf); } From 91376bff104bc55fbff3e2455183381000c3b8b3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 00:24:01 +0300 Subject: [PATCH 109/439] mm/page_alloc: fix printk format errors Signed-off-by: Denis Efremov --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5fb2136b90b8..c7ae28b60071 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4178,14 +4178,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, a_file += node_page_state(pgdat, NR_ACTIVE_FILE); in_file += node_page_state(pgdat, NR_INACTIVE_FILE); } - pr_info("alloc stall: timeJS(ms):%u|%u rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB\n", + pr_info("alloc stall: timeJS(ms):%u|%llu rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB\n", jiffies_to_msecs(jiffies - jiffies_s), stime_d / NSEC_PER_MSEC, did_some_progress, pages_reclaimed, retry_loop_count, order, gfp_mask, &gfp_mask, a_anon << (PAGE_SHIFT-10), in_anon << (PAGE_SHIFT-10), a_file << (PAGE_SHIFT-10), in_file << (PAGE_SHIFT-10)); - ologk("alloc stall: timeJS(ms):%u|%u rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB", + ologk("alloc stall: timeJS(ms):%u|%llu rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB", jiffies_to_msecs(jiffies - jiffies_s), stime_d / NSEC_PER_MSEC, did_some_progress, pages_reclaimed, retry_loop_count, From bd5af929381351bca60095a5727ef76599aa3f6c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 02:19:46 +0300 Subject: [PATCH 110/439] drivers/media/platform/exynos/fimc-is2/fimc-is-core: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/fimc-is2/fimc-is-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c index ae57fd041af1..a03d6269c6be 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c @@ -236,7 +236,7 @@ static int fimc_is_secure_face(struct fimc_is_core *core, if (ret != 0) { err("[SMC] SMC_SECCAM_PREPARE fail(%d)", ret); } else { - info("[SMC] Call SMC_SECCAM_PREPARE ret(%d) / state(%d->%d)\n", + info("[SMC] Call SMC_SECCAM_PREPARE ret(%d) / state(%lu->%d)\n", ret, core->secure_state, FIMC_IS_STATE_SECURED); core->secure_state = FIMC_IS_STATE_SECURED; } @@ -252,7 +252,7 @@ static int fimc_is_secure_face(struct fimc_is_core *core, if (ret != 0) { err("[SMC] SMC_SECCAM_UNPREPARE fail(%d)\n", ret); } else { - info("[SMC] Call SMC_SECCAM_UNPREPARE ret(%d) / smc_state(%d->%d)\n", + info("[SMC] Call SMC_SECCAM_UNPREPARE ret(%d) / smc_state(%lu->%d)\n", ret, core->secure_state, FIMC_IS_STATE_UNSECURE); core->secure_state = FIMC_IS_STATE_UNSECURE; } From e50ceaafe0c97e3dea2d9248b4dc9bf48de78973 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 02:30:22 +0300 Subject: [PATCH 111/439] drivers/media/platform/exynos/fimc-is2/fimc-is-binary: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c index ef3ff6c8de85..f69bf1673bb2 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c @@ -341,14 +341,14 @@ int carve_binary_version(enum is_bin_type type, unsigned int hint, void *data, s ofs = size - bin_ver_info[type].offset; if (ofs <= 0) { - pr_warn("out of range offset(size: %d <= offset: %d)\n", size, + pr_warn("out of range offset(size: %zu <= offset: 
%d)\n", size, bin_ver_info[type].offset); return -EINVAL; } len = bin_ver_info[type].length; if ((ofs + len) > size) { - pr_warn("too long version length (binary: %d < version: %d)\n", + pr_warn("too long version length (binary: %zu < version: %d)\n", size, (ofs + len)); len -= ((ofs + len) - size); } From c4895dc1322a97535690cc4c936d02b576bfc26c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 02:31:12 +0300 Subject: [PATCH 112/439] drivers/media/platform/exynos/jsqz/jsqz-core: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/jsqz/jsqz-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/jsqz/jsqz-core.c b/drivers/media/platform/exynos/jsqz/jsqz-core.c index e2ab043a497e..9cd6a77334f4 100644 --- a/drivers/media/platform/exynos/jsqz/jsqz-core.c +++ b/drivers/media/platform/exynos/jsqz/jsqz-core.c @@ -1390,7 +1390,7 @@ static long jsqz_ioctl(struct file *filp, return ret; } default: - dev_err(jsqz_device->dev, "%s: Unknown ioctl cmd %x, %x\n", + dev_err(jsqz_device->dev, "%s: Unknown ioctl cmd %x, %lx\n", __func__, cmd, HWJSQZ_IOC_PROCESS); return -EINVAL; } From ff9fa203de8c051e1994c08a8105e074d721f7bd Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 02:32:06 +0300 Subject: [PATCH 113/439] drivers/media/platform/exynos/mfc/mfc_qos: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/mfc/mfc_qos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/mfc/mfc_qos.c b/drivers/media/platform/exynos/mfc/mfc_qos.c index ae07680f3ac8..d2a82ff2f49d 100644 --- a/drivers/media/platform/exynos/mfc/mfc_qos.c +++ b/drivers/media/platform/exynos/mfc/mfc_qos.c @@ -521,7 +521,7 @@ static int __mfc_qos_get_freq_by_bps(struct mfc_dev *dev, unsigned long total_bp int i; if (total_bps > dev->pdata->max_Kbps[0]) { - mfc_debug(4, "[QoS] overspec bps %d > %d\n", + mfc_debug(4, 
"[QoS] overspec bps %lu > %d\n", total_bps, dev->pdata->max_Kbps[0]); return dev->bitrate_table[dev->pdata->num_mfc_freq - 1].mfc_freq; } @@ -992,7 +992,7 @@ static int __mfc_qos_get_bps_section(struct mfc_ctx *ctx, u32 bytesused) /* Standardization to high bitrate spec */ if (!CODEC_HIGH_PERF(ctx)) ctx->Kbps = dev->bps_ratio * ctx->Kbps; - mfc_debug(3, "[QoS] %d Kbps, average %lld Kbits per frame\n", ctx->Kbps, avg_Kbits); + mfc_debug(3, "[QoS] %d Kbps, average %lu Kbits per frame\n", ctx->Kbps, avg_Kbits); ctx->bitrate_index++; if (ctx->bitrate_index == MAX_TIME_INDEX) { From d46fd1eda139fb0e7186f0f9b6eea7a1f94b1513 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:49:51 +0300 Subject: [PATCH 114/439] drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr: fix printk format errors Signed-off-by: Denis Efremov --- .../media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c index 519d81fdd441..af057519206d 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c @@ -343,7 +343,7 @@ static int fimc_is_resourcemgr_allocmem(struct fimc_is_resourcemgr *resourcemgr) } minfo->total_size += minfo->pb_taaisp->size; - info("[RSC] TAAISP_DMA memory size (aligned) : %08lx\n", TAAISP_DMA_SIZE); + info("[RSC] TAAISP_DMA memory size (aligned) : %08lx\n", (unsigned long) TAAISP_DMA_SIZE); /* ME/DRC buffer */ #if (MEDRC_DMA_SIZE > 0) @@ -354,7 +354,7 @@ static int fimc_is_resourcemgr_allocmem(struct fimc_is_resourcemgr *resourcemgr) return -ENOMEM; } - info("[RSC] ME_DRC memory size (aligned) : %08lx\n", MEDRC_DMA_SIZE); + info("[RSC] ME_DRC memory size (aligned) : %08lx\n", (unsigned long) MEDRC_DMA_SIZE); minfo->total_size += minfo->pb_medrc->size; #endif @@ -623,7 +623,7 @@ 
static int fimc_is_resourcemgr_alloc_secure_mem(struct fimc_is_resourcemgr *reso return -ENOMEM; } - info("[RSC] TAAISP_DMA_S memory size (aligned) : %08lx\n", TAAISP_DMA_SIZE); + info("[RSC] TAAISP_DMA_S memory size (aligned) : %08lx\n", (unsigned long) TAAISP_DMA_SIZE); /* ME/DRC buffer */ #if (MEDRC_DMA_SIZE > 0) @@ -636,7 +636,7 @@ static int fimc_is_resourcemgr_alloc_secure_mem(struct fimc_is_resourcemgr *reso return -ENOMEM; } - info("[RSC] ME_DRC_S memory size (aligned) : %08lx\n", MEDRC_DMA_SIZE); + info("[RSC] ME_DRC_S memory size (aligned) : %08lx\n", (unsigned long) MEDRC_DMA_SIZE); #endif return 0; From dfeeedcfd6173dd1fd4fdc5124dddb37263da891 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 18:22:34 +0300 Subject: [PATCH 115/439] lib/debug-snapshot: fix printk format errors Signed-off-by: Denis Efremov --- lib/debug-snapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/debug-snapshot.c b/lib/debug-snapshot.c index 3078480e4a13..bb5faf1869f1 100644 --- a/lib/debug-snapshot.c +++ b/lib/debug-snapshot.c @@ -493,7 +493,7 @@ static int dbg_snapshot_sfr_dump_init(struct device_node *np) static int __init dbg_snapshot_remap(void) { - unsigned long i, j; + size_t i, j; unsigned long flags = VM_NO_GUARD | VM_MAP; unsigned int enabled_count = 0; pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -508,7 +508,7 @@ static int __init dbg_snapshot_remap(void) page_size = dss_items[i].entry.size / PAGE_SIZE; pages = kzalloc(sizeof(struct page *) * page_size, GFP_KERNEL); page = phys_to_page(dss_items[i].entry.paddr); - pr_info("%s: %2d: paddr: 0x%x\n", __func__, i, dss_items[i].entry.paddr); + pr_info("%s: %2zu: paddr: 0x%lx\n", __func__, i, dss_items[i].entry.paddr); for (j = 0; j < page_size; j++) pages[j] = page++; From 8e0213fb5c4b1a7f306a7e41872059ef7ea8b00a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:47:30 +0300 Subject: [PATCH 116/439] drivers/ccic/max77705_pd: fix printk format errors 
Signed-off-by: Denis Efremov --- drivers/ccic/max77705_pd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ccic/max77705_pd.c b/drivers/ccic/max77705_pd.c index 9323e3462322..4af6bc1dd884 100644 --- a/drivers/ccic/max77705_pd.c +++ b/drivers/ccic/max77705_pd.c @@ -611,7 +611,7 @@ void max77705_pdo_list(struct max77705_usbc_platform_data *usbc_data, unsigned c } if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; @@ -706,7 +706,7 @@ void max77705_current_pdo(struct max77705_usbc_platform_data *usbc_data, unsigne pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK; if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; @@ -795,7 +795,7 @@ void max77705_current_pdo(struct max77705_usbc_platform_data *usbc_data, unsigne } if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; From c66af9c3e27467a21cb17c1441d69bdc5627ccdc Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:49:58 +0300 Subject: [PATCH 117/439] drivers/motor/cs40l2x: fix printk format errors Signed-off-by: Denis Efremov --- drivers/motor/cs40l2x.c | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/drivers/motor/cs40l2x.c b/drivers/motor/cs40l2x.c index 58873f078b12..092b05e61d28 100644 --- a/drivers/motor/cs40l2x.c +++ b/drivers/motor/cs40l2x.c @@ -3353,7 +3353,7 @@ static ssize_t cs40l2x_motor_type_show(struct device *dev, static ssize_t cs40l2x_event_cmd_show(struct device *dev, struct device_attribute *attr, char *buf) { - pr_info("%s: [%d] %s\n", __func__, sec_prev_event_cmd); + pr_info("%s: %s\n", __func__, sec_prev_event_cmd); return snprintf(buf, MAX_STR_LEN_EVENT_CMD, "%s\n", sec_prev_event_cmd); } From 0b6363a912cd3bf9a2ab600e9b5e30c8c48b98e0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 13 Sep 2020 23:54:40 +0300 Subject: [PATCH 118/439] drivers/samsung/sec_dump_sink: fix printk format errors Signed-off-by: Denis Efremov --- drivers/samsung/sec_dump_sink.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/samsung/sec_dump_sink.c b/drivers/samsung/sec_dump_sink.c index 8ccde8d8a7fd..167e0822e598 100644 --- a/drivers/samsung/sec_dump_sink.c +++ b/drivers/samsung/sec_dump_sink.c @@ -58,7 +58,7 @@ static void sec_free_rdx_bootdev(phys_addr_t paddr, u64 size) unsigned long pfn_start, pfn_end, pfn_idx; int ret; - pr_info("start (0x%p, 0x%llx)\n", paddr, size); + pr_info("start (0x%p, 0x%llx)\n", (void *)paddr, size); if (!sec_rdx_bootdev_paddr) { pr_err("reserved addr is null\n"); @@ -128,7 +128,7 @@ static ssize_t sec_rdx_bootdev_proc_write(struct file *file, err = -ENODEV; } else { if (count > sec_rdx_bootdev_size) { - pr_err("size is wrong %llu > %llu\n", count, sec_rdx_bootdev_size); + pr_err("size is wrong %lu > %u\n", count, sec_rdx_bootdev_size); err = -EINVAL; goto out; } From c4dd08e160346192290d69ae33458b420b2cb6dd Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:43:19 +0300 Subject: [PATCH 119/439] drivers/scsi/scsi_lib: fix printk format errors Signed-off-by: Denis Efremov --- drivers/scsi/scsi_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b622637ac9df..ea5d94ce63df 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1462,7 +1462,7 @@ void scsi_alloc_tw(struct scsi_device *sdev) blk_alloc_turbo_write(sdev->request_queue); blk_register_tw_try_on_fn(sdev->request_queue, scsi_tw_try_on_fn); blk_register_tw_try_off_fn(sdev->request_queue, scsi_tw_try_off_fn); - printk(KERN_INFO "%s: register scsi ufs tw interface for LU %d\n", + printk(KERN_INFO "%s: register scsi ufs tw interface for LU %llu\n", __func__, sdev->lun); } } From 070ffab775dfe50138f1a562406a2b22d0bb6678 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:43:54 +0300 Subject: [PATCH 120/439] drivers/scsi/ufs/ufshcd: fix printk format errors Signed-off-by: Denis Efremov --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 93a440212360..21af2b4bce2f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5549,7 +5549,7 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) if (dLUNumTurboWriteBufferAllocUnits) { sdev->support_tw_lu = true; - dev_info(hba->dev, "%s: LU %d supports tw, twbuf unit : 0x%x\n", + dev_info(hba->dev, "%s: LU %llu supports tw, twbuf unit : 0x%x\n", __func__, sdev->lun, dLUNumTurboWriteBufferAllocUnits); } else sdev->support_tw_lu = false; From df819d081183f8dd92739a341c47768778d779df Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:45:54 +0300 Subject: [PATCH 121/439] drivers/sensorhub/brcm/bbdpl/bbd: fix printk format errors Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/bbdpl/bbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/bbdpl/bbd.c b/drivers/sensorhub/brcm/bbdpl/bbd.c index a103214c9a04..d29857429971 100644 --- a/drivers/sensorhub/brcm/bbdpl/bbd.c +++ b/drivers/sensorhub/brcm/bbdpl/bbd.c @@ -924,7 +924,7 @@ 
ssize_t bbd_urgent_patch_read(struct file *user_filp, char __user *buf, size_t s rd_size = urgent_patch_size - offset; // 02-3. read requested size of urget_patch - pr_info("[SSPBBD] %s : download in progress (%d/%d)", __func__, offset + rd_size, urgent_patch_size); + pr_info("[SSPBBD] %s : download in progress (%lu/%d)", __func__, offset + rd_size, urgent_patch_size); if(copy_to_user(buf, (void *)(urgent_buffer + offset), rd_size)) { pr_info("[SSPBBD] %s : copy to user from urgent_buffer", __func__); From bd8655ba6cbc63e7e13447f3f7e3dadf770efd1f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:50:59 +0300 Subject: [PATCH 122/439] drivers/soc/samsung/debug: fix printk format errors Signed-off-by: Denis Efremov --- drivers/soc/samsung/debug/exynos-ehld.c | 14 +++++++------- drivers/soc/samsung/debug/exynos-helper.c | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/soc/samsung/debug/exynos-ehld.c b/drivers/soc/samsung/debug/exynos-ehld.c index 8da790bfe836..1dc2a1fc8b31 100644 --- a/drivers/soc/samsung/debug/exynos-ehld.c +++ b/drivers/soc/samsung/debug/exynos-ehld.c @@ -98,8 +98,8 @@ static int exynos_ehld_start_cpu(unsigned int cpu) event = perf_event_create_kernel_counter(&exynos_ehld_attr, cpu, NULL, exynos_ehld_callback, NULL); if (IS_ERR(event)) { - ehld_printk(0, "@%s: cpu%d event make failed err:%d\n", - __func__, cpu, (int)event); + ehld_printk(0, "@%s: cpu%d event make failed err: %ld\n", + __func__, cpu, PTR_ERR(event)); return PTR_ERR(event); } else { ehld_printk(0, "@%s: cpu%d event make success\n", __func__, cpu); @@ -142,7 +142,7 @@ unsigned long long exynos_ehld_event_read_cpu(int cpu) if (!in_irq() && event) { total = perf_event_read_value(event, &enabled, &running); - ehld_printk(0, "%s: cpu%d - enabled: %zx, running: %zx, total: %zx\n", + ehld_printk(0, "%s: cpu%d - enabled: %llx, running: %llx, total: %llx\n", __func__, cpu, enabled, running, total); } return total; @@ -170,12 +170,12 @@ void 
exynos_ehld_event_raw_update_allcpu(void) data->time[count] = cpu_clock(cpu); if (cpu_is_offline(cpu) || !exynos_cpu.power_state(cpu) || !ctrl->ehld_running) { - ehld_printk(0, "%s: cpu%d is turned off : running:%x, power:%x, offline:%x\n", + ehld_printk(0, "%s: cpu%d is turned off : running:%x, power:%x, offline:%lx\n", __func__, cpu, ctrl->ehld_running, exynos_cpu.power_state(cpu), cpu_is_offline(cpu)); data->event[count] = 0xC2; data->pmpcsr[count] = 0; } else { - ehld_printk(0, "%s: cpu%d is turned on : running:%x, power:%x, offline:%x\n", + ehld_printk(0, "%s: cpu%d is turned on : running:%x, power:%x, offline:%lx\n", __func__, cpu, ctrl->ehld_running, exynos_cpu.power_state(cpu), cpu_is_offline(cpu)); DBG_UNLOCK(ctrl->dbg_base + PMU_OFFSET); val = __raw_readq(ctrl->dbg_base + PMU_OFFSET + PMUPCSR); @@ -186,7 +186,7 @@ void exynos_ehld_event_raw_update_allcpu(void) DBG_LOCK(ctrl->dbg_base + PMU_OFFSET); } raw_spin_unlock_irqrestore(&ctrl->lock, flags); - ehld_printk(0, "%s: cpu%d - time:%llu, event:0x%x\n", + ehld_printk(0, "%s: cpu%d - time:%llu, event:0x%llx\n", __func__, cpu, data->time[count], data->event[count]); } } @@ -444,7 +444,7 @@ static int exynos_ehld_init_dt_parse(struct device_node *np) return -ENOMEM; } - ehld_printk(1, "exynos-ehld: cpu#%d, cs_base:0x%x, dbg_base:0x%x, total:0x%x, ioremap:0x%x\n", + ehld_printk(1, "exynos-ehld: cpu#%d, cs_base:0x%x, dbg_base:0x%x, total:0x%x, ioremap:0x%lx\n", cpu, base, offset, ehld_main.cs_base + offset, (unsigned long)ctrl->dbg_base); } diff --git a/drivers/soc/samsung/debug/exynos-helper.c b/drivers/soc/samsung/debug/exynos-helper.c index dd2dcac9c64b..db7b58d147a3 100644 --- a/drivers/soc/samsung/debug/exynos-helper.c +++ b/drivers/soc/samsung/debug/exynos-helper.c @@ -200,7 +200,7 @@ void exynos_err_parse(u32 reg_idx, u64 reg, struct err_variant_data *exynos_cpu_ valid = reg & BIT(exynos_cpu_err->valid_bit); if (!valid) { - pr_emerg("%s valid_bit(%d) is NOT set (0x%lx)\n", + pr_emerg("%s valid_bit(%d) is 
NOT set (0x%llx)\n", exynos_cpu_err->reg_name, exynos_cpu_err->valid_bit, valid); return; } @@ -216,7 +216,7 @@ void exynos_err_parse(u32 reg_idx, u64 reg, struct err_variant_data *exynos_cpu_ field = (reg & GENMASK_ULL(fld_end, fld_offset)) >> fld_offset; if (field != 0) - pr_emerg("%s (%d:%d) %s 0x%lx\n", + pr_emerg("%s (%d:%d) %s 0x%x\n", exynos_cpu_err->reg_name, fld_end, fld_offset, variant[i].fld_name, field); From b3cab9105c4f2fd220bb5399b85412b760c01243 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 22:58:25 +0300 Subject: [PATCH 123/439] arch/arm64/mm/fault: fix printk format errors Signed-off-by: Denis Efremov --- arch/arm64/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6b158b39af7d..90052a10f9b9 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -702,7 +702,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) inf = esr_to_fault_info(esr); - pr_auto(ASL1, "%s (0x%08x) at 0x%016lx[0x%09lx]\n", + pr_auto(ASL1, "%s (0x%08x) at 0x%016lx[0x%09llx]\n", inf->name, esr, addr, show_virt_to_phys(addr)); /* * Synchronous aborts may interrupt code which had interrupts masked. 
From 98a203a64dc814484bdde34b524b2f4de6b1e7ed Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 23:02:25 +0300 Subject: [PATCH 124/439] drivers/media/platform/exynos/mfc/mfc_enc_ctrl: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c b/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c index 6412c57f920a..b95520c935fc 100644 --- a/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c +++ b/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c @@ -1157,7 +1157,7 @@ static void __mfc_enc_set_buf_ctrls_exception(struct mfc_ctx *ctx, value &= ~(0xFFFF); value |= (p->rc_frame_delta & 0xFFFF); MFC_WRITEL(value, MFC_REG_E_RC_FRAME_RATE); - mfc_debug(3, "[DROPCTRL] fps %d -> %d, delta: %d, reg: %#x\n", + mfc_debug(3, "[DROPCTRL] fps %d -> %ld, delta: %d, reg: %#x\n", p->rc_framerate, USEC_PER_SEC / ctx->ts_last_interval, p->rc_frame_delta, value); } @@ -1478,7 +1478,7 @@ static int mfc_enc_set_buf_ctrls_val_nal_q(struct mfc_ctx *ctx, pInStr->RcFrameRate &= ~(buf_ctrl->mask << buf_ctrl->shft); pInStr->RcFrameRate |= (p->rc_frame_delta & buf_ctrl->mask) << buf_ctrl->shft; - mfc_debug(3, "[NALQ][DROPCTRL] fps %d -> %d, delta: %d, reg: %#x\n", + mfc_debug(3, "[NALQ][DROPCTRL] fps %d -> %ld, delta: %d, reg: %#x\n", p->rc_framerate, USEC_PER_SEC / ctx->ts_last_interval, p->rc_frame_delta, pInStr->RcFrameRate); break; From b467dcf83796f378828d19b1541a3bc3f81d670b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 23:04:52 +0300 Subject: [PATCH 125/439] drivers/media/platform/exynos/tsmux/tsmux_reg: fix printk format errors Signed-off-by: Denis Efremov --- drivers/media/platform/exynos/tsmux/tsmux_reg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/tsmux/tsmux_reg.c b/drivers/media/platform/exynos/tsmux/tsmux_reg.c index 
5b04d6657a1a..e1f2fbdcab06 100644 --- a/drivers/media/platform/exynos/tsmux/tsmux_reg.c +++ b/drivers/media/platform/exynos/tsmux/tsmux_reg.c @@ -287,7 +287,7 @@ void tsmux_print_cmu_mfc_sfr(struct tsmux_device *tsmux_dev) { for (i = 0; i < tsmux_cmu_mfc_sfr_list_size; i++) { cmu_mfc_sfr = TSMUX_CMU_MFC_READL(tsmux_cmu_mfc_sfr_list[i].offset); - print_tsmux(TSMUX_SFR, "%.8x: %.8x: %.8x, %s\n", + print_tsmux(TSMUX_SFR, "%.8llx: %.8x: %.8x, %s\n", tsmux_cmu_mfc_sfr_list[i].base_pa, tsmux_cmu_mfc_sfr_list[i].offset, cmu_mfc_sfr, From 15a1b75d28fb424bde8ac3e91e0c1bbca510ceba Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 23:06:47 +0300 Subject: [PATCH 126/439] fs/f2fs/inode: fix printk format errors Signed-off-by: Denis Efremov --- fs/f2fs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 07c99ecb87dc..c2ea7d70060c 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -430,7 +430,7 @@ static int do_read_inode(struct inode *inode) corrupted_inode: printk_ratelimited(KERN_ERR "F2FS-fs: On-disk inode is corrupted: " - "err: %ld, inode: %u, first Non-zero: %lu\n", + "err: %d, inode: %lu, first Non-zero: %lu\n", err, inode->i_ino, find_first_bit(page_address(node_page), F2FS_BLKSIZE)); print_block_data(sbi->sb, node_page->index, @@ -439,7 +439,7 @@ static int do_read_inode(struct inode *inode) if (unlikely(!ignore_fs_panic)) { f2fs_set_sb_extra_flag(sbi, F2FS_SEC_EXTRA_FSCK_MAGIC); #ifdef CONFIG_F2FS_STRICT_BUG_ON - panic("F2FS 0x%p %x", + panic("F2FS 0x%p %lx", page_address(node_page), find_first_bit(page_address(node_page), F2FS_BLKSIZE*8)); #else From da0378dc712fc504c6d47d28ca992eb9f6edf549 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:33:45 +0300 Subject: [PATCH 127/439] drivers/samsung/debug: fix printk format errors Signed-off-by: Denis Efremov --- drivers/samsung/debug/sec_debug.c | 2 +- drivers/samsung/debug/sec_debug_dtask.c | 2 +- 
drivers/samsung/debug/sec_debug_hist.c | 4 ++-- drivers/samsung/debug/sec_debug_init_log.c | 2 +- drivers/samsung/debug/sec_debug_test.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/samsung/debug/sec_debug.c b/drivers/samsung/debug/sec_debug.c index 6d644157da12..5be4a0afe015 100644 --- a/drivers/samsung/debug/sec_debug.c +++ b/drivers/samsung/debug/sec_debug.c @@ -891,7 +891,7 @@ static void sec_debug_set_essinfo(void) init_ess_info(index++, "empty"); for (index = 0; index < SD_NR_ESSINFO_ITEMS; index++) - printk("%s: key: %s offset: %llx nr: %x\n", __func__, + printk("%s: key: %s offset: %lx nr: %x\n", __func__, sdn->ss_info.item[index].key, sdn->ss_info.item[index].base, sdn->ss_info.item[index].nr); diff --git a/drivers/samsung/debug/sec_debug_dtask.c b/drivers/samsung/debug/sec_debug_dtask.c index 577519d8ab60..37767cfca0c4 100644 --- a/drivers/samsung/debug/sec_debug_dtask.c +++ b/drivers/samsung/debug/sec_debug_dtask.c @@ -25,7 +25,7 @@ static void sec_debug_print_mutex_info(struct task_struct *task, struct sec_debu pr_info("Mutex: %pS", wmutex); if (owner_task) { if (raw) - pr_cont(": owner[0x%lx %s :%d]", owner_task, owner_task->comm, owner_task->pid); + pr_cont(": owner[%p %s :%d]", owner_task, owner_task->comm, owner_task->pid); else pr_cont(": owner[%s :%d]", owner_task->comm, owner_task->pid); } diff --git a/drivers/samsung/debug/sec_debug_hist.c b/drivers/samsung/debug/sec_debug_hist.c index 3e88f40b7654..2fb02db8136e 100644 --- a/drivers/samsung/debug/sec_debug_hist.c +++ b/drivers/samsung/debug/sec_debug_hist.c @@ -42,7 +42,7 @@ static ssize_t sec_dhist_read(struct file *file, char __user *buf, } if (pos >= dhist_size) { - pr_crit("%s: pos %x , dhist: %x\n", __func__, pos, dhist_size); + pr_crit("%s: pos %lld, dhist: %x\n", __func__, pos, dhist_size); ret = 0; @@ -53,7 +53,7 @@ static ssize_t sec_dhist_read(struct file *file, char __user *buf, base = (char *)phys_to_virt((phys_addr_t)dhist_base); if (!base) { - 
pr_crit("%s: fail to get va (%llx)\n", __func__, dhist_base); + pr_crit("%s: fail to get va (%lx)\n", __func__, dhist_base); ret = -EFAULT; diff --git a/drivers/samsung/debug/sec_debug_init_log.c b/drivers/samsung/debug/sec_debug_init_log.c index 7b7c2553f3de..e99899ebdfef 100644 --- a/drivers/samsung/debug/sec_debug_init_log.c +++ b/drivers/samsung/debug/sec_debug_init_log.c @@ -50,7 +50,7 @@ static int __init sec_debug_init_init_log(void) buf_ptr = (char *)phys_to_virt((sec_debug_get_buf_base(SDN_MAP_INITTASK_LOG))); buf_size = sec_debug_get_buf_size(SDN_MAP_INITTASK_LOG); - pr_err("%s: buffer size 0x%llx at addr 0x%llx\n", __func__, buf_size ,buf_ptr); + pr_err("%s: buffer size 0x%lx at addr %p\n", __func__, buf_size ,buf_ptr); if (!buf_ptr || !buf_size) return 0; diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 552449bcb020..96245ce6bf18 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ b/drivers/samsung/debug/sec_debug_test.c @@ -664,7 +664,7 @@ static void simulate_SAFEFAULT(char *arg) smp_call_function(simulate_ALLSPIN_LOCKUP_handler, NULL, 0); - pr_info("%s %p %s %d %p %p %llx\n", + pr_info("%s %p %s %d %p %p %lx\n", __func__, current, current->comm, current->pid, current_thread_info(), current->stack, current_stack_pointer); From 6d0e0c2fa24adbb8cef4d9e7e55e315aa9575620 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 10:56:50 +0300 Subject: [PATCH 128/439] drivers/soc/samsung: fix printk format errors Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-ppmpu.c | 4 ++-- drivers/soc/samsung/exynos-sci.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/soc/samsung/exynos-ppmpu.c b/drivers/soc/samsung/exynos-ppmpu.c index d16b31f32282..533ae3db658b 100644 --- a/drivers/soc/samsung/exynos-ppmpu.c +++ b/drivers/soc/samsung/exynos-ppmpu.c @@ -231,7 +231,7 @@ static int exynos_ppmpu_probe(struct platform_device *pdev) break; case 
PPMPU_ERROR_INVALID_FAIL_INFO_SIZE: dev_err(data->dev, - "The size of struct ppmpu_fail_info(%#x) is invalid\n", + "The size of struct ppmpu_fail_info(%#lx) is invalid\n", sizeof(struct ppmpu_fail_info)); break; case SMC_CMD_CHECK_PPMPU_CH_NUM: @@ -277,7 +277,7 @@ static int exynos_ppmpu_probe(struct platform_device *pdev) "VA of ppmpu_fail_info : %lx\n", (unsigned long)data->fail_info); dev_dbg(data->dev, - "PA of ppmpu_fail_info : %lx\n", + "PA of ppmpu_fail_info : %llx\n", data->fail_info_pa); ret = of_property_read_u32(data->dev->of_node, "irqcnt", &data->irqcnt); diff --git a/drivers/soc/samsung/exynos-sci.c b/drivers/soc/samsung/exynos-sci.c index 9f3a7b2bd870..e5eeb5d6afd6 100644 --- a/drivers/soc/samsung/exynos-sci.c +++ b/drivers/soc/samsung/exynos-sci.c @@ -585,14 +585,14 @@ void sci_error_dump(void) exynos_sci_err_parse(SCI_ERRSTATHI, sci_reg); pr_info("SCI_ErrStatLo : %08x\n", sci_reg = __raw_readl(sci_base + SCI_ErrStatLo)); exynos_sci_err_parse(SCI_ERRSTATLO, sci_reg); - pr_info("SCI_ErrAddr(Hi,Lo): %08x %08x\n", + pr_info("SCI_ErrAddr(Hi,Lo): %08lx %08x\n", sci_reg_hi = __raw_readl(sci_base + SCI_ErrAddrHi), sci_reg = __raw_readl(sci_base + SCI_ErrAddrLo)); sci_reg_addr = sci_reg + (MSB_MASKING & (sci_reg_hi << 32L)); sci_ns = (ERR_NS & sci_reg_hi) >> 8; sci_err_inj = (ERR_INJ_DONE & sci_reg_hi) >> 31; - pr_info("SCI_ErrAddr : %016lx (NS:%d, ERR_INJ:%d)\n", sci_reg_addr, sci_err_inj); + pr_info("SCI_ErrAddr : %016lx (NS:%u, ERR_INJ:%u)\n", sci_reg_addr, sci_ns, sci_err_inj); exynos_dump_common_cpu_reg(); pr_info("============================================================\n"); } From bb138d1993fa95612d9a712eeaa3081023f0e31b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:46:38 +0300 Subject: [PATCH 129/439] drivers/video/fbdev/exynos/panel/panel_drv: fix printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/panel_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/drivers/video/fbdev/exynos/panel/panel_drv.c b/drivers/video/fbdev/exynos/panel/panel_drv.c index 3c4465693b8e..e36ec272c3ba 100644 --- a/drivers/video/fbdev/exynos/panel/panel_drv.c +++ b/drivers/video/fbdev/exynos/panel/panel_drv.c @@ -2480,14 +2480,14 @@ static int of_get_panel_gpio(struct device_node *np, struct panel_gpio *gpio) if ((gpio->dir & GPIOF_DIR_IN) == GPIOF_DIR_OUT) { ret = gpio_request(gpio->num, gpio->name); if (ret < 0) { - panel_err("PANEL:ERR:%s:failed to request gpio(%s:%d)\n", + panel_err("PANEL:ERR:%s:failed to request gpio(%d:%s)\n", __func__, gpio->num, gpio->name); return ret; } } else { ret = gpio_request_one(gpio->num, GPIOF_IN, gpio->name); if (ret < 0) { - panel_err("PANEL:ERR:%s:failed to request gpio(%s:%d)\n", + panel_err("PANEL:ERR:%s:failed to request gpio(%d:%s)\n", __func__, gpio->num, gpio->name); return ret; } From 0c4fa1f98585236a68de8c453fbddfefdf72d1bf Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 12:14:04 +0300 Subject: [PATCH 130/439] sound/soc/samsung/abox/abox_cmpnt_v20: fix printk format errors Signed-off-by: Denis Efremov --- sound/soc/samsung/abox/abox_cmpnt_v20.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_cmpnt_v20.c b/sound/soc/samsung/abox/abox_cmpnt_v20.c index c6203af533c4..064357a837be 100644 --- a/sound/soc/samsung/abox/abox_cmpnt_v20.c +++ b/sound/soc/samsung/abox/abox_cmpnt_v20.c @@ -2612,7 +2612,7 @@ static int asrc_update_tick(struct abox_data *data, int stream, int id) int ticknum, tickdiv; int i, res, ret = 0; - dev_dbg(dev, "%s(%d, %d, %ulHz)\n", __func__, stream, id, aclk); + dev_dbg(dev, "%s(%d, %d, %luHz)\n", __func__, stream, id, aclk); if (idx < 0) { dev_err(dev, "%s(%d, %d): invalid idx: %d\n", __func__, From 9e257d233c3e65a595570d642d9479aacf3089e3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 12:14:31 +0300 Subject: [PATCH 131/439] sound/soc/samsung/sec_audio_sysfs: fix printk format errors 
Signed-off-by: Denis Efremov --- sound/soc/samsung/sec_audio_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/samsung/sec_audio_sysfs.c b/sound/soc/samsung/sec_audio_sysfs.c index 814b037e1551..28994d5f0b1b 100644 --- a/sound/soc/samsung/sec_audio_sysfs.c +++ b/sound/soc/samsung/sec_audio_sysfs.c @@ -249,7 +249,7 @@ static int __init sec_audio_sysfs_init(void) audio_data->jack_dev = NULL; --dev_id; } else { - pr_info("%s: create earjack device id(%lu)\n", + pr_info("%s: create earjack device id(%u)\n", __func__, dev_id); audio_data->jack_dev_id = dev_id; } @@ -275,7 +275,7 @@ static int __init sec_audio_sysfs_init(void) audio_data->codec_dev = NULL; --dev_id; } else { - pr_info("%s: create codec device id(%lu)\n", + pr_info("%s: create codec device id(%u)\n", __func__, dev_id); audio_data->codec_dev_id = dev_id; } From 58e4db6bc803a68a4e7e8853b8eef10a131fccf1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 09:24:08 +0300 Subject: [PATCH 132/439] drivers/optics/tcs3407: fix printk format errors Signed-off-by: Denis Efremov --- drivers/optics/tcs3407.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/optics/tcs3407.c b/drivers/optics/tcs3407.c index c09280954711..08f87c5368f1 100644 --- a/drivers/optics/tcs3407.c +++ b/drivers/optics/tcs3407.c @@ -2392,7 +2392,7 @@ static int tcs3407_eol_mode(struct tcs3407_device_data *data) s2mpb02_led_en(S2MPB02_TORCH_LED_1, 0, S2MPB02_LED_TURN_WAY_GPIO); gpio_free(data->pin_led_en); } else { - ALS_dbg("%s - PWM torch set 0x%x 0x%x\n", __func__, data->pinctrl_pwm, data->pinctrl_out); + ALS_dbg("%s - PWM torch set 0x%p 0x%p\n", __func__, data->pinctrl_pwm, data->pinctrl_out); pinctrl_select_state(data->als_pinctrl, data->pinctrl_pwm); pwm_get_state(data->pwm, &state); @@ -2440,7 +2440,7 @@ static int tcs3407_eol_mode(struct tcs3407_device_data *data) pwm_apply_state(data->pwm, &state); - ALS_dbg("%s - pinctrl out = 0x%x\n", __func__, 
data->pinctrl_out); + ALS_dbg("%s - pinctrl out = 0x%p\n", __func__, data->pinctrl_out); pinctrl_select_state(data->als_pinctrl, data->pinctrl_out); } @@ -3054,14 +3054,14 @@ static int tcs3407_parse_dt(struct tcs3407_device_data *data) data->pinctrl_out = pinctrl_lookup_state(data->als_pinctrl, "torch_out"); if (IS_ERR(data->pinctrl_pwm) || IS_ERR(data->pinctrl_out)) { - ALS_err("%s - Failed to get pinctrl for pwm, %d %d\n", + ALS_err("%s - Failed to get pinctrl for pwm, %ld %ld\n", __func__, PTR_ERR(data->pinctrl_pwm), PTR_ERR(data->pinctrl_out)); data->pinctrl_pwm = NULL; data->pinctrl_out = NULL; } else { data->pwm = devm_of_pwm_get(dev, dNode, NULL); if (IS_ERR(data->pwm)) { - ALS_err("%s - unable to request PWM %d\n", __func__, PTR_ERR(data->pwm)); + ALS_err("%s - unable to request PWM %ld\n", __func__, PTR_ERR(data->pwm)); data->pwm = NULL; } } From 94e25ebd7cdb455d3c8e24ae0eb26cac646465ca Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:17:56 +0300 Subject: [PATCH 133/439] drivers/misc/mcu_ipc/shm_ipc: fix printk format errors Signed-off-by: Denis Efremov --- drivers/misc/mcu_ipc/shm_ipc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/misc/mcu_ipc/shm_ipc.c b/drivers/misc/mcu_ipc/shm_ipc.c index d427e987d433..641749bfa9ca 100644 --- a/drivers/misc/mcu_ipc/shm_ipc.c +++ b/drivers/misc/mcu_ipc/shm_ipc.c @@ -1031,16 +1031,16 @@ static int shm_probe(struct platform_device *pdev) pdata.p_acpm_addr, pdata.acpm_size); #ifdef CONFIG_LINK_DEVICE_PCIE - dev_info(dev, "msi_base=0x%08X msi_size=0x%08X\n", + dev_info(dev, "msi_base=0x%08lX msi_size=0x%08X\n", pdata.p_msi_addr, pdata.t_msi_size); #endif #ifdef CONFIG_SEC_SIPC_DUAL_MODEM_IF - dev_info(dev, "s5100_ipc_base=0x%08X s5100_ipc_size=0x%08X\n", + dev_info(dev, "s5100_ipc_base=0x%08lX s5100_ipc_size=0x%08X\n", pdata.p_s5100_ipc_addr, pdata.t_s5100_ipc_size); - dev_info(dev, "s5100_cp2cp_addr=0x%08X s5100_cp2cp_size=0x%08X s5100_cp2cp_offset=0x%08X\n", - 
pdata.p_s5100_ipc_addr, pdata.t_s5100_ipc_size, + dev_info(dev, "s5100_cp2cp_addr=0x%08lX s5100_cp2cp_size=0x%08X s5100_cp2cp_offset=0x%08X\n", + pdata.p_s5100_cp2cp_addr, pdata.t_s5100_cp2cp_size, pdata.s5100_cp2cp_off); #endif From 6e61612eb49c61bdcd3376bb857ffbe5a15070ed Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:20:05 +0300 Subject: [PATCH 134/439] drivers/misc/modem_v1_dual: fix printk format errors Signed-off-by: Denis Efremov --- drivers/misc/modem_v1_dual/link_device_shmem.c | 4 ++-- drivers/misc/modem_v1_dual/modem_ctrl_s5100.c | 2 +- drivers/misc/modem_v1_dual/s5100_pcie.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/misc/modem_v1_dual/link_device_shmem.c b/drivers/misc/modem_v1_dual/link_device_shmem.c index d90f212ba311..6ed6239ee8df 100644 --- a/drivers/misc/modem_v1_dual/link_device_shmem.c +++ b/drivers/misc/modem_v1_dual/link_device_shmem.c @@ -2453,7 +2453,7 @@ static int shmem_security_request(struct link_device *ld, struct io_device *iod, cp_init_done = 1; } - mif_err("mode=%lx, param2=0x%lx, param3=0x%lx, cp_base_addr=0x%lx\n", + mif_err("mode=%x, param2=0x%lx, param3=0x%lx, cp_base_addr=0x%lx\n", msr.mode, param2, param3, shm_get_phys_base()); err = exynos_smc(SMC_ID, msr.mode, param2, param3); @@ -2489,7 +2489,7 @@ static int shmem_security_cp2cp_baaw_request(struct link_device *ld, unsigned int cp2cp_size = shm_get_s5100_cp2cp_size(); unsigned int cp2cp_offset = shm_get_s5100_cp2cp_offset(); - mif_info("cp2cp_addr=0x%08X cp2cp_size=0x%08X cp2cp_offset=0x%08X\n", + mif_info("cp2cp_addr=0x%08lX cp2cp_size=0x%08X cp2cp_offset=0x%08X\n", cp2cp_base, cp2cp_size, cp2cp_offset); #if defined(CONFIG_CP_SECURE_BOOT) diff --git a/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c b/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c index c7742a2c20a1..95b10d948f7d 100644 --- a/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c +++ b/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c @@ -1220,7 +1220,7 @@ static 
int s5100_pm_notifier(struct notifier_block *notifier, break; default: - mif_info("pm_event %d\n", pm_event); + mif_info("pm_event %lu\n", pm_event); break; } diff --git a/drivers/misc/modem_v1_dual/s5100_pcie.c b/drivers/misc/modem_v1_dual/s5100_pcie.c index fb8aa5a14884..bdcfa8af7f72 100644 --- a/drivers/misc/modem_v1_dual/s5100_pcie.c +++ b/drivers/misc/modem_v1_dual/s5100_pcie.c @@ -116,7 +116,7 @@ inline int s5100pcie_send_doorbell_int(int int_num) reg = ioread32(s5100pcie.doorbell_addr); /* debugging: */ - mif_debug("DBG: s5100pcie.doorbell_addr = 0x%x - written(int_num=0x%x) read(reg=0x%x)\n", \ + mif_debug("DBG: s5100pcie.doorbell_addr = 0x%p - written(int_num=0x%x) read(reg=0x%x)\n", \ s5100pcie.doorbell_addr, int_num, reg); if (reg == 0xffffffff) { @@ -400,7 +400,7 @@ static int s5100pcie_probe(struct pci_dev *pdev, s5100pcie.doorbell_addr = devm_ioremap_wc(&pdev->dev, 0x11000d20, SZ_4); - pr_info("s5100pcie.doorbell_addr = 0x%x (CONFIG_SOC_EXYNOS9820: 0x11000d20)\n", \ + pr_info("s5100pcie.doorbell_addr = 0x%p (CONFIG_SOC_EXYNOS9820: 0x11000d20)\n", \ s5100pcie.doorbell_addr); #else #error "Can't set Doorbell interrupt register!" 
From f25008b32be7849246e352eaf00d62bd7795d60b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:29:59 +0300 Subject: [PATCH 135/439] drivers/input/wacom: fix printk format errors Signed-off-by: Denis Efremov --- drivers/input/wacom/wacom_i2c_elec.c | 26 +++++++++++----------- drivers/input/wacom/wacom_i2c_sec.c | 32 ++++++++++++++-------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/input/wacom/wacom_i2c_elec.c b/drivers/input/wacom/wacom_i2c_elec.c index 4c29498ee991..2781959c9f37 100644 --- a/drivers/input/wacom/wacom_i2c_elec.c +++ b/drivers/input/wacom/wacom_i2c_elec.c @@ -497,7 +497,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->xx_xx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_xx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -510,7 +510,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->xy_xy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_xy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -523,7 +523,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->yx_yx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_yx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -536,7 +536,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->yy_yy[i]); + snprintf(tmp_buf, 
CMD_RESULT_WORD_LEN, "%lld ", edata->yy_yy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -568,7 +568,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->rxx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -581,7 +581,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->rxy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -594,7 +594,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->ryx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -607,7 +607,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->ryy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -639,7 +639,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->drxx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -652,7 
+652,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->drxy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -665,7 +665,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->dryx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -678,7 +678,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->dryy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -687,4 +687,4 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(buff, 0x00, buff_size); kfree(buff); -} \ No newline at end of file +} diff --git a/drivers/input/wacom/wacom_i2c_sec.c b/drivers/input/wacom/wacom_i2c_sec.c index 6729f07500cb..a70935be8052 100644 --- a/drivers/input/wacom/wacom_i2c_sec.c +++ b/drivers/input/wacom/wacom_i2c_sec.c @@ -1673,7 +1673,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1687,7 +1687,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, 
CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1701,7 +1701,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1715,7 +1715,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yy_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1729,7 +1729,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1743,7 +1743,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1757,7 +1757,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < 
edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1771,7 +1771,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yy_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1785,7 +1785,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxx_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1799,7 +1799,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxy_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1813,7 +1813,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryx_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1827,7 +1827,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for 
(i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryy_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1841,7 +1841,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxx_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1855,7 +1855,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxy_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1869,7 +1869,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryx_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1883,7 +1883,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryy_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); From 39e643af1ddd5bed7704959473f289ec1ad474df Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 
Sep 2020 10:35:24 +0300 Subject: [PATCH 136/439] drivers/sensorhub/brcm/sx9360: fix printk format errors Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/sx9360.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9360.c b/drivers/sensorhub/brcm/sx9360.c index 0889c58d3110..835f3cdac3a6 100644 --- a/drivers/sensorhub/brcm/sx9360.c +++ b/drivers/sensorhub/brcm/sx9360.c @@ -973,7 +973,7 @@ static ssize_t sx9360_normal_threshold_show(struct device *dev, break; } - return snprintf(buf, PAGE_SIZE, "%lu,%lu\n", + return snprintf(buf, PAGE_SIZE, "%u,%u\n", (u32)threshold + (u32)hyst, (u32)threshold - (u32)hyst); } From 898187ea163665a479189891394bf2b52859b63e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:04:38 +0300 Subject: [PATCH 137/439] drivers/video/fbdev/exynos/dpu20/bts: fix printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/bts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/bts.c b/drivers/video/fbdev/exynos/dpu20/bts.c index ed1c6d319762..dc91a8dc98cd 100644 --- a/drivers/video/fbdev/exynos/dpu20/bts.c +++ b/drivers/video/fbdev/exynos/dpu20/bts.c @@ -365,7 +365,7 @@ u64 dpu_bts_calc_aclk_disp(struct decon_device *decon, if ((aclk_disp > TSP_INTER_MIN) && (aclk_disp < TSP_INTER_MAX)) { - decon_dbg("aclk : %d -> %d\n", aclk_disp, ACLK_AVOID_INTER); + decon_dbg("aclk : %lld -> %d\n", aclk_disp, ACLK_AVOID_INTER); aclk_disp = ACLK_AVOID_INTER; } } From 262f2c5d4d08a5047565f903a6b40f869c3fdf78 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 01:03:10 +0300 Subject: [PATCH 138/439] drivers/soc/samsung/exynos-seclog: fix printk format errors Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-seclog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-seclog.c b/drivers/soc/samsung/exynos-seclog.c index 32b563fa85c2..157d86df4609 100644 --- 
a/drivers/soc/samsung/exynos-seclog.c +++ b/drivers/soc/samsung/exynos-seclog.c @@ -354,7 +354,7 @@ static int exynos_seclog_probe(struct platform_device *pdev) } dev_info(&pdev->dev, - "Message buffer address[PA : %#lx, VA : %#lx], Message buffer size[%#lx]\n", + "Message buffer address[PA : %#lx, VA : %p], Message buffer size[%#lx]\n", ldata.phys_addr, ldata.virt_addr, ldata.size); dev_info(&pdev->dev, "Exynos Secure Log driver probe done!\n"); From 81f07270d33d9d5e65b3ef4a1f9ebb038bd1d94b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 16:09:30 +0300 Subject: [PATCH 139/439] sound/soc/samsung/abox/abox_mmapfd: fix printk format errors Signed-off-by: Denis Efremov --- sound/soc/samsung/abox/abox_mmapfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_mmapfd.c b/sound/soc/samsung/abox/abox_mmapfd.c index 2b048b7d39a2..1cd5cb9c9de4 100644 --- a/sound/soc/samsung/abox/abox_mmapfd.c +++ b/sound/soc/samsung/abox/abox_mmapfd.c @@ -120,7 +120,7 @@ int abox_ion_alloc(struct abox_platform_data *data, buf->kva); if (ret < 0) { - dev_err(dev, "Failed to iommu_map(%#lx): %d\n", + dev_err(dev, "Failed to iommu_map(%#llx): %d\n", buf->iova, ret); goto error_iommu_map_sg; From 6b1c6c48f944406ee66916693a1cb1ec6024edaf Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 16:10:44 +0300 Subject: [PATCH 140/439] sound/soc/samsung/abox/abox_rdma: fix printk format errors Signed-off-by: Denis Efremov --- sound/soc/samsung/abox/abox_rdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_rdma.c b/sound/soc/samsung/abox/abox_rdma.c index f02f667fdd9f..2e2af4591674 100644 --- a/sound/soc/samsung/abox/abox_rdma.c +++ b/sound/soc/samsung/abox/abox_rdma.c @@ -1425,7 +1425,7 @@ static int abox_rdma_hw_params(struct snd_pcm_substream *substream, dev_info(dev, "dma buffer changed\n"); } } else if (data->buf_type == BUFFER_TYPE_ION) { - dev_info(dev, "ion_buffer %s 
bytes(%d) size(%d)\n", + dev_info(dev, "ion_buffer %s bytes(%zu) size(%zu)\n", __func__, buffer_bytes, data->ion_buf.size); } else { From e62be6d3644e1ccf7d5ed0492b2a2f6774d94159 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 01:54:04 +0300 Subject: [PATCH 141/439] drivers/video/fbdev/exynos/dpu20/displayport_drv: fix printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index dc3f6efb739c..bf924a06d258 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -142,7 +142,7 @@ static u64 displayport_find_edid_max_pixelclock(void) supported_videos[i].dv_timings.bt.pixelclock > max_pclk) max_pclk = supported_videos[i].dv_timings.bt.pixelclock; } - displayport_info("find max pclk : %ld\n", max_pclk); + displayport_info("find max pclk : %lld\n", max_pclk); return max_pclk; } @@ -167,7 +167,7 @@ static int displayport_check_edid_max_clock(struct displayport_device *displaypo if (displayport->rx_edid_data.max_support_clk != 0) { if (calc_pixel_clock > displayport->rx_edid_data.max_support_clk * MHZ) { displayport_info("RX support Max TMDS Clock = %llu, but pixel clock = %llu\n", - displayport->rx_edid_data.max_support_clk * MHZ, calc_pixel_clock); + (u64) displayport->rx_edid_data.max_support_clk * MHZ, calc_pixel_clock); ret_val = false; } } else From 1b6d2b976096dc1346413c7b0e22219747956447 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 01:59:13 +0300 Subject: [PATCH 142/439] drivers/video/fbdev/exynos/dpu20/decon_core: fix printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c 
b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 9d5d15e5c204..66dfd7ab1118 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -3513,7 +3513,7 @@ static int decon_ioctl(struct fb_info *info, unsigned int cmd, v4l2_subdev_call(decon->dpp_sd[i], core, ioctl, DPP_GET_RESTRICTION, &disp_res.dpp_ch[i]); - decon_info("DECON:INFO:%s:DPP_RESTRICTIONS:0x%x\n", + decon_info("DECON:INFO:%s:DPP_RESTRICTIONS:0x%lx\n", __func__, disp_res.dpp_ch[i].attr); } From 0a71bc602873751027e9d923cee3cec281e7a3ae Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 09:12:05 +0300 Subject: [PATCH 143/439] drivers/video/fbdev/exynos/dpu20/event_log: fix printk format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/event_log.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/event_log.c b/drivers/video/fbdev/exynos/dpu20/event_log.c index ffe5cb80d14e..029574147004 100644 --- a/drivers/video/fbdev/exynos/dpu20/event_log.c +++ b/drivers/video/fbdev/exynos/dpu20/event_log.c @@ -1987,7 +1987,7 @@ static int __init decon_event_log_setup(char *str) #else if (reserve_bootmem(base - 8, size + 8, BOOTMEM_EXCLUSIVE)) { #endif - pr_err("%s: failed reserving size %d at base 0x%lx\n", + pr_err("%s: failed reserving size %zu at base 0x%lx\n", __func__, size, base); goto setup_exit; } @@ -1997,7 +1997,7 @@ static int __init decon_event_log_setup(char *str) rdx_mem_size = size; pr_info("%s: *disp_rdx_log_ptr:%x\n", __func__, *rdx_mem_ptr); - pr_info("%s: disp_rdx_log_buf:%p disp_rdx_log_size:0x%llx\n", + pr_info("%s: disp_rdx_log_buf:%p disp_rdx_log_size:%zu\n", __func__, rdx_mem_buf, rdx_mem_size); return 1; @@ -2024,14 +2024,14 @@ int decon_create_debugfs(struct decon_device *decon) if (decon->id == 0) { decon->d.event_log_header = rdx_mem_alloc(sizeof(struct dpu_log_header)); if (IS_ERR_OR_NULL(decon->d.event_log_header)) { - 
decon_warn("failed to alloc event log header buf[%d]. retry\n", + decon_warn("failed to alloc event log header buf[%zu]. retry\n", sizeof(struct dpu_log_header)); continue; } real_size = sizeof(struct dpu_log_header) + sizeof(struct dpu_log) * event_cnt; - pr_info("%s alloc total size %llx\n", __func__, real_size); + pr_info("%s alloc total size %zu\n", __func__, real_size); if (real_size >= rdx_mem_size) { - decon_warn("failed to alloc because over size[%d]. retry\n", + decon_warn("failed to alloc because over size[%zu]. retry\n", real_size); continue; } From 06880f5fed1ac521f1f7cbcc5faf81f14d332c4d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 16:00:41 +0300 Subject: [PATCH 144/439] drivers/vision/score/hardware/v3/score_scq: fix printk format errors Signed-off-by: Denis Efremov --- drivers/vision/score/hardware/v3/score_packet.h | 2 +- drivers/vision/score/hardware/v3/score_scq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/vision/score/hardware/v3/score_packet.h b/drivers/vision/score/hardware/v3/score_packet.h index fa91a8a1d383..166865f5b65b 100644 --- a/drivers/vision/score/hardware/v3/score_packet.h +++ b/drivers/vision/score/hardware/v3/score_packet.h @@ -18,7 +18,7 @@ #define MIN_PACKET_SIZE (sizeof(struct score_host_packet) + \ sizeof(struct score_host_packet_info)) -#define MAX_PACKET_SIZE (2048) +#define MAX_PACKET_SIZE (2048UL) enum score_host_packet_version { HOST_PKT_V1 = 0x1, diff --git a/drivers/vision/score/hardware/v3/score_scq.c b/drivers/vision/score/hardware/v3/score_scq.c index 118c62e5ff41..d22bd41bc49e 100644 --- a/drivers/vision/score/hardware/v3/score_scq.c +++ b/drivers/vision/score/hardware/v3/score_scq.c @@ -472,7 +472,7 @@ static int __score_scq_translate_packet(struct score_frame *frame) if (packet_size < MIN_PACKET_SIZE || packet_size > MAX_PACKET_SIZE) { ret = -EINVAL; - score_err("packet size is invalid (%u/MIN:%zu/MAX:%zu)\n", + score_err("packet size is invalid 
(%u/MIN:%lu/MAX:%lu)\n", packet_size, MIN_PACKET_SIZE, MAX_PACKET_SIZE); goto p_err; } From 7e39297c4fb06e5b1639dcc089be5c32e60ebaf8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Nov 2022 20:55:36 +0400 Subject: [PATCH 145/439] drivers/video/fbdev/exynos/dpu20/displayport.h: fix missing brackets Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/displayport.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport.h b/drivers/video/fbdev/exynos/dpu20/displayport.h index 3cd59c783af4..33bedbefed2e 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport.h +++ b/drivers/video/fbdev/exynos/dpu20/displayport.h @@ -74,9 +74,10 @@ extern int forced_resolution; #define displayport_info(fmt, ...) \ do { \ - if (displayport_log_level >= 6) \ + if (displayport_log_level >= 6) { \ pr_info("Displayport: " pr_fmt(fmt), ##__VA_ARGS__); \ dp_logger_print(fmt, ##__VA_ARGS__); \ + } \ } while (0) #define displayport_dbg(fmt, ...) 
\ From a3d230e41ded79600851fd1839a653060806167d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:57:57 +0300 Subject: [PATCH 146/439] drivers/battery_v2/sec_battery: fix power_supply_propval init Signed-off-by: Denis Efremov --- drivers/battery_v2/sec_battery.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/battery_v2/sec_battery.c b/drivers/battery_v2/sec_battery.c index 8f4b15fa3422..5eb41950e4c8 100644 --- a/drivers/battery_v2/sec_battery.c +++ b/drivers/battery_v2/sec_battery.c @@ -4203,9 +4203,11 @@ static void sec_bat_wireless_minduty_cntl(struct sec_battery_info *battery, unsi static void sec_bat_wireless_uno_cntl(struct sec_battery_info *battery, bool en) { - union power_supply_propval value = {0, }; + union power_supply_propval value = { + .intval = en + }; - battery->uno_en = value.intval = en; + battery->uno_en = en; pr_info("@Tx_Mode %s : Uno control %d\n", __func__, battery->uno_en); if (value.intval) { From 51afd1b8001032db0b66d6a4d1f112990204e604 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 29 Nov 2019 20:28:00 +0300 Subject: [PATCH 147/439] drivers/soc/samsung/exynos-hdcp/exynos-hdcp2: move global vars to .c file Signed-off-by: Denis Efremov --- .../soc/samsung/exynos-hdcp/exynos-hdcp2.c | 26 +++++++++++++++++++ .../soc/samsung/exynos-hdcp/exynos-hdcp2.h | 26 ++----------------- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c index 738073446146..45958a0088d5 100644 --- a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c +++ b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c @@ -55,6 +55,32 @@ uint32_t func_test_mode = 1; uint32_t func_test_mode; #endif +char *hdcp_session_st_str[] = { + "ST_INIT", + "ST_LINK_SETUP", + "ST_END", + NULL +}; + +char *hdcp_link_st_str[] = { + "ST_INIT", + "ST_H0_NO_RX_ATTACHED", + "ST_H1_TX_LOW_VALUE_CONTENT", + "ST_A0_DETERMINE_RX_HDCP_CAP", + 
"ST_A1_EXCHANGE_MASTER_KEY", + "ST_A2_LOCALITY_CHECK", + "ST_A3_EXCHANGE_SESSION_KEY", + "ST_A4_TEST_REPEATER", + "ST_A5_AUTHENTICATED", + "ST_A6_WAIT_RECEIVER_ID_LIST", + "ST_A7_VERIFY_RECEIVER_ID_LIST", + "ST_A8_SEND_RECEIVER_ID_LIST_ACK", + "ST_A9_CONTENT_STREAM_MGT", + "ST_END", + NULL +}; + + static long hdcp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rval; diff --git a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h index fe497b8c2913..9c9aab5985d4 100644 --- a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h +++ b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h @@ -41,30 +41,8 @@ #define TEMP_ERROR -ENOTTY -static char *hdcp_session_st_str[] = { - "ST_INIT", - "ST_LINK_SETUP", - "ST_END", - NULL -}; - -static char *hdcp_link_st_str[] = { - "ST_INIT", - "ST_H0_NO_RX_ATTACHED", - "ST_H1_TX_LOW_VALUE_CONTENT", - "ST_A0_DETERMINE_RX_HDCP_CAP", - "ST_A1_EXCHANGE_MASTER_KEY", - "ST_A2_LOCALITY_CHECK", - "ST_A3_EXCHANGE_SESSION_KEY", - "ST_A4_TEST_REPEATER", - "ST_A5_AUTHENTICATED", - "ST_A6_WAIT_RECEIVER_ID_LIST", - "ST_A7_VERIFY_RECEIVER_ID_LIST", - "ST_A8_SEND_RECEIVER_ID_LIST_ACK", - "ST_A9_CONTENT_STREAM_MGT", - "ST_END", - NULL -}; +extern char *hdcp_session_st_str[]; +extern char *hdcp_link_st_str[]; #define UPDATE_SESSION_STATE(sess, st) do { \ printk("[HDCP2]HDCP Session(%d): %s -> %s\n", sess->id, hdcp_session_st_str[sess->state], hdcp_session_st_str[st]); \ From 5c2d873a5f605c4790610b7f37e0f341c17c05f9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 01:10:16 +0300 Subject: [PATCH 148/439] drivers/usb/core/config: fix print format errors Signed-off-by: Denis Efremov --- drivers/usb/core/config.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 61086eb4eb43..caaa5d0ae022 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -225,7 +225,7 @@ static int 
usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (d->bEndpointAddress != to_usb_device(ddev)->hwinfo.fb_in_ep) { to_usb_device(ddev)->hwinfo.in_ep = d->bEndpointAddress; - dev_info(ddev, " This is IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, " This is IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } else dev_info(ddev, "IN ISO endpoint is same with FB #0%x\n", @@ -233,19 +233,19 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if ((d->bLength > 7) && (d->bSynchAddress != 0x0)) { to_usb_device(ddev)->hwinfo.fb_out_ep = d->bSynchAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } else { to_usb_device(ddev)->hwinfo.out_ep = d->bEndpointAddress; - dev_info(ddev, " This is OUT ISO endpoint #0%x 0x%p\n", + dev_info(ddev, " This is OUT ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); if ((d->bLength > 7) && (d->bSynchAddress != 0x0)) { to_usb_device(ddev)->hwinfo.fb_in_ep = d->bSynchAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } @@ -254,12 +254,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (d->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { to_usb_device(ddev)->hwinfo.fb_in_ep = d->bEndpointAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } else { to_usb_device(ddev)->hwinfo.fb_out_ep = d->bEndpointAddress; - dev_info(ddev, "Feedback OUT ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback OUT ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } From 841784fed33fcf6bf93e114fc0de912c2a82cc1f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 01:40:53 +0300 Subject: [PATCH 149/439] 
drivers/video/fbdev/exynos/dpu20/dpp_drv: fix printf format errors Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/dpp_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c index d0e3ff92a1cb..d26d2f94d5bb 100644 --- a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c @@ -1185,7 +1185,7 @@ static int dpp_probe(struct platform_device *pdev) if (IS_SUPPORT_WCG(attr)) dpp->attr |= (1 << DPP_ATTR_WCG); - dpp_info("DPP:INFO:%s:%x attr : %x", __func__, dpp->id, dpp->attr); + dpp_info("DPP:INFO:%s:%x attr : %lx", __func__, dpp->id, dpp->attr); #if 0 print_dpp_restrict(dpp->attr); #endif From 110ea6a4915c190050b007f9fb69bdbe24214db5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:55:57 +0300 Subject: [PATCH 150/439] b_r26p0/.../gpu_custom_interface.c: fix scnprintf() format Signed-off-by: Denis Efremov --- drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c b/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c index 29ac5d5f2a21..4ba994a38636 100644 --- a/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c +++ b/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c @@ -1916,7 +1916,7 @@ static ssize_t show_kernel_sysfs_gpu_memory(struct kobject *kobj, struct kobj_at kbase_device_put_list(kbdev_list); if (buffer_full) - ret += scnprintf(buf + ret, buf_size - ret, "error: buffer is full\n", ret); + ret += scnprintf(buf + ret, buf_size - ret, "error: %zi buffer is full\n", ret); return ret; } From d98f12692424c8ab1d8906e8d99e53b02086bb19 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 09:24:48 +0300 Subject: [PATCH 151/439] drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut: mark global vars as unused 
Signed-off-by: Denis Efremov --- .../video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h b/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h index 70ed0351def0..cfe2788d7675 100644 --- a/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h +++ b/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h @@ -480,12 +480,12 @@ static unsigned int TABLE_TMS_PQ[INDEX_TMAX][2] = { //*****************************************************************************************************// //***************************************** Bypass table **********************************************// //*****************************************************************************************************// -static unsigned int TABLE_TMS_BYPASS = 0x00000100; -static unsigned int * TABLE_SC_BYPASS = 0; -static unsigned int * TABLE_TM_BYPASS = 0; -static unsigned int * TABLE_GM_BYPASS = 0; -static unsigned int * TABLE_OETF_BYPASS = 0; -static unsigned int * TABLE_EOTF_BYPASS = 0; +static unsigned int TABLE_TMS_BYPASS __maybe_unused = 0x00000100; +static unsigned int * TABLE_SC_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_TM_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_GM_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_OETF_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_EOTF_BYPASS __maybe_unused = 0; //*****************************************************************************************************// //************************************** Indexed Gamut table ******************************************// From 26eea8b38ccc664d702563dff9bac5b6663cbc35 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 15:49:56 +0300 Subject: [PATCH 152/439] drivers/video/fbdev/exynos/panel/Makefile: disable unused-variable warning Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/Makefile | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/drivers/video/fbdev/exynos/panel/Makefile b/drivers/video/fbdev/exynos/panel/Makefile index f7e1f63136d8..006002a2a718 100644 --- a/drivers/video/fbdev/exynos/panel/Makefile +++ b/drivers/video/fbdev/exynos/panel/Makefile @@ -5,6 +5,8 @@ # Licensed under GPLv2 # +ccflags-y += $(call cc-disable-warning, unused-variable) + obj-$(CONFIG_EXYNOS_MIPI_DSIM) += timenval.o panel.o panel_bl.o dimming.o panel_drv.o panel_irc.o obj-$(CONFIG_EXYNOS_DECON_LCD_S6E3HF3) += s6e3hf3/s6e3hf3.o obj-$(CONFIG_EXYNOS_DECON_LCD_S6E3HF4) += s6e3hf4/s6e3hf4.o From d2f247fe831c983209b3a5367833638ac0774f10 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 01:55:28 +0300 Subject: [PATCH 153/439] drivers/video/fbdev/exynos/dpu20/decon_core: fix missing printk arguments Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 66dfd7ab1118..42286b86b139 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -4701,7 +4701,7 @@ static int decon_probe(struct platform_device *pdev) ret = create_wcg_sysfs(decon); if (ret) - decon_err("DECON:ERR:%s:faield to create sysfs for wcg\n"); + decon_err("DECON:ERR:%s:failed to create sysfs for wcg\n", __func__); #endif dpu_init_win_update(decon); decon_init_low_persistence_mode(decon); From 12e86a498fa566573a7fcfde4de254293b3fba02 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 16:02:00 +0300 Subject: [PATCH 154/439] sound/soc/samsung/abox/abox: fix missing printk arguments Signed-off-by: Denis Efremov --- sound/soc/samsung/abox/abox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox.c b/sound/soc/samsung/abox/abox.c index 50b43be81227..6683393df577 100644 --- a/sound/soc/samsung/abox/abox.c +++ 
b/sound/soc/samsung/abox/abox.c @@ -987,7 +987,7 @@ void abox_request_dram_on(struct device *dev, unsigned int id, bool on) ret = regmap_write(regmap, ABOX_SYSPOWER_CTRL, val); if (ret < 0) { - dev_err(dev, "syspower write failed\n", ret); + dev_err(dev, "syspower write failed (%d)\n", ret); return; } From 9d73c53cd39b63b560001e1d3dbfd82babf9ab3a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 22 Nov 2019 17:18:46 +0300 Subject: [PATCH 155/439] init/uh_fault_handler: fix missing printk arguments Signed-off-by: Denis Efremov --- init/uh_fault_handler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/uh_fault_handler.c b/init/uh_fault_handler.c index 4c5fee09fa4d..76d6497626ed 100644 --- a/init/uh_fault_handler.c +++ b/init/uh_fault_handler.c @@ -110,7 +110,7 @@ void uh_fault_handler(void) exception_class = esr_ec_unknown_reason; pr_alert("=============uH fault handler logging=============\n"); pr_alert("%s",exception_class_string[exception_class]); - pr_alert("[System registers]\n", cpu); + pr_alert("[System registers CPU: %u]\n", cpu); pr_alert("ESR_EL2: %x\tHCR_EL2: %llx\tHPFAR_EL2: %llx\n", uh_handler_data->esr_el2.bits, uh_handler_data->hcr_el2, uh_handler_data->hpfar_el2); From 07dfa1414e43e6940333f68a3761faf38da3e082 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 22 Nov 2019 17:19:30 +0300 Subject: [PATCH 156/439] include/trace/events/ems: fix missing printk arguments Signed-off-by: Denis Efremov --- include/trace/events/ems.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h index ca0cf7320d61..0239b4f00ed5 100644 --- a/include/trace/events/ems.h +++ b/include/trace/events/ems.h @@ -689,7 +689,7 @@ TRACE_EVENT(ems_select_service_cpu, __entry->backup_cpu = backup_cpu; ), - TP_printk("comm=%s pid=%d best_cpu=%d backup_cpu", + TP_printk("comm=%s pid=%d best_cpu=%d backup_cpu=%d", __entry->comm, __entry->pid, __entry->best_cpu, __entry->backup_cpu) ); 
From ff4d794c4444456003136791f6812152956ff133 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 07:56:12 +0300 Subject: [PATCH 157/439] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu: fix missing printk arguments Signed-off-by: Denis Efremov --- .../exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c index 877a4d7437f1..53a8b9fd125b 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c @@ -666,7 +666,7 @@ int fimc_is_mcu_erase(struct v4l2_subdev *subdev, u32 address, size_t len) xmit_bytes = 2 * erase.count; *((uint8_t *)&pbuf[ix]) = fimc_is_mcu_checksum(xmit, xmit_bytes); xmit_bytes++; - info("mcu xmit_bytess = %d, erase.count = %d"); + info("mcu xmit_bytes = %d, erase.count = %d", xmit_bytes, erase.count); /* transmit parameter */ ret = i2c_master_send(client, xmit, xmit_bytes); @@ -1596,7 +1596,7 @@ int fimc_is_mcu_set_aperture(struct v4l2_subdev *subdev, int onoff) return true; exit: - info("% Do not set aperture. onoff = %d", __FUNCTION__, onoff); + info("%s Do not set aperture. 
onoff = %d", __FUNCTION__, onoff); return false; } From 25fcae1fb1de0e41804fb22c73296e6bdf2a9373 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 16:16:14 +0300 Subject: [PATCH 158/439] sound/usb/card: fix missing printk arguments Signed-off-by: Denis Efremov --- sound/usb/card.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/card.c b/sound/usb/card.c index 5f4cf0688d37..afb4ab62dada 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -209,7 +209,7 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); } - dev_info(&dev->dev, "usb_host : %s %u:%d \n", __func__); + dev_info(&dev->dev, "usb_host : %s %u:%d \n", __func__, ctrlif, interface); return 0; } From cc13544202aff0ec8617fb7e4cb33712d1fd0c26 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 23:07:59 +0300 Subject: [PATCH 159/439] drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2: fix missing printk arguments Signed-off-by: Denis Efremov --- .../platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c index 4d8139ef861f..37e337e5767a 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c @@ -3333,7 +3333,7 @@ int fimc_is_sensor_front_start(struct fimc_is_device_sensor *device, mutex_lock(&camif_path_lock); ret = fimc_is_hw_camif_fix_up(device); if (ret) { - merr("failed to fix up CAM I/F", device, ret); + merr("failed to fix up CAM I/F(%d)", device, ret); ret = -EINVAL; mutex_unlock(&camif_path_lock); goto p_err; @@ -3341,7 +3341,7 @@ int fimc_is_sensor_front_start(struct fimc_is_device_sensor *device, ret = 
fimc_is_hw_camif_pdp_in_enable(device, true); if (ret) { - merr("failed to enable PDP IN", device, ret); + merr("failed to enable PDP IN(%d)", device, ret); ret = -EINVAL; mutex_unlock(&camif_path_lock); goto p_err; @@ -3451,7 +3451,7 @@ int fimc_is_sensor_front_stop(struct fimc_is_device_sensor *device) if (IS_ENABLED(USE_CAMIF_FIX_UP)) { ret = fimc_is_hw_camif_pdp_in_enable(device, false); if (ret) - merr("failed to enable PDP IN", device, ret); + merr("failed to enable PDP IN(%d)", device, ret); } reset_the_others: From 06d86d6a028de8a6ed8b698cb83f3ebb0221b873 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 11 Sep 2020 23:09:51 +0300 Subject: [PATCH 160/439] sound/usb/exynos_usb_audio: fix missing printk arguments Signed-off-by: Denis Efremov --- sound/usb/exynos_usb_audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/exynos_usb_audio.c b/sound/usb/exynos_usb_audio.c index ae95e2bc7d64..ff77fc6d0a1a 100644 --- a/sound/usb/exynos_usb_audio.c +++ b/sound/usb/exynos_usb_audio.c @@ -432,7 +432,7 @@ int exynos_usb_audio_hcd(struct usb_device *udev) */ if (ret == -EADDRINUSE) { cancel_work_sync(&usb_audio->usb_work); - pr_err("iommu unmapping not done. unmap here\n", ret); + pr_err("iommu unmapping not done. 
unmap here %d\n", ret); exynos_usb_audio_unmap_all(); ret = abox_iommu_map(dev, USB_AUDIO_XHCI_BASE, USB_AUDIO_XHCI_BASE, PAGE_SIZE * 16, 0); From bf0c10c3d678431cd0afd38e8a9c17980c51b649 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 12:02:09 +0300 Subject: [PATCH 161/439] drivers/vision/npu/npu-memory: fix printk format arguments Signed-off-by: Denis Efremov --- drivers/vision/npu/npu-memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/vision/npu/npu-memory.c b/drivers/vision/npu/npu-memory.c index da587f67faf7..02f3cdb95180 100644 --- a/drivers/vision/npu/npu-memory.c +++ b/drivers/vision/npu/npu-memory.c @@ -99,7 +99,7 @@ int npu_memory_map(struct npu_memory *memory, struct npu_memory_buffer *buffer) buffer->dma_buf = dma_buf_get(buffer->fd); if (IS_ERR_OR_NULL(buffer->dma_buf)) { - npu_err("dma_buf_get is fail(0x%08x)\n", buffer->dma_buf); + npu_err("dma_buf_get is fail(0x%p)\n", buffer->dma_buf); ret = -EINVAL; goto p_err; } @@ -231,7 +231,7 @@ int npu_memory_alloc(struct npu_memory *memory, struct npu_memory_buffer *buffer dma_buf = ion_alloc_dmabuf(heapname, size, flag); if (IS_ERR_OR_NULL(dma_buf)) { - npu_err("ion_alloc_dmabuf is fail(0x%08x)\n", dma_buf); + npu_err("ion_alloc_dmabuf is fail(0x%p)\n", dma_buf); ret = -EINVAL; goto p_err; } From b9d971fbd2b8ee28678284a4cbf88bc1e2b409b9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:45:25 +0300 Subject: [PATCH 162/439] drivers/battery_v2/mfc_charger: fix printk arguments Signed-off-by: Denis Efremov --- drivers/battery_v2/mfc_charger.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/battery_v2/mfc_charger.c b/drivers/battery_v2/mfc_charger.c index 372e844bc141..3664a6399c1a 100644 --- a/drivers/battery_v2/mfc_charger.c +++ b/drivers/battery_v2/mfc_charger.c @@ -2468,7 +2468,7 @@ static void mfc_wpc_rx_power_work(struct work_struct *work) union power_supply_propval value; - pr_info("%s: rx power = %d 
(0x%x), This W/A is only for Factory\n", __func__, charger->max_power_by_txid); + pr_info("%s: rx power = %d, This W/A is only for Factory\n", __func__, charger->max_power_by_txid); value.intval = charger->max_power_by_txid; psy_do_property("wireless", set, POWER_SUPPLY_PROP_WIRELESS_RX_POWER, value); @@ -3902,7 +3902,7 @@ static int mfc_chg_set_property(struct power_supply *psy, case POWER_SUPPLY_EXT_PROP_PAD_VOLT_CTRL: if(charger->pdata->wpc_vout_ctrl_lcd_on) { if (delayed_work_pending(&charger->wpc_vout_mode_work)) { - pr_info("%s : Already vout change. skip pad control\n"); + pr_info("%s : Already vout change. skip pad control\n", __func__); return 0; } if (val->intval && charger->is_afc_tx && From 7543165ac0b24420fff2a51cde5f9e997581f87e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:50:31 +0300 Subject: [PATCH 163/439] drivers/vision/npu/npu-log: fix printk arguments Signed-off-by: Denis Efremov --- drivers/vision/npu/npu-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vision/npu/npu-log.c b/drivers/vision/npu/npu-log.c index ac6bddb875ed..9dc54b384cc6 100644 --- a/drivers/vision/npu/npu-log.c +++ b/drivers/vision/npu/npu-log.c @@ -655,7 +655,7 @@ static int npu_store_log_dump(const size_t dump_size) total = 0; ret = spin_lock_safe_isr(&npu_log_lock); if (ret) { - pr_err("NPU log dump is not available - in interrupt context\n", total); + pr_err("NPU log dump is not available - in interrupt context\n"); goto err_exit; } From 95329706c98cdc80bd9ee3bfe128e943120afb51 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:20:29 +0300 Subject: [PATCH 164/439] drivers/redriver/ptn36502: fix printk arguments Signed-off-by: Denis Efremov --- drivers/redriver/ptn36502.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/redriver/ptn36502.c b/drivers/redriver/ptn36502.c index 1165c22233aa..29dc495e144e 100644 --- a/drivers/redriver/ptn36502.c +++ b/drivers/redriver/ptn36502.c 
@@ -135,7 +135,7 @@ int ptn36502_config(int config, int is_DFP) break; case CHECK_EXIST: - pr_err("%s: dummy\n"); + pr_err("%s: dummy\n", __func__); break; default: From afae7135e0b9e5e283c605e0c0c3a5773b0f57b0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:36:18 +0300 Subject: [PATCH 165/439] drivers/video/fbdev/exynos/dpu20/decon_dsi: fix printk arguments Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/decon_dsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_dsi.c b/drivers/video/fbdev/exynos/dpu20/decon_dsi.c index e0cfd52ddd3c..b35c34f1e866 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_dsi.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_dsi.c @@ -1827,7 +1827,7 @@ static int dpu_set_pre_df_dsim(struct decon_device *decon) return -EINVAL; } if (df_set->hs == 0) { - decon_err("[DYN_FREQ]:ERR:%s:df index : %d hs is 0 : %d\n", + decon_err("[DYN_FREQ]:ERR:%s:df index : %d hs is 0\n", __func__, status->target_df); return -EINVAL; } From 4676fcb2163c307aa9c80670de2977c052babdb7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:36:37 +0300 Subject: [PATCH 166/439] drivers/video/fbdev/exynos/panel/panel_spi: fix printk arguments Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/panel_spi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/panel_spi.c b/drivers/video/fbdev/exynos/panel/panel_spi.c index 2b6c0c860368..bb5ed4fa86b4 100644 --- a/drivers/video/fbdev/exynos/panel/panel_spi.c +++ b/drivers/video/fbdev/exynos/panel/panel_spi.c @@ -199,7 +199,7 @@ static int panel_spi_read_id(struct panel_spi_dev *spi_dev, u32 *id) return -EIO; *id = (rbuf[0] << 16) | (rbuf[1] << 8) | rbuf[2]; - pr_debug("%s: 0x06X\n", __func__, *id); + pr_debug("%s: 0x%06X\n", __func__, *id); return 0; } From 09c829c2d3ee0fb199f9fd0a6042d9a7b916c951 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 
29 Nov 2019 20:06:17 +0300 Subject: [PATCH 167/439] drivers/soc/samsung/acpm/acpm: fix uninit pointer Signed-off-by: Denis Efremov --- drivers/soc/samsung/acpm/acpm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/acpm/acpm.c b/drivers/soc/samsung/acpm/acpm.c index 2fbdf183f89a..54d769ad7c9e 100644 --- a/drivers/soc/samsung/acpm/acpm.c +++ b/drivers/soc/samsung/acpm/acpm.c @@ -99,7 +99,7 @@ static int plugins_init(void) unsigned int plugin_id; char name[50]; const char *fw_name = NULL; - void __iomem *fw_base_addr; + void __iomem *fw_base_addr = NULL; struct device_node *node, *child; const __be32 *prop; unsigned int offset; From 3930d4395cf3dab1239baca915bbf32c67a0124c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 23:44:20 +0300 Subject: [PATCH 168/439] drivers/staging/android/freecess_pkg: fix uninit var Signed-off-by: Denis Efremov --- drivers/staging/android/freecess_pkg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/android/freecess_pkg.c b/drivers/staging/android/freecess_pkg.c index 91046602d9ac..14738a5fb6d8 100644 --- a/drivers/staging/android/freecess_pkg.c +++ b/drivers/staging/android/freecess_pkg.c @@ -240,7 +240,7 @@ static struct nf_hook_ops freecess_nf_ops[] = { static int __init kfreecess_pkg_init(void) { - int ret; + int ret = 0; int i; struct net *net; From 6b7810e6b7870c83aba9540847d867bf93b9a4dc Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 02:00:26 +0300 Subject: [PATCH 169/439] drivers/video/fbdev/exynos/dpu20/decon_core: fix uninit pointer Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 42286b86b139..9e69a218219e 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -2276,7 
+2276,7 @@ static void decon_release_old_bufs(struct decon_device *decon, static int decon_set_hdr_info(struct decon_device *decon, struct decon_reg_data *regs, int win_num, bool on) { - struct exynos_video_meta *video_meta; + struct exynos_video_meta *video_meta = NULL; #if defined(CONFIG_EXYNOS_DISPLAYPORT) int ret = 0; #endif From 9d8028e26bf5c4a69c155b4a2b7dfd5c0b6cdd8f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 4 Dec 2019 09:09:56 +0300 Subject: [PATCH 170/439] drivers/video/fbdev/exynos/dpu20/event_log: fix uninit var Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/event_log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/event_log.c b/drivers/video/fbdev/exynos/dpu20/event_log.c index 029574147004..0a8ec2638253 100644 --- a/drivers/video/fbdev/exynos/dpu20/event_log.c +++ b/drivers/video/fbdev/exynos/dpu20/event_log.c @@ -841,7 +841,7 @@ void DPU_EVENT_SHOW(struct seq_file *s, struct decon_device *decon) int latest = idx; struct timeval tv; ktime_t prev_ktime; - struct dsim_device *dsim; + struct dsim_device *dsim = NULL; if (IS_ERR_OR_NULL(decon->d.event_log)) return; From 10b3573f890e178020b309df0030c3a1a976bb14 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 01:06:47 +0300 Subject: [PATCH 171/439] drivers/soc/samsung/exynos-bcm_dbg: fix uninit var Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-bcm_dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-bcm_dbg.c b/drivers/soc/samsung/exynos-bcm_dbg.c index 0ca36d233d13..64df919a454b 100644 --- a/drivers/soc/samsung/exynos-bcm_dbg.c +++ b/drivers/soc/samsung/exynos-bcm_dbg.c @@ -683,7 +683,7 @@ static int exynos_bcm_dbg_run_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_inf struct exynos_bcm_dbg_data *data) { unsigned int cmd[4] = {0, 0, 0, 0}; - unsigned int run, low_ktime, high_ktime; + unsigned int run = 0, low_ktime, high_ktime; int ret = 0; u64 
ktime; unsigned long flags; From 9441486a8dca82a2af67b78cdf8db1d32fb20b08 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 01:08:15 +0300 Subject: [PATCH 172/439] drivers/soc/samsung/exynos-hiu: fix uninit var Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-hiu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-hiu.c b/drivers/soc/samsung/exynos-hiu.c index 20eca9d64ce2..05770f1d5132 100644 --- a/drivers/soc/samsung/exynos-hiu.c +++ b/drivers/soc/samsung/exynos-hiu.c @@ -662,7 +662,7 @@ static struct attribute_group exynos_hiu_attr_group = { static int hiu_dt_parsing(struct device_node *dn) { const char *buf; - unsigned int val; + unsigned int val = 0; int ret = 0; ret |= of_property_read_u32(dn, "operation-mode", &data->operation_mode); From bb45a7e429f558d000cacce9fe338bf690f75dc8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 01:08:31 +0300 Subject: [PATCH 173/439] drivers/spi/spi-s3c64xx: fix uninit var Signed-off-by: Denis Efremov --- drivers/spi/spi-s3c64xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 44be84ecc467..16ddbe978da6 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -960,7 +960,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master, list_for_each_entry(xfer, &msg->transfers, transfer_list) { unsigned long flags; - int use_dma; + int use_dma = 0; reinit_completion(&sdd->xfer_completion); From c05d02ca6ae10bc072024f98af17780ba0a47707 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 18:13:10 +0300 Subject: [PATCH 174/439] net/unix/af_unix: fix uninit var Signed-off-by: Denis Efremov --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 94d33f541b7d..9936a1334e5f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -995,7 +995,7 @@ 
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; int err; - unsigned int hash; + unsigned int hash = 0; struct unix_address *addr; struct hlist_head *list; struct path path = { }; From 396a3d2276e8f82b7e3a5f79cb029b8fa03baa44 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 25 Dec 2020 15:21:19 +0300 Subject: [PATCH 175/439] bcmdhd_100_15/wl_cfg_btcoex: fix uninit var Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c index 2d59331cc591..d9934e0a1eb8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c @@ -146,13 +146,13 @@ static bool btcoex_is_sco_active(struct net_device *dev) ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); - WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); - if (ioc_res < 0) { WL_ERR(("ioc read btc params error\n")); break; } + WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); + if ((param27 & 0x6) == 2) { /* count both sco & esco */ sco_id_cnt++; } From 64014c0712e0422701885c83c9a2ac8039ac5609 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 20:30:03 +0300 Subject: [PATCH 176/439] drivers/gpu/exynos/g2d/g2d_task: fix uninit var Signed-off-by: Denis Efremov --- drivers/gpu/exynos/g2d/g2d_task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/exynos/g2d/g2d_task.c b/drivers/gpu/exynos/g2d/g2d_task.c index 5008d9f89c50..661f38f77179 100644 --- a/drivers/gpu/exynos/g2d/g2d_task.c +++ b/drivers/gpu/exynos/g2d/g2d_task.c @@ -457,7 +457,7 @@ void g2d_destroy_tasks(struct g2d_device *g2d_dev) static struct g2d_task 
*g2d_create_task(struct g2d_device *g2d_dev, int id) { struct g2d_task *task; - int i, ret; + int i, ret = 0; task = kzalloc(sizeof(*task), GFP_KERNEL); if (!task) From d41c3fba7ed633461d941237fe70ded4706eee4e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:20:10 +0300 Subject: [PATCH 177/439] bcmdhd_101_16/wl_cfg_btcoex.c: fix uninit var Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c index 09380fb96cfa..0798e739f914 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c @@ -148,13 +148,13 @@ static bool btcoex_is_sco_active(struct net_device *dev) ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); - WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); - if (ioc_res < 0) { WL_ERR(("ioc read btc params error\n")); break; } + WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); + if ((param27 & 0x6) == 2) { /* count both sco & esco */ sco_id_cnt++; } From 1e2716d897483e0205bb74300ca40f61433dbc21 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:21:33 +0300 Subject: [PATCH 178/439] bcmdhd_101_16/dhd_debug.c: fix uninit var Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c index 267b2a13057e..d0941e8de807 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c @@ -1289,6 +1289,7 @@ dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_b { dhd_dbg_ring_status_t 
ring_status; uint32 rlen = 0; + int ret = 0; rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE); @@ -1299,9 +1300,9 @@ dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_b return; } - __dhd_dbg_get_ring_status(ring, &ring_status); + ret = __dhd_dbg_get_ring_status(ring, &ring_status); - if (ring_status.written_bytes != ring_status.read_bytes) { + if (ret == BCME_OK && ring_status.written_bytes != ring_status.read_bytes) { trace_buf_info->availability = NEXT_BUF_AVAIL; } } From 0da2a0be90059c180f22a1778fcc7bd9e3569fcd Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:56:49 +0300 Subject: [PATCH 179/439] drivers/soc/samsung/exynos-sci.c: fix uninit var Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos-sci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/exynos-sci.c b/drivers/soc/samsung/exynos-sci.c index e5eeb5d6afd6..e86832081a61 100644 --- a/drivers/soc/samsung/exynos-sci.c +++ b/drivers/soc/samsung/exynos-sci.c @@ -696,7 +696,7 @@ static ssize_t show_llc_region_alloc(struct device *dev, struct platform_device, dev); struct exynos_sci_data *data = platform_get_drvdata(pdev); ssize_t count = 0; - unsigned int region_index; + unsigned int region_index = 0; int ret; ret = exynos_sci_llc_region_alloc(data, SCI_IPC_GET, ®ion_index, 0); @@ -745,7 +745,7 @@ static ssize_t show_llc_enable(struct device *dev, struct platform_device, dev); struct exynos_sci_data *data = platform_get_drvdata(pdev); ssize_t count = 0; - unsigned int enable; + unsigned int enable = 0; int ret; ret = exynos_sci_llc_enable(data, SCI_IPC_GET, &enable); From 5c0896d2a50a73ef9e1217113bf614b88f2432d3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:55:01 +0300 Subject: [PATCH 180/439] drivers/battery_v2/mfc_charger: fix sscanf Signed-off-by: Denis Efremov --- drivers/battery_v2/mfc_charger.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 
deletions(-) diff --git a/drivers/battery_v2/mfc_charger.c b/drivers/battery_v2/mfc_charger.c index 3664a6399c1a..a7bb1835992f 100644 --- a/drivers/battery_v2/mfc_charger.c +++ b/drivers/battery_v2/mfc_charger.c @@ -5370,8 +5370,8 @@ ssize_t mfc_store_attrs(struct device *dev, struct power_supply *psy = dev_get_drvdata(dev); struct mfc_charger_data *charger = power_supply_get_drvdata(psy); const ptrdiff_t offset = attr - mfc_attrs; + unsigned int header, data_com, data_val; int x, ret; - u8 header, data_com, data_val; dev_info(charger->dev, "%s \n", __func__); @@ -5399,8 +5399,9 @@ ssize_t mfc_store_attrs(struct device *dev, break; case MFC_PACKET: if (sscanf(buf, "0x%4x 0x%4x 0x%4x\n", &header, &data_com, &data_val) == 3) { - dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, header, data_com, data_val); - mfc_send_packet(charger, header, data_com, &data_val, 1); + u8 u8header = header, u8data_com = data_com, u8data_val = data_val; + dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, u8header, u8data_com, u8data_val); + mfc_send_packet(charger, u8header, u8data_com, &u8data_val, 1); } ret = count; break; From e87c242504de1ae39fc4797f1011ff06af90d097 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Dec 2019 15:52:17 +0300 Subject: [PATCH 181/439] drivers/video/fbdev/exynos/panel/sysfs: fix sscanf input format Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c b/drivers/video/fbdev/exynos/panel/sysfs.c index bea486939931..9508cbedad86 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -363,7 +363,7 @@ static ssize_t gamma_interpolation_test_store(struct device *dev, return -EINVAL; } - ret = sscanf(buf, "%x %x %x %x %x %x", + ret = sscanf(buf, "%hhx %hhx %hhx %hhx %hhx %hhx", &write_buf[0], &write_buf[1], &write_buf[2], &write_buf[3], &write_buf[4], 
&write_buf[5]); if (ret != 6) { From 42c3aaff482c141573b5bdd73fd4733a2900b7b1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 23 Nov 2019 13:50:50 +0300 Subject: [PATCH 182/439] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu: fix missing '=' Signed-off-by: Denis Efremov --- .../exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c index 53a8b9fd125b..cd16da8c1728 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c @@ -3627,7 +3627,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, mcu = kzalloc(sizeof(struct fimc_is_mcu) * sensor_id_len, GFP_KERNEL); if (!mcu) { err("fimc_is_mcu is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } @@ -3641,7 +3641,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, ois = kzalloc(sizeof(struct fimc_is_ois) * sensor_id_len, GFP_KERNEL); if (!ois) { err("fimc_is_ois is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } @@ -3655,7 +3655,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, ois_device = kzalloc(sizeof(struct fimc_is_device_ois), GFP_KERNEL); if (!ois_device) { err("fimc_is_device_ois is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } From 5550484387b782de341ab4e58818a4a0651e1f3a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Dec 2019 01:05:18 +0300 Subject: [PATCH 183/439] drivers/staging/android/ion/ion_debug: fix wrong argument in printk Signed-off-by: Denis Efremov --- drivers/staging/android/ion/ion_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/android/ion/ion_debug.c b/drivers/staging/android/ion/ion_debug.c index d9ccabe00cfa..0eb25a87ba14 100644 --- 
a/drivers/staging/android/ion/ion_debug.c +++ b/drivers/staging/android/ion/ion_debug.c @@ -372,7 +372,7 @@ void ion_debug_heap_init(struct ion_heap *heap) path = dentry_path(heap->dev->heaps_debug_root, buf, 256); - perrfn("failed to create %s/%s", path, heap_file); + perrfn("failed to create %s/%s", path, heap->name); } } From 7f804a21f32b2a3dae3934e01bdb910e917144c3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:00:39 +0300 Subject: [PATCH 184/439] net/ncm/ncm: check copy_from_user result Signed-off-by: Denis Efremov --- net/ncm/ncm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ncm/ncm.c b/net/ncm/ncm.c index 796ee5ec610f..971551aa760f 100644 --- a/net/ncm/ncm.c +++ b/net/ncm/ncm.c @@ -855,7 +855,9 @@ static ssize_t ncm_write(struct file *file, const char __user *buf, size_t count return -EACCES; } memset(intermediate_string,'\0',sizeof(intermediate_string)); - copy_from_user(intermediate_string,buf,sizeof(intermediate_string)-1); + if (copy_from_user(intermediate_string,buf,sizeof(intermediate_string)-1)) { + return -EINVAL; + } intermediate_value = simple_strtol(intermediate_string, NULL, 10); if (intermediate_value > 0) { update_intermediate_timeout(intermediate_value); From 594d7b2171600b2f1c8b9f19c8b7cd4da1d112a9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:02:41 +0300 Subject: [PATCH 185/439] kernel/sched/ems/core: check sysfs_create_file() return values Signed-off-by: Denis Efremov --- kernel/sched/ems/core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c index e6baed836569..c26a6ca4822f 100644 --- a/kernel/sched/ems/core.c +++ b/kernel/sched/ems/core.c @@ -305,11 +305,22 @@ struct kobject *ems_kobj; static int __init init_sysfs(void) { ems_kobj = kobject_create_and_add("ems", kernel_kobj); + if (!ems_kobj) + return -ENOMEM; - sysfs_create_file(ems_kobj, &sched_topology_attr.attr); - 
sysfs_create_file(ems_kobj, &eff_mode_attr.attr); + if (sysfs_create_file(ems_kobj, &sched_topology_attr.attr) < 0) + goto topology_err; + if (sysfs_create_file(ems_kobj, &eff_mode_attr.attr) < 0) + goto mode_err; return 0; + +mode_err: + sysfs_remove_file(ems_kobj, &sched_topology_attr.attr); +topology_err: + kobject_put(ems_kobj); + ems_kobj = NULL; + return -ENOMEM; } core_initcall(init_sysfs); From 2d4548dcd0d17a4de7be9bf907ecef42f4ffef2a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:20:49 +0300 Subject: [PATCH 186/439] drivers/soc/samsung/debug/exynos9820-itmon: check strtoul results Signed-off-by: Denis Efremov --- drivers/soc/samsung/debug/exynos9820-itmon.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/debug/exynos9820-itmon.c b/drivers/soc/samsung/debug/exynos9820-itmon.c index 33029c5371f5..d015294ca7af 100644 --- a/drivers/soc/samsung/debug/exynos9820-itmon.c +++ b/drivers/soc/samsung/debug/exynos9820-itmon.c @@ -1747,7 +1747,8 @@ static ssize_t itmon_scandump_store(struct kobject *kobj, { unsigned long val = 0; - kstrtoul(buf, 16, &val); + if (kstrtoul(buf, 16, &val) < 0) + return -EINVAL; g_itmon->pdata->sysfs_scandump = !!val; return count; @@ -1772,7 +1773,8 @@ static ssize_t itmon_s2d_store(struct kobject *kobj, { unsigned long val = 0; - kstrtoul(buf, 16, &val); + if (kstrtoul(buf, 16, &val) < 0) + return -EINVAL; g_itmon->pdata->sysfs_s2d = !!val; return count; From 1cf8ef08fbc20ead13ebd851bdf0b4b3b24a47a5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:29:04 +0300 Subject: [PATCH 187/439] security/samsung/defex_lsm/pack_rules: fix strncpy() warning Signed-off-by: Denis Efremov --- security/samsung/defex_lsm/pack_rules.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/samsung/defex_lsm/pack_rules.c b/security/samsung/defex_lsm/pack_rules.c index ab59686b26ed..1deb8d533849 100644 --- a/security/samsung/defex_lsm/pack_rules.c +++ 
b/security/samsung/defex_lsm/pack_rules.c @@ -12,7 +12,7 @@ #include #include "include/defex_rules.h" -#define SAFE_STRCOPY(dst, src) do { strncpy(dst, src, sizeof(dst)); dst[sizeof(dst) - 1] = 0; } while(0) +#define SAFE_STRCOPY(dst, src) do { strncpy(dst, src, sizeof(dst) - 1); dst[sizeof(dst) - 1] = 0; } while(0) const struct feature_match_entry feature_match[] = { {"feature_safeplace_path", feature_safeplace_path}, From 6780657d80cd989713650f6d8e655bdf24696099 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:30:28 +0300 Subject: [PATCH 188/439] drivers/video/fbdev/exynos/panel: move str_stm_fied to c file Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/panel.h | 14 -------------- drivers/video/fbdev/exynos/panel/sysfs.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/video/fbdev/exynos/panel/panel.h b/drivers/video/fbdev/exynos/panel/panel.h index 6469dd16a338..0e90f55206f2 100644 --- a/drivers/video/fbdev/exynos/panel/panel.h +++ b/drivers/video/fbdev/exynos/panel/panel.h @@ -807,20 +807,6 @@ enum stm_field_num { STM_V_THRES, STM_FIELD_MAX }; - -static const char *str_stm_fied[STM_FIELD_MAX] = { - "stm_ctrl_en=", - "stm_max_opt=", - "stm_default_opt=", - "stm_dim_step=", - "stm_frame_period=", - "stm_min_sect=", - "stm_pixel_period=", - "stm_line_period=", - "stm_min_move=", - "stm_m_thres=", - "stm_v_thres=" -}; #endif enum { diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c b/drivers/video/fbdev/exynos/panel/sysfs.c index 9508cbedad86..bdf14b146b6e 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -33,6 +33,23 @@ #ifdef CONFIG_DYNAMIC_FREQ #include "dynamic_freq.h" #endif + +#ifdef CONFIG_SUPPORT_ISC_TUNE_TEST +static const char *str_stm_fied[STM_FIELD_MAX] = { + "stm_ctrl_en=", + "stm_max_opt=", + "stm_default_opt=", + "stm_dim_step=", + "stm_frame_period=", + "stm_min_sect=", + "stm_pixel_period=", + 
"stm_line_period=", + "stm_min_move=", + "stm_m_thres=", + "stm_v_thres=" +}; +#endif + static DEFINE_MUTEX(sysfs_lock); char *mcd_rs_name[MAX_MCD_RS] = { From 3fd59b53093ad42936ff43cb5c40b1727261b69a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:34:04 +0300 Subject: [PATCH 189/439] drivers/samsung/debug: check kstrtol results Signed-off-by: Denis Efremov --- drivers/samsung/debug/sec_debug_reset_reason.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_reset_reason.c b/drivers/samsung/debug/sec_debug_reset_reason.c index 0d00ffe2745a..6565de491dda 100644 --- a/drivers/samsung/debug/sec_debug_reset_reason.c +++ b/drivers/samsung/debug/sec_debug_reset_reason.c @@ -159,7 +159,8 @@ static void parse_pwrsrc_rs(struct outbuf *buf) unsigned long tmp; long long_pwrsrc_rs; - kstrtol(pwrsrc_rs, 16, &long_pwrsrc_rs); + if (kstrtol(pwrsrc_rs, 16, &long_pwrsrc_rs)) + return; secdbg_write_buf(buf, 0, "OFFSRC::"); tmp = long_pwrsrc_rs & 0xff0000000000; From 65dfb894b92253f034a0271daecc4001025d4862 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 10 Jan 2021 18:57:34 +0300 Subject: [PATCH 190/439] .../dpu20/displayport_drv.c: check kstrtouint() results Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index bf924a06d258..111bbc9a4535 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -3728,8 +3728,8 @@ static int displayport_update_hmd_list(struct displayport_device *displayport, c ret = -EPERM; goto exit; } - kstrtouint(tok, 10, &num_hmd); - if (num_hmd > MAX_NUM_HMD) { + ret = kstrtouint(tok, 10, &num_hmd); + if (ret || num_hmd > MAX_NUM_HMD) { displayport_err("invalid list num %d\n", num_hmd); num_hmd = 0; 
ret = -EPERM; @@ -3747,14 +3747,20 @@ static int displayport_update_hmd_list(struct displayport_device *displayport, c tok = strsep(&p, ","); if (tok == NULL || *tok == 0xa/*LF*/) break; - kstrtouint(tok, 16, &val); + if (kstrtouint(tok, 16, &val)) { + ret = -EINVAL; + break; + } displayport->hmd_list[j].ven_id = val; /* PID */ tok = strsep(&p, ","); if (tok == NULL || *tok == 0xa/*LF*/) break; - kstrtouint(tok, 16, &val); + if (kstrtouint(tok, 16, &val)) { + ret = -EINVAL; + break; + } displayport->hmd_list[j].prod_id = val; displayport_info("HMD%02d: %s, 0x%04x, 0x%04x\n", j, From b7b4b5e71a493af7b27ddf522a0dbcf9cbf31b6a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:37:13 +0300 Subject: [PATCH 191/439] drivers/fingerprint: move sensor_status from header file Signed-off-by: Denis Efremov --- drivers/fingerprint/et5xx-spi.c | 3 +++ drivers/fingerprint/fingerprint.h | 2 -- drivers/fingerprint/qbt2000_common.c | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/fingerprint/et5xx-spi.c b/drivers/fingerprint/et5xx-spi.c index 7a06dc9e7470..8ccfa1786f1b 100644 --- a/drivers/fingerprint/et5xx-spi.c +++ b/drivers/fingerprint/et5xx-spi.c @@ -48,6 +48,9 @@ static DEFINE_MUTEX(device_list_lock); int fpsensor_goto_suspend =0; #endif +static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", + "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", "et7xx", "goodixopt"}; + static int gpio_irq; static struct etspi_data *g_data; static DECLARE_WAIT_QUEUE_HEAD(interrupt_waitq); diff --git a/drivers/fingerprint/fingerprint.h b/drivers/fingerprint/fingerprint.h index 13c764c1e0c9..f0d5f4ae444a 100644 --- a/drivers/fingerprint/fingerprint.h +++ b/drivers/fingerprint/fingerprint.h @@ -41,8 +41,6 @@ enum { }; #define SENSOR_STATUS_SIZE 12 -static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", - "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", 
"et7xx", "goodixopt"}; /* For Finger Detect Mode */ enum { diff --git a/drivers/fingerprint/qbt2000_common.c b/drivers/fingerprint/qbt2000_common.c index 195299974ff1..3178910c9609 100644 --- a/drivers/fingerprint/qbt2000_common.c +++ b/drivers/fingerprint/qbt2000_common.c @@ -16,6 +16,10 @@ static struct qbt2000_drvdata *g_data = NULL; +static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", + "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", "et7xx", "goodixopt"}; + + /* * struct ipc_msg_type_to_fw_event - * entry in mapping between an IPC message type to a firmware event From 57bf8103cd35039032a5d5519285244b8e622e04 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:38:03 +0300 Subject: [PATCH 192/439] drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c: fix argv check Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c index ba4a2fa77810..c9d1a86f8157 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c @@ -4120,7 +4120,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) htod16(WL_PKT_FILTER_MFLAG_NEG); (argv[i])++; } - if (argv[i] == '\0') { + if (*argv[i] == '\0') { printf("Pattern not provided\n"); goto fail; } From a15d143cb67c9feb3d50c22aa8f53a32ee82457d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:38:29 +0300 Subject: [PATCH 193/439] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c: fix uninit var Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c 
b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c index 0a4ab598d818..6dc91e1f74c5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c @@ -272,7 +272,7 @@ wl_gather_ap_stadata(void *handle, void *event_info, u8 event) wl_event_msg_t *e; wl_ap_sta_data_t *sta_data; - wl_ap_sta_data_t temp_sta_data; + wl_ap_sta_data_t temp_sta_data = {0}; void *data = NULL; int i; int ret; From 4f2e98f01bdb6519ae891158ff9a022d5619bc2c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 22 Feb 2021 17:30:20 +0300 Subject: [PATCH 194/439] bcmdhd_101_16/dhd_rtt.c: fix uninit var Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c index 46f415c2387f..7f6c5d5adbe5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c @@ -1571,7 +1571,7 @@ static int dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version) { int ret; - ftm_subcmd_info_t subcmd_info; + ftm_subcmd_info_t subcmd_info = {}; subcmd_info.name = "ver"; subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION; subcmd_info.handler = NULL; From 18fb287bcc36c0802b0c8d137a8dfe9ec942373d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:39:56 +0300 Subject: [PATCH 195/439] drivers/ccic/max77705_usbc: fix multiple assignments warning Signed-off-by: Denis Efremov --- drivers/ccic/max77705_usbc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/ccic/max77705_usbc.c b/drivers/ccic/max77705_usbc.c index ba9893f534f4..aeb15c951e15 100644 --- a/drivers/ccic/max77705_usbc.c +++ b/drivers/ccic/max77705_usbc.c @@ -2117,9 +2117,10 @@ void max77705_usbc_clear_queue(struct max77705_usbc_platform_data *usbc_data) while 
(!is_empty_usbc_cmd_queue(cmd_queue)) { init_usbc_cmd_data(&cmd_data); dequeue_usbc_cmd(cmd_queue, &cmd_data); - if (max77705_check_recover_opcode(cmd_data.opcode)) - usbc_data->recover_opcode_list[cmd_data.opcode] - = usbc_data->need_recover = true; + if (max77705_check_recover_opcode(cmd_data.opcode)) { + usbc_data->recover_opcode_list[cmd_data.opcode] = true; + usbc_data->need_recover = true; + } } usbc_data->opcode_stamp = 0; msg_maxim("OUT"); From 3f2e30ab383e4407fa52902c4e95190c89b0bd84 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:40:40 +0300 Subject: [PATCH 196/439] drivers/hid/hid-samsung: fix set_bit(EV_REP, hi->input->evbit) call Signed-off-by: Denis Efremov --- drivers/hid/hid-samsung.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index f8746ddadf77..977f50de5b2e 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -142,8 +142,8 @@ static int samsung_kbd_input_mapping(struct hid_device *hdev, usage->hid & HID_USAGE); if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) { - switch (usage->hid & HID_USAGE) { set_bit(EV_REP, hi->input->evbit); + switch (usage->hid & HID_USAGE) { /* Only for UK keyboard */ /* key found */ #ifdef CONFIG_HID_KK_UPGRADE @@ -356,8 +356,8 @@ static int samsung_universal_kbd_input_mapping(struct hid_device *hdev, usage->hid & HID_USAGE); if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) { - switch (usage->hid & HID_USAGE) { set_bit(EV_REP, hi->input->evbit); + switch (usage->hid & HID_USAGE) { /* Only for UK keyboard */ /* key found */ #ifdef CONFIG_HID_KK_UPGRADE From 236c35943478415892cbf46fe19ac6a44010c375 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 13 Sep 2020 23:14:22 +0300 Subject: [PATCH 197/439] drivers/samsung/sec_dump_sink: check kstrtouint() result Signed-off-by: Denis Efremov --- drivers/samsung/sec_dump_sink.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/drivers/samsung/sec_dump_sink.c b/drivers/samsung/sec_dump_sink.c index 167e0822e598..04d400248854 100644 --- a/drivers/samsung/sec_dump_sink.c +++ b/drivers/samsung/sec_dump_sink.c @@ -27,7 +27,9 @@ static int initialized; static int sec_sdcard_ramdump(const char *val, const struct kernel_param *kp) { - kstrtouint(val, 16, &dump_sink); + if (kstrtouint(val, 16, &dump_sink)) + return 0; + pr_crit("%s: %s %x\n", __func__, val, dump_sink); if (!initialized) From 90f5db46a98e7da572b572909fa4fce029ccdc4c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:52:19 +0300 Subject: [PATCH 198/439] drivers/sensorhub/brcm/ssp: use int instead of bool Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/ssp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp.h b/drivers/sensorhub/brcm/ssp.h index 0b679d9a8fb5..30a86de66fe3 100644 --- a/drivers/sensorhub/brcm/ssp.h +++ b/drivers/sensorhub/brcm/ssp.h @@ -1026,7 +1026,7 @@ struct ssp_data { /* AP suspend check flag*/ bool IsAPsuspend; /* no ack about mcu_resp pin*/ - bool IsNoRespCnt; + int IsNoRespCnt; /* hall ic */ bool hall_ic_status; // 0: open 1: close }; From eeeb5422f6406308b7be5436c1a4044e638f4cba Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:53:56 +0300 Subject: [PATCH 199/439] drivers/soc/samsung/exynos_cpu_perf: drop redundant sprintf() args Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c | 2 +- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c index 462c55f75669..7beb28db6ab7 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c @@ -144,7 +144,7 @@ static int cpufreq_log_thread(void *data) } } // mif, gpu, task - 
ret += snprintf(buf + ret, buf_size - ret, "05-mif_cur 06-gpu_util 06-gpu_cur 07-task_cpu\n", grp_num, cpu); + ret += snprintf(buf + ret, buf_size - ret, "05-mif_cur 06-gpu_util 06-gpu_cur 07-task_cpu\n"); //--------------------- // body diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c index 0ffff700f4c2..4c047272e3e8 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c @@ -362,7 +362,7 @@ static ssize_t show_result(char *buf) if (cpu == cluster_first_cpu[cluster_index]) { /* header: cpufreq */ - ret += snprintf(buf + ret, PAGE_SIZE - ret, "#freq ", cpu); + ret += snprintf(buf + ret, PAGE_SIZE - ret, "#freq "); for (freq = 0; freq < MAX_FREQ; freq++) { freq_value = cpufreq_list[cluster_index][freq]; if (freq_value == 0) { From fd4f2638c135208a430516a907259d5fe9f7ef9c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:55:18 +0300 Subject: [PATCH 200/439] drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq: cast buf to char * Signed-off-by: Denis Efremov --- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c index 7beb28db6ab7..786fc6dcd5ee 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c @@ -268,7 +268,7 @@ static int run_seq_show(struct seq_file *file, void *iter) if (is_running) { seq_printf(file, "NO RESULT\n"); } else { - seq_printf(file, "%s", buf); // PRINT RESULT + seq_printf(file, "%s", (char *)buf); // PRINT RESULT } return 0; } From 9c56f15401f7e36a66c80d2258c8521a2312d8c5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 10:57:26 +0300 Subject: [PATCH 201/439] 
drivers/sensorhub/brcm/sx9330: fix regist, val type to unsigned Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/sx9330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index f83bab018699..267c43700639 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -632,7 +632,7 @@ static ssize_t sx9330_set_offset_calibration_store(struct device *dev, static ssize_t sx9330_register_write_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int regist = 0, val = 0; + uint32_t regist, val = 0; struct sx9330_p *data = dev_get_drvdata(dev); if (sscanf(buf, "%6x,%10x", ®ist, &val) != 2) { From 832ba46b3bc2bd267d8de8bb72b86299bd748ead Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:38:45 +0300 Subject: [PATCH 202/439] drivers/video/fbdev/exynos/dpu20/dpp_drv: fix reduntant arg check Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/dpp_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c index d26d2f94d5bb..24554dbdad42 100644 --- a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c @@ -654,8 +654,7 @@ static long dpp_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg break; case DPP_STOP: - if (&arg != NULL) - reset = (bool)arg; + reset = (bool)arg; #ifdef CONFIG_EXYNOS_MCD_HDR ret = dpp_mcd_stop(dpp); #endif From dc9f9c654374b5707530ea33cc360b70ed80cbf1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:47:59 +0300 Subject: [PATCH 203/439] drivers/video/fbdev/exynos/panel/sysfs: drop redundant snprintf() arg Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/panel/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c 
b/drivers/video/fbdev/exynos/panel/sysfs.c index bdf14b146b6e..9cba2004219e 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -1279,7 +1279,7 @@ static ssize_t self_mask_check_show(struct device *dev, len = snprintf(buf, PAGE_SIZE, "%d", success_check); for (i = 0; i < aod->props.self_mask_checksum_len; i++) len += snprintf(buf + len, PAGE_SIZE - len, " %02x", recv_checksum[i]); - len += snprintf(buf + len, PAGE_SIZE - len, "\n", recv_checksum[i]); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); kfree(recv_checksum); } else { snprintf(buf, PAGE_SIZE, "-1\n"); From 5c3cd44fe983133655128bb5c197ca88ee2b5f2e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 12:28:48 +0300 Subject: [PATCH 204/439] drivers/sensorhub/brcm/sx9330: fix strncpy() call warning Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/sx9330.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index 267c43700639..6ba8bcbfe308 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -109,7 +109,8 @@ struct sx9330_p { s32 max_normal_diff; int debug_count; - char hall_ic[6]; +#define HALL_IC_LEN 6 + char hall_ic[HALL_IC_LEN]; int is_unknown_mode; int motion; @@ -119,12 +120,12 @@ struct sx9330_p { int pre_attach; }; -static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[]) +static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[HALL_IC_LEN]) { int iRet = 0; mm_segment_t old_fs; struct file *filep; - char hall_sysfs[5]; + char hall_sysfs[HALL_IC_LEN]; old_fs = get_fs(); set_fs(KERNEL_DS); @@ -146,7 +147,7 @@ static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[]) set_fs(old_fs); return -EIO; } else { - strncpy(hall_ic_status, hall_sysfs, sizeof(hall_sysfs)); + strncpy(hall_ic_status, hall_sysfs, HALL_IC_LEN); } filp_close(filep, current->files); 
From cf5f693ff24f8d23ef5b74be1940ab9852f8ed26 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 21:48:28 +0300 Subject: [PATCH 205/439] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android: fix adps_mode check Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c index b31a85c011c5..45e3616b6515 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c @@ -8986,7 +8986,7 @@ wl_android_set_adps_mode(struct net_device *dev, const char* string_num) adps_mode = bcm_atoi(string_num); WL_ERR(("%s: SET_ADPS %d\n", __FUNCTION__, adps_mode)); - if ((adps_mode < 0) && (1 < adps_mode)) { + if ((adps_mode < 0) || (1 < adps_mode)) { WL_ERR(("wl_android_set_adps_mode: Invalid value %d.\n", adps_mode)); return -EINVAL; } From 092e2ee3c5e97eef98494da8a261b1c2b091810d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 21:49:49 +0300 Subject: [PATCH 206/439] mmap/rmap: fix pointer cast to enum warning Signed-off-by: Denis Efremov --- mm/rmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/rmap.c b/mm/rmap.c index a9ff86a08143..11308147e7d6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1342,7 +1342,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, struct page *subpage; bool ret = true; unsigned long start = address, end; - enum ttu_flags flags = (enum ttu_flags)arg; + enum ttu_flags flags = (uintptr_t)arg; /* munlock has nothing to gain from examining un-locked vmas */ if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) From 3c6c42e1e9ffd7f1ff2f8f2dca3a698cc0dec51e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 21:50:37 +0300 Subject: [PATCH 207/439] 
drivers/misc/modem_v1/modem_main: fix pointer to enum cast warning Signed-off-by: Denis Efremov --- drivers/misc/modem_v1/modem_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/modem_v1/modem_main.c b/drivers/misc/modem_v1/modem_main.c index 6fc81df4d485..c7de1b23d2cf 100644 --- a/drivers/misc/modem_v1/modem_main.c +++ b/drivers/misc/modem_v1/modem_main.c @@ -662,7 +662,7 @@ enum mif_sim_mode { static int simslot_count(struct seq_file *m, void *v) { - enum mif_sim_mode mode = (enum mif_sim_mode)m->private; + enum mif_sim_mode mode = (uintptr_t)m->private; seq_printf(m, "%u\n", mode); return 0; From 8dea1869f0b5cb46b0203fcccf9ddfd16bc467f1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 21:51:07 +0300 Subject: [PATCH 208/439] drivers/scsi/ufs/ufshcd: fix out-of-bounds buffer write Signed-off-by: Denis Efremov --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 21af2b4bce2f..aea5e05d157c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -10122,7 +10122,7 @@ static void ufs_sec_send_errinfo(void *data) { static struct ufs_hba *hba; struct SEC_UFS_counting *err_info; - char buf[22]; + char buf[23]; if (data) { hba = (struct ufs_hba *)data; From 74a56d8e3d90175f5893f1a63edca20f9eacd764 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 22:10:40 +0300 Subject: [PATCH 209/439] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs: suppress clang warning Signed-off-by: Denis Efremov --- .../media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c index 8a65ef35bfec..6665f71ca897 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c +++ 
b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c @@ -1283,7 +1283,7 @@ static ssize_t camera_ssrm_camera_info_store(struct device *dev, ret_count = sscanf(buf, "%d%d%d%d%d%d%d", &temp.operation, &temp.cameraID, &temp.previewMinFPS, &temp.previewMaxFPS, &temp.previewSizeWidth, &temp.previewSizeHeight, &temp.sensorOn); - if (ret_count > sizeof(SsrmCameraInfo)/sizeof(int)) { + if (ret_count > sizeof(SsrmCameraInfo)/(sizeof(int))) { return -EINVAL; } From a8388fce0572273c9cc3cfceae188af6ff46742a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 07:53:43 +0300 Subject: [PATCH 210/439] drivers/samsung/debug/sec_debug_test: fix simulate_SYNC_IRQ_LOCKUP() Signed-off-by: Denis Efremov --- drivers/samsung/debug/sec_debug_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 96245ce6bf18..8aabe2eb09d6 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ b/drivers/samsung/debug/sec_debug_test.c @@ -893,7 +893,7 @@ static void simulate_SYNC_IRQ_LOCKUP(char *arg) if (arg) { if (!kstrtol(arg, 10, &irq)) { - struct irq_desc *desc = irq_to_desc(i); + struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->action && desc->action->thread_fn) desc->action->thread_fn = dummy_wait_for_completion_irq_handler; From ba7c5a863331f66249354145ff4d938c0a74ba15 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 09:28:24 +0300 Subject: [PATCH 211/439] drivers/media/tdmb/fc8080/ficdecoder: use unsigned 0x7fU constant to suppress the warning Signed-off-by: Denis Efremov --- drivers/media/tdmb/fc8080/ficdecoder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/tdmb/fc8080/ficdecoder.c b/drivers/media/tdmb/fc8080/ficdecoder.c index 337bb60c5aa8..b9fae7bc349b 100644 --- a/drivers/media/tdmb/fc8080/ficdecoder.c +++ b/drivers/media/tdmb/fc8080/ficdecoder.c @@ -861,7 +861,7 @@ int fig0_ext10_decoder(u8 
*fibBuffer, int figLength) u8 hour = 0; /*minutes = 0, seconds = 0*/ u16 milliseconds = 0; - MJD = (fibBuffer[0] & 0x7f) << 10; + MJD = (fibBuffer[0] & 0x7fU) << 10; MJD |= (fibBuffer[1] << 2); MJD |= (fibBuffer[2] & 0xc0) >> 6; /*LSI = (fibBuffer[2] & 0x20) >> 5; */ From f0f730844c8702c2484eaf16e857a393f0d2bd05 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 10:07:18 +0300 Subject: [PATCH 212/439] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs: fix sscanf format Signed-off-by: Denis Efremov --- .../media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c index 6665f71ca897..3d3e75b38d8d 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c @@ -2761,7 +2761,7 @@ static ssize_t camera_front_tof_check_pd_store(struct device *dev, return -ENODEV; } - ret_count = sscanf(buf, "%d", &value); + ret_count = sscanf(buf, "%hhd", &value); camera_tof_set_laser_current(SENSOR_POSITION_FRONT_TOF, value); return count; } From addeacc11d1f3e02178091d88db070e0d6ee7641 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:02:14 +0300 Subject: [PATCH 213/439] drivers/battery_v2/mfc_s2miw04_charger: fix scanf format Signed-off-by: Denis Efremov --- drivers/battery_v2/mfc_s2miw04_charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/battery_v2/mfc_s2miw04_charger.c b/drivers/battery_v2/mfc_s2miw04_charger.c index 9ccc1f29062a..21cabb16aa93 100644 --- a/drivers/battery_v2/mfc_s2miw04_charger.c +++ b/drivers/battery_v2/mfc_s2miw04_charger.c @@ -4440,7 +4440,7 @@ ssize_t mfc_s2miw04_store_attrs(struct device *dev, ret = count; break; case MFC_PACKET: - if (sscanf(buf, "0x%4x 0x%4x 0x%4x\n", &header, &data_com, 
&data_val) == 3) { + if (sscanf(buf, "0x%4hhx 0x%4hhx 0x%4hhx\n", &header, &data_com, &data_val) == 3) { dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, header, data_com, data_val); mfc_send_packet(charger, header, data_com, &data_val, 1); } From 0f47b975b9a5115c494bac2389b2a414fc849c89 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:02:47 +0300 Subject: [PATCH 214/439] drivers/input/keyboard/stm/fsr1ad04: move G1,G2 definitions to .c file Signed-off-by: Denis Efremov --- .../input/keyboard/stm/fsr1ad04/stm_fsr_functions.c | 8 ++++++++ drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h | 10 +++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c index c5d49a34b193..f01c90c296bc 100644 --- a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c +++ b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c @@ -29,6 +29,14 @@ #include "stm_fsr_sidekey.h" +int G1[4] = { -2400, -2400, -4800, -4800 }; +int G2[4][4] = { + {-250, -250, -500, -500}, + {-125, -125, -250, -250}, + {-500, -500, -1000, -1000}, + {-250, -250, -500, -500} +}; + static void fw_update(void *device_data); static void get_chip_vendor(void *device_data); static void get_chip_name(void *device_data); diff --git a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h index ebfff8b2f944..f3329f60949b 100644 --- a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h +++ b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h @@ -144,13 +144,9 @@ struct fsr_sidekey_plat_data { #define BUFFER_MAX ((256 * 1024) - 16) -static int G1[4] = { -2400, -2400, -4800, -4800 }; -static int G2[4][4] = { - {-250, -250, -500, -500}, - {-125, -125, -250, -250}, - {-500, -500, -1000, -1000}, - {-250, -250, -500, -500} -}; +extern int G1[4]; +extern int G2[4][4]; + enum { TYPE_RAW_DATA, From 
42cf243420c566bda862a55ad1c205c2c5122601 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:03:24 +0300 Subject: [PATCH 215/439] drivers/sensorhub/brcm/sx9360: fix strncpy() call warning Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/sx9360.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/sensorhub/brcm/sx9360.c b/drivers/sensorhub/brcm/sx9360.c index 835f3cdac3a6..de1c7e6a43dd 100644 --- a/drivers/sensorhub/brcm/sx9360.c +++ b/drivers/sensorhub/brcm/sx9360.c @@ -133,7 +133,8 @@ struct sx9360_p { s16 max_normal_diff; int debug_count; - char hall_ic[6]; +#define HALL_IC_LEN 6 + char hall_ic[HALL_IC_LEN]; int is_unknown_mode; int motion; bool first_working; @@ -148,12 +149,12 @@ struct sx9360_p { #endif }; -static int sx9360_check_hallic_state(char *file_path, char hall_ic_status[]) +static int sx9360_check_hallic_state(char *file_path, char hall_ic_status[HALL_IC_LEN]) { int iRet = 0; mm_segment_t old_fs; struct file *filep; - char hall_sysfs[5]; + char hall_sysfs[HALL_IC_LEN]; old_fs = get_fs(); set_fs(KERNEL_DS); @@ -175,7 +176,7 @@ static int sx9360_check_hallic_state(char *file_path, char hall_ic_status[]) set_fs(old_fs); return -EIO; } else { - strncpy(hall_ic_status, hall_sysfs, sizeof(hall_sysfs)); + strncpy(hall_ic_status, hall_sysfs, HALL_IC_LEN); } filp_close(filep, current->files); From c60ec24dd2011781d64059c30b66cb4086b4ca6e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:05:15 +0300 Subject: [PATCH 216/439] drivers/input/touchscreen/sec_ts/y771_d/sec_ts: check regulator_enable() result Signed-off-by: Denis Efremov --- drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c b/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c index 51b608b11356..534601f073a5 100644 --- a/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c +++ 
b/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c @@ -2100,8 +2100,14 @@ int sec_ts_power(void *data, bool on) if (regulator_is_enabled(regulator_dvdd)) { ret = regulator_disable(regulator_dvdd); if (ret) { + int ret; + input_err(true, &ts->client->dev, "%s: failed to disable dvdd: %d\n", __func__, ret); - regulator_enable(regulator_avdd); + ret = regulator_enable(regulator_avdd); + if (ret < 0) { + input_err(true, &ts->client->dev, "%s: failed to reenable dvdd: %d\n", __func__, ret); + } + goto out; } } else { From 60ec4a0b1c4827521885af3afc7521175709677c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 23 Sep 2020 18:20:22 +0300 Subject: [PATCH 217/439] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata: fix initialization warning Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c index 6dc91e1f74c5..b62966916486 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c @@ -272,7 +272,7 @@ wl_gather_ap_stadata(void *handle, void *event_info, u8 event) wl_event_msg_t *e; wl_ap_sta_data_t *sta_data; - wl_ap_sta_data_t temp_sta_data = {0}; + wl_ap_sta_data_t temp_sta_data = {}; void *data = NULL; int i; int ret; From d971e951a20cb6dec8a00103c4a3d5293957929e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 20 Sep 2020 16:20:37 +0300 Subject: [PATCH 218/439] init/main: use __initdata_or_module for initcall_sec_debug Signed-off-by: Denis Efremov --- init/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/main.c b/init/main.c index c1326c2b549e..2366627a67e2 100644 --- a/init/main.c +++ b/init/main.c @@ -1017,7 +1017,7 @@ __setup("initcall_blacklist=", initcall_blacklist); #ifdef CONFIG_SEC_BOOTSTAT -static bool __init_or_module 
initcall_sec_debug = true; +static bool __initdata_or_module initcall_sec_debug = true; static int __init_or_module do_one_initcall_sec_debug(initcall_t fn) { From c8f446bd14f8c2b1a72d7b189f6c1fc35ec3932a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:28:36 +0300 Subject: [PATCH 219/439] security/sdp/dd_kernel_crypto: fix misleading indentation Signed-off-by: Denis Efremov --- security/sdp/dd_kernel_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/sdp/dd_kernel_crypto.c b/security/sdp/dd_kernel_crypto.c index c6aa2a066bbd..14f90f613853 100644 --- a/security/sdp/dd_kernel_crypto.c +++ b/security/sdp/dd_kernel_crypto.c @@ -643,7 +643,7 @@ int dd_sec_crypt_bio_pages(struct dd_info *info, struct bio *orig, if (rw == DD_ENCRYPT) memcpy(&clone->bi_iter, &iter_backup, sizeof(struct bvec_iter)); - return 0; + return 0; } void dd_hex_key_dump(const char* tag, uint8_t *data, size_t data_len) From dc258445355dc467e0f117eb1ef77f0c5dd8d33d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 12 Sep 2020 01:29:41 +0300 Subject: [PATCH 220/439] net/mptcp/mptcp_fullmesh: fix misleading indentation Signed-off-by: Denis Efremov --- net/mptcp/mptcp_fullmesh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/mptcp_fullmesh.c b/net/mptcp/mptcp_fullmesh.c index ce8343de9add..56a066ed4ef0 100644 --- a/net/mptcp/mptcp_fullmesh.c +++ b/net/mptcp/mptcp_fullmesh.c @@ -1169,7 +1169,7 @@ static int inet6_addr_event(struct notifier_block *this, unsigned long event, event == NETDEV_CHANGE)) return NOTIFY_DONE; - addr6_event_handler(ifa6, event, net); + addr6_event_handler(ifa6, event, net); return NOTIFY_DONE; } From 7c45c9702b61c18f45c8e3a2d71ea7f986309f99 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:45:05 +0300 Subject: [PATCH 221/439] drivers/sensorhub/brcm/bbdpl/bbd: fix misleading indentation Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/bbdpl/bbd.c | 32 
+++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/sensorhub/brcm/bbdpl/bbd.c b/drivers/sensorhub/brcm/bbdpl/bbd.c index d29857429971..5627b24d89b1 100644 --- a/drivers/sensorhub/brcm/bbdpl/bbd.c +++ b/drivers/sensorhub/brcm/bbdpl/bbd.c @@ -903,22 +903,22 @@ ssize_t bbd_urgent_patch_read(struct file *user_filp, char __user *buf, size_t s } else is_signed = true; - if (is_signed == false) { - pr_err("[SSPBBD] %s : urgent_patch is not signed", __func__); - kfree(urgent_buffer); - return 0; - } - - urgent_patch_size = ret; - pr_err("[SSPBBD] %s : total: %d patch size: %d", __func__, fsize, urgent_patch_size); - - if (offset >= urgent_patch_size) { // signal EOF - pr_err("[SSPBBD] %s : signal EOF", __func__); - - *ppos = 0; - kfree(urgent_buffer); - return 0; - } + if (is_signed == false) { + pr_err("[SSPBBD] %s : urgent_patch is not signed", __func__); + kfree(urgent_buffer); + return 0; + } + + urgent_patch_size = ret; + pr_err("[SSPBBD] %s : total: %lld patch size: %d", __func__, fsize, urgent_patch_size); + + if (offset >= urgent_patch_size) { // signal EOF + pr_err("[SSPBBD] %s : signal EOF", __func__); + + *ppos = 0; + kfree(urgent_buffer); + return 0; + } if (offset + size > urgent_patch_size) rd_size = urgent_patch_size - offset; From 70fbe6fc9da4d811a253b67b26f88cc6bced5066 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 00:46:18 +0300 Subject: [PATCH 222/439] drivers/sensorhub/brcm/ssp_bbd: fix misleading indentation Signed-off-by: Denis Efremov --- drivers/sensorhub/brcm/ssp_bbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp_bbd.c b/drivers/sensorhub/brcm/ssp_bbd.c index 1c9d641ed869..8987fc69f563 100644 --- a/drivers/sensorhub/brcm/ssp_bbd.c +++ b/drivers/sensorhub/brcm/ssp_bbd.c @@ -304,7 +304,7 @@ int callback_bbd_on_mcu_ready(void *ssh_data, bool ready) continue; if(src[i] == ';') break; - dst[idx++] = src[i]; + dst[idx++] = src[i]; } } 
int callback_bbd_on_control(void *ssh_data, const char *str_ctrl) From c14922cb73040d7af36a2cacc96c67f750f5efb7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:36:51 +0300 Subject: [PATCH 223/439] drivers/usb/gadget/function/f_conn_gadget: fix misleading indentation Signed-off-by: Denis Efremov --- drivers/usb/gadget/function/f_conn_gadget.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/function/f_conn_gadget.c b/drivers/usb/gadget/function/f_conn_gadget.c index e0bafd0ca278..e6e02f7b819b 100644 --- a/drivers/usb/gadget/function/f_conn_gadget.c +++ b/drivers/usb/gadget/function/f_conn_gadget.c @@ -1284,8 +1284,8 @@ static int conn_gadget_setup(struct conn_gadget_instance *fi_conn_gadget) return 0; err_: - if (dev->rd_queue_buf) - vfree(dev->rd_queue_buf); + if (dev->rd_queue_buf) + vfree(dev->rd_queue_buf); _conn_gadget_dev = NULL; kfree(dev); @@ -1304,8 +1304,8 @@ static void conn_gadget_cleanup(struct kref *kref) misc_deregister(&conn_gadget_device); - if (_conn_gadget_dev->rd_queue_buf) - vfree(_conn_gadget_dev->rd_queue_buf); + if (_conn_gadget_dev->rd_queue_buf) + vfree(_conn_gadget_dev->rd_queue_buf); kfree(_conn_gadget_dev); _conn_gadget_dev = NULL; From 8ac04da36be388f406c86b63b1ff804651168b2b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 11:39:30 +0300 Subject: [PATCH 224/439] drivers/usb/core/devio: fix misleading indentation Move dev_info() out of switch. 
Signed-off-by: Denis Efremov --- drivers/usb/core/devio.c | 56 +++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index f230da5ac6de..d4f4c040ad65 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -2189,36 +2189,38 @@ static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; - else switch (ctl->ioctl_code) { - dev_info(&ps->dev->dev,"%s ioctl_code %d\n", __func__, ctl->ioctl_code); - /* disconnect kernel driver from interface */ - case USBDEVFS_DISCONNECT: - if (intf->dev.driver) { - driver = to_usb_driver(intf->dev.driver); - dev_dbg(&intf->dev, "disconnect by usbfs\n"); - usb_driver_release_interface(driver, intf); - } else - retval = -ENODATA; - break; + else { + dev_info(&ps->dev->dev,"%s ioctl_code %d\n", __func__, ctl->ioctl_code); + switch (ctl->ioctl_code) { + /* disconnect kernel driver from interface */ + case USBDEVFS_DISCONNECT: + if (intf->dev.driver) { + driver = to_usb_driver(intf->dev.driver); + dev_dbg(&intf->dev, "disconnect by usbfs\n"); + usb_driver_release_interface(driver, intf); + } else + retval = -ENODATA; + break; - /* let kernel drivers try to (re)bind to the interface */ - case USBDEVFS_CONNECT: - if (!intf->dev.driver) - retval = device_attach(&intf->dev); - else - retval = -EBUSY; - break; + /* let kernel drivers try to (re)bind to the interface */ + case USBDEVFS_CONNECT: + if (!intf->dev.driver) + retval = device_attach(&intf->dev); + else + retval = -EBUSY; + break; - /* talk directly to the interface's driver */ - default: - if (intf->dev.driver) - driver = to_usb_driver(intf->dev.driver); - if (driver == NULL || driver->unlocked_ioctl == NULL) { - retval = -ENOTTY; - } else { - retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); - if (retval == -ENOIOCTLCMD) + /* talk directly to the 
interface's driver */ + default: + if (intf->dev.driver) + driver = to_usb_driver(intf->dev.driver); + if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; + } else { + retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; + } } } From 05061a487f5c819bf83caf70a98fc61191a331ea Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 12:44:32 +0300 Subject: [PATCH 225/439] net/netfilter/linkforward: fix misleading indentation Signed-off-by: Denis Efremov --- net/netfilter/linkforward.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/net/netfilter/linkforward.c b/net/netfilter/linkforward.c index 07dfa50c3cda..8f90816d7463 100644 --- a/net/netfilter/linkforward.c +++ b/net/netfilter/linkforward.c @@ -224,18 +224,20 @@ int linkforward_add(__be16 dst_port, struct nf_conntrack_tuple *t_rpl, struct nf if (!room_found) { last_conn_idx++; + if (last_conn_idx == MAX_CONNECTION_CNT) last_conn_idx = 0; - i = last_conn_idx; - conn[i].enabled = true; - conn[i].dst_port = dst_port; - conn[i].netdev = netdev; - memcpy(&conn[i].t[0], t_org, sizeof(struct nf_conntrack_tuple)); - memcpy(&conn[i].t[1], t_rpl, sizeof(struct nf_conntrack_tuple)); + + i = last_conn_idx; + conn[i].enabled = true; + conn[i].dst_port = dst_port; + conn[i].netdev = netdev; + memcpy(&conn[i].t[0], t_org, sizeof(struct nf_conntrack_tuple)); + memcpy(&conn[i].t[1], t_rpl, sizeof(struct nf_conntrack_tuple)); #ifdef CONFIG_CP_DIT - dit_set_nat_local_addr(t_org->src.u3.ip); - dit_set_nat_filter(i, IPPROTO_TCP, 0xffffffff, 0xffff, dst_port); + dit_set_nat_local_addr(t_org->src.u3.ip); + dit_set_nat_filter(i, IPPROTO_TCP, 0xffffffff, 0xffff, dst_port); #endif } From 16fd7a1b5258d922fb6278ed59f7f6230dc50ee6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Sep 2020 22:14:20 +0300 Subject: [PATCH 226/439] drivers/sensorhub/brcm/ssp_i2c: fix misleading indentation Signed-off-by: Denis 
Efremov --- drivers/sensorhub/brcm/ssp_i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp_i2c.c b/drivers/sensorhub/brcm/ssp_i2c.c index 37e1522ee2d2..4c59c007e98c 100644 --- a/drivers/sensorhub/brcm/ssp_i2c.c +++ b/drivers/sensorhub/brcm/ssp_i2c.c @@ -315,7 +315,7 @@ int send_instruction(struct ssp_data *data, u8 uInst, if (uLength >= 9) BatchTimeforReset = *(unsigned int *)(&uSendBuf[4]);// Add / change normal case, not factory. //pr_info("[SSP] %s timeForRest %d", __func__, BatchTimeforReset); - data->IsBypassMode[uSensorType] = (BatchTimeforReset == 0); + data->IsBypassMode[uSensorType] = (BatchTimeforReset == 0); //pr_info("[SSP] sensor%d mode%d Time %lld\n", uSensorType, data->IsBypassMode[uSensorType], current_Ts); } return iRet; From 954ce0c8efd21650292e16e2a9954b69b3266101 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 23 Mar 2020 21:05:20 +0300 Subject: [PATCH 227/439] net: ipv4: lock the initial TCP window size to 64K Signed-off-by: Denis Efremov --- net/ipv4/Kconfig | 7 +++++++ net/ipv4/tcp_output.c | 9 +++++++++ 2 files changed, 16 insertions(+) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 93def62ca73e..73a6b13e33ba 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -463,6 +463,13 @@ config INET_DIAG_DESTROY had been disconnected. If unsure, say N. +config LARGE_TCP_INITIAL_BUFFER + bool "TCP: lock the initial window size to 64K" + default n + ---help--- + Lock the initial TCP window size to 64K. + If unsure, say N. 
+ menuconfig TCP_CONG_ADVANCED bool "TCP: advanced congestion control" ---help--- diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2b6e5715844f..17ace4982a1e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -272,6 +272,15 @@ void tcp_select_initial_window(int __space, __u32 mss, *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); } +#ifdef CONFIG_LARGE_TCP_INITIAL_BUFFER + pr_info("TCP: default window size: %u\n", *rcv_wnd); + /* Lock the initial TCP window size to 64K. + * Assuming 1500 packet size, 64240 is the largest multiple + * of MSS (44 * 1460) under 65535 (2 << 15). + */ + *rcv_wnd = 64240; +#endif + /* Set the clamp no higher than max representable value */ (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); } From ca28e83d9d1cee236f2f7d5f1784b67da4591a1e Mon Sep 17 00:00:00 2001 From: franciscofranco Date: Sun, 17 Sep 2017 02:48:54 +0200 Subject: [PATCH 228/439] fs: fsync on/off support [efremov: change permissions from 0755 to 0644] Signed-off-by: djb77 Signed-off-by: Denis Efremov --- fs/sync.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/fs/sync.c b/fs/sync.c index afb091ba9ecc..2f6aca5f0cdd 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,9 @@ #include #include "internal.h" +bool fsync_enabled = true; +module_param(fsync_enabled, bool, 0644); + #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) @@ -406,10 +410,15 @@ void emergency_sync(void) */ SYSCALL_DEFINE1(syncfs, int, fd) { - struct fd f = fdget(fd); + struct fd f; struct super_block *sb; int ret; + if (!fsync_enabled) + return 0; + + f = fdget(fd); + if (!f.file) return -EBADF; sb = f.file->f_path.dentry->d_sb; @@ -437,6 +446,9 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; + if 
(!fsync_enabled) + return 0; + if (!file->f_op->fsync) return -EINVAL; if (!datasync && (inode->i_state & I_DIRTY_TIME)) { @@ -459,6 +471,9 @@ EXPORT_SYMBOL(vfs_fsync_range); */ int vfs_fsync(struct file *file, int datasync) { + if (!fsync_enabled) + return 0; + return vfs_fsync_range(file, 0, LLONG_MAX, datasync); } EXPORT_SYMBOL(vfs_fsync); @@ -487,10 +502,15 @@ static void inc_fsync_time_cnt(unsigned long end, unsigned long start) static int do_fsync(unsigned int fd, int datasync) { - struct fd f = fdget(fd); + struct fd f; int ret = -EBADF; unsigned long stamp = jiffies; + if (!fsync_enabled) + return 0; + + f = fdget(fd); + if (f.file) { ret = vfs_fsync(f.file, datasync); fdput(f); @@ -502,11 +522,17 @@ static int do_fsync(unsigned int fd, int datasync) SYSCALL_DEFINE1(fsync, unsigned int, fd) { + if (!fsync_enabled) + return 0; + return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { + if (!fsync_enabled) + return 0; + return do_fsync(fd, 1); } @@ -566,6 +592,9 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, loff_t endbyte; /* inclusive */ umode_t i_mode; + if (!fsync_enabled) + return 0; + ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; From 8adea38fd25ad8838c75880d5a72ab192b6a7b4f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 12 Apr 2020 09:20:35 +0300 Subject: [PATCH 229/439] fs: add NOATIME/RELATIME default mount option Signed-off-by: Denis Efremov --- fs/Kconfig | 14 ++++++++++++++ fs/namespace.c | 5 +++++ 2 files changed, 19 insertions(+) diff --git a/fs/Kconfig b/fs/Kconfig index 6f97b4d448d1..b42d356344ab 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -12,6 +12,20 @@ config INTERRUPTIBLE_SYNC bool "Support interruptible sync for Samsung Mobile Device" default y +choice + prompt "Default mount option (RELATIME/NOATIME)" + default DEFAULT_MNT_RELATIME + help + Select default mount option. 
+ + config DEFAULT_MNT_RELATIME + bool "RELATIME" + + config DEFAULT_MNT_NOATIME + bool "NOATIME" +endchoice + + if BLOCK config FS_IOMAP diff --git a/fs/namespace.c b/fs/namespace.c index c06a0954a9d2..ff787d3fb622 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3469,9 +3469,14 @@ long do_mount(const char *dev_name, const char __user *dir_name, if (retval) goto dput_out; +#ifdef CONFIG_DEFAULT_MNT_NOATIME + if (!(flags & MS_RELATIME)) + mnt_flags |= MNT_NOATIME; +#else /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; +#endif /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) From 29fbce0f82acd866b67d2d27b3e31b1a07177784 Mon Sep 17 00:00:00 2001 From: Sultanxda Date: Fri, 21 Oct 2016 01:37:16 -0700 Subject: [PATCH 230/439] proc: Remove SafetyNet flags from /proc/cmdline Userspace parses this and sets the ro.boot.verifiedbootstate prop according to the value that this flag has. When ro.boot.verifiedbootstate is not 'green', SafetyNet is tripped and fails the CTS test. Hide verifiedbootstate from /proc/cmdline in order to fix the failed SafetyNet CTS check. SafetyNet checks androidboot.veritymode in Nougat, so remove it. Additionally, remove androidboot.enable_dm_verity and androidboot.secboot in case SafetyNet will check them in the future. 
Signed-off-by: Sultanxda Signed-off-by: Denis Efremov --- fs/proc/cmdline.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index 403cbb12a6e9..bb4ff145fc6a 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c @@ -3,10 +3,13 @@ #include #include #include +#include + +static char new_command_line[COMMAND_LINE_SIZE]; static int cmdline_proc_show(struct seq_file *m, void *v) { - seq_printf(m, "%s\n", saved_command_line); + seq_printf(m, "%s\n", new_command_line); return 0; } @@ -22,8 +25,38 @@ static const struct file_operations cmdline_proc_fops = { .release = single_release, }; +static void remove_flag(char *cmd, const char *flag) +{ + char *start_addr, *end_addr; + + /* Ensure all instances of a flag are removed */ + while ((start_addr = strstr(cmd, flag))) { + end_addr = strchr(start_addr, ' '); + if (end_addr) + memmove(start_addr, end_addr + 1, strlen(end_addr)); + else + *(start_addr - 1) = '\0'; + } +} + +static void remove_safetynet_flags(char *cmd) +{ + remove_flag(cmd, "androidboot.enable_dm_verity="); + remove_flag(cmd, "androidboot.secboot="); + remove_flag(cmd, "androidboot.verifiedbootstate="); + remove_flag(cmd, "androidboot.veritymode="); +} + static int __init proc_cmdline_init(void) { + strcpy(new_command_line, saved_command_line); + + /* + * Remove various flags from command line seen by userspace in order to + * pass SafetyNet CTS check. 
+ */ + remove_safetynet_flags(new_command_line); + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); return 0; } From 09b33c0a0ae5628792a902a1d918d3191252f562 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 6 Feb 2020 14:29:34 +0300 Subject: [PATCH 231/439] proc: add CONFIG_PROC_REMOVE_SAFETYNET_FLAGS Signed-off-by: Denis Efremov --- fs/proc/Kconfig | 9 +++++++++ fs/proc/cmdline.c | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 54e04a53bd70..5e14d454c865 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -62,6 +62,15 @@ config PROC_SYSCTL building a kernel for install/rescue disks or your system is very limited in memory. +config PROC_REMOVE_SAFETYNET_FLAGS + bool "Hide SafetyNet flags in cmdline" if EXPERT + depends on PROC_FS + default n + ---help--- + Remove "androidboot.enable_dm_verity", "androidboot.secboot", + "androidboot.verifiedbootstate", "androidboot.veritymode" from + cmdline. This will help to bypass SafetyNet checks. + config PROC_PAGE_MONITOR default y depends on PROC_FS && MMU diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index bb4ff145fc6a..50bdc4052aea 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c @@ -25,6 +25,8 @@ static const struct file_operations cmdline_proc_fops = { .release = single_release, }; + +#ifdef CONFIG_PROC_REMOVE_SAFETYNET_FLAGS static void remove_flag(char *cmd, const char *flag) { char *start_addr, *end_addr; @@ -46,16 +48,19 @@ static void remove_safetynet_flags(char *cmd) remove_flag(cmd, "androidboot.verifiedbootstate="); remove_flag(cmd, "androidboot.veritymode="); } +#endif static int __init proc_cmdline_init(void) { strcpy(new_command_line, saved_command_line); +#ifdef CONFIG_PROC_REMOVE_SAFETYNET_FLAGS /* * Remove various flags from command line seen by userspace in order to * pass SafetyNet CTS check. 
*/ remove_safetynet_flags(new_command_line); +#endif proc_create("cmdline", 0, NULL, &cmdline_proc_fops); return 0; From 86ee76420c528640c10b6e4288d1f4d6da7f953d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 19:10:21 +0300 Subject: [PATCH 232/439] android: Add superuser driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Run `su` to get root. HELLO NAÏVE TECH JOURNALISTS: This commit here introduces a driver used during development, because having access to scary debugging facilities is useful during kernel development. This driver is disabled by default, and when it is enabled, it spews warnings all over the place to encourage people who turn it on by accident to turn it off. It's exceedingly unlikely that somebody turns this on without intending to do so. You really have to fish around and make a concerted effort to get it enabled and working, and the warning messages it gives during the build and boot processes are really an eyesore. So, if you've found this commit because some idiot shipped a kernel to their users, you have every reason to blame that idiot, and not this commit. And if the fool insists it was an 'accident', he's not telling the truth. This is from: https://git.zx2c4.com/kernel-assisted-superuser/about Signed-off-by: Denis Efremov --- drivers/base/Kconfig | 9 +++ drivers/base/Makefile | 1 + drivers/base/superuser.c | 143 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 153 insertions(+) create mode 100644 drivers/base/superuser.c diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 65db84f77442..0ae31e75040d 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -357,3 +357,12 @@ config CPU_CAPACITY_FIXUP Enable to avoid that unity set cpuset of its client task to big core only. 
endmenu +config ASSISTED_SUPERUSER + bool "Kernel-assisted superuser" + select SECURITY_SELINUX_DEVELOP if SECURITY_SELINUX + ---help--- + This driver gives trivial root access by typing `su` in a + shell. It is a security disaster, and nobody should enable + this catastrophe of a driver. + + Say N here unless you have a vendetta against kittens. diff --git a/drivers/base/Makefile b/drivers/base/Makefile index e32a52490051..465e8de0d7da 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -30,3 +30,4 @@ obj-y += test/ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +obj-$(CONFIG_ASSISTED_SUPERUSER) += superuser.o diff --git a/drivers/base/superuser.c b/drivers/base/superuser.c new file mode 100644 index 000000000000..0e70b55c0c7e --- /dev/null +++ b/drivers/base/superuser.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2018 Jason A. Donenfeld . All Rights Reserved. + */ + +/* Hello. If this is enabled in your kernel for some reason, whoever is + * distributing your kernel to you is a complete moron, and you shouldn't + * use their kernel anymore. But it's not my fault! People: don't enable + * this driver! (Note that the existence of this file does not imply the + * driver is actually in use. Look in your .config to see whether this is + * enabled.) -Jason + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include + +static bool is_su(const char __user *filename) +{ + static const char su_path[] = "/system/bin/su"; + char ufn[sizeof(su_path)]; + + return likely(!copy_from_user(ufn, filename, sizeof(ufn))) && + unlikely(!memcmp(ufn, su_path, sizeof(ufn))); +} + +static void __user *userspace_stack_buffer(const void *d, size_t len) +{ + /* To avoid having to mmap a page in userspace, just write below the stack pointer. */ + char __user *p = (void __user *)current_user_stack_pointer() - len; + + return copy_to_user(p, d, len) ? 
NULL : p; +} + +static char __user *sh_user_path(void) +{ + static const char sh_path[] = "/system/bin/sh"; + + return userspace_stack_buffer(sh_path, sizeof(sh_path)); +} + +static long(*old_newfstatat)(int dfd, const char __user *filename, + struct stat *statbuf, int flag); +static long new_newfstatat(int dfd, const char __user *filename, + struct stat __user *statbuf, int flag) +{ + if (!is_su(filename)) + return old_newfstatat(dfd, filename, statbuf, flag); + return old_newfstatat(dfd, sh_user_path(), statbuf, flag); +} + +static long(*old_faccessat)(int dfd, const char __user *filename, int mode); +static long new_faccessat(int dfd, const char __user *filename, int mode) +{ + if (!is_su(filename)) + return old_faccessat(dfd, filename, mode); + return old_faccessat(dfd, sh_user_path(), mode); +} + +extern int selinux_enforcing; +static long (*old_execve)(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp); +static long new_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp) +{ + static const char now_root[] = "You are now root.\n"; + struct cred *cred; + + if (!is_su(filename)) + return old_execve(filename, argv, envp); + + if (!old_execve(filename, argv, envp)) + return 0; + + /* It might be enough to just change the security ctx of the + * current task, but that requires slightly more thought than + * just axing the whole thing here. + */ + selinux_enforcing = 0; + + /* Rather than the usual commit_creds(prepare_kernel_cred(NULL)) idiom, + * we manually zero out the fields in our existing one, so that we + * don't have to futz with the task's key ring for disk access. 
+ */ + cred = (struct cred *)__task_cred(current); + memset(&cred->uid, 0, sizeof(cred->uid)); + memset(&cred->gid, 0, sizeof(cred->gid)); + memset(&cred->suid, 0, sizeof(cred->suid)); + memset(&cred->euid, 0, sizeof(cred->euid)); + memset(&cred->egid, 0, sizeof(cred->egid)); + memset(&cred->fsuid, 0, sizeof(cred->fsuid)); + memset(&cred->fsgid, 0, sizeof(cred->fsgid)); + memset(&cred->cap_inheritable, 0xff, sizeof(cred->cap_inheritable)); + memset(&cred->cap_permitted, 0xff, sizeof(cred->cap_permitted)); + memset(&cred->cap_effective, 0xff, sizeof(cred->cap_effective)); + memset(&cred->cap_bset, 0xff, sizeof(cred->cap_bset)); + memset(&cred->cap_ambient, 0xff, sizeof(cred->cap_ambient)); + + sys_write(2, userspace_stack_buffer(now_root, sizeof(now_root)), + sizeof(now_root) - 1); + return old_execve(sh_user_path(), argv, envp); +} + +extern const unsigned long sys_call_table[]; +static void read_syscall(void **ptr, unsigned int syscall) +{ + *ptr = READ_ONCE(*((void **)sys_call_table + syscall)); +} +static void replace_syscall(unsigned int syscall, void *ptr) +{ + WRITE_ONCE(*((void **)sys_call_table + syscall), ptr); +} +#define read_and_replace_syscall(name) do { \ + read_syscall((void **)&old_ ## name, __NR_ ## name); \ + replace_syscall(__NR_ ## name, &new_ ## name); \ +} while (0) + +static int superuser_init(void) +{ + pr_err("WARNING WARNING WARNING WARNING WARNING\n"); + pr_err("This kernel has kernel-assisted superuser and contains a\n"); + pr_err("trivial way to get root. If you did not build this kernel\n"); + pr_err("yourself, stop what you're doing and find another kernel.\n"); + pr_err("This one is not safe to use.\n"); + pr_err("WARNING WARNING WARNING WARNING WARNING\n"); + + read_and_replace_syscall(newfstatat); + read_and_replace_syscall(faccessat); + read_and_replace_syscall(execve); + + return 0; +} + +module_init(superuser_init); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Kernel-assisted superuser for Android"); +MODULE_AUTHOR("Jason A. 
Donenfeld "); From 5b031e4a32b4eb4b301cb82f4b2fc5083eac3995 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 21 Jan 2020 21:16:55 +0300 Subject: [PATCH 233/439] drivers/base/Kconfig: CONFIG_ASSISTED_SUPERUSER default n Signed-off-by: Denis Efremov --- drivers/base/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 0ae31e75040d..117492e7265d 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -360,6 +360,7 @@ endmenu config ASSISTED_SUPERUSER bool "Kernel-assisted superuser" select SECURITY_SELINUX_DEVELOP if SECURITY_SELINUX + default n ---help--- This driver gives trivial root access by typing `su` in a shell. It is a security disaster, and nobody should enable From 7371a5b82ab8c15b464a1829f5ac41f309f24bca Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 00:24:31 +0300 Subject: [PATCH 234/439] initramfs: allow CONFIG_INITRAMFS_FORCE Signed-off-by: Denis Efremov --- usr/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/usr/Kconfig b/usr/Kconfig index 43658b8a975e..24f6b30cc206 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -24,7 +24,8 @@ config INITRAMFS_SOURCE config INITRAMFS_FORCE bool "Ignore the initramfs passed by the bootloader" - depends on CMDLINE_EXTEND || CMDLINE_FORCE + depends on BLK_DEV_INITRD + default n help This option causes the kernel to ignore the initramfs image (or initrd image) passed to it by the bootloader. 
This is From d74ba164e5bf71c59808fd357d09f89644006b93 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 00:39:13 +0300 Subject: [PATCH 235/439] usr: add CONFIG_INITRAMFS_SKIP Signed-off-by: Denis Efremov --- init/initramfs.c | 4 ++++ usr/Kconfig | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/init/initramfs.c b/init/initramfs.c index 5ea7f1b5ec44..2c5be3446fed 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -608,6 +608,7 @@ static void __init clean_rootfs(void) } #endif +#ifdef CONFIG_INITRAMFS_SKIP static int __initdata do_skip_initramfs; static int __init skip_initramfs_param(char *str) @@ -618,16 +619,19 @@ static int __init skip_initramfs_param(char *str) return 1; } __setup("skip_initramfs", skip_initramfs_param); +#endif static int __init populate_rootfs(void) { char *err; +#ifdef CONFIG_INITRAMFS_SKIP if (do_skip_initramfs) { if (initrd_start) free_initrd(); return default_rootfs(); } +#endif /* Load the built in initramfs */ err = unpack_to_rootfs(__initramfs_start, __initramfs_size); diff --git a/usr/Kconfig b/usr/Kconfig index 24f6b30cc206..b3ed941a9f64 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -22,6 +22,14 @@ config INITRAMFS_SOURCE If you are not sure, leave it blank. +config INITRAMFS_SKIP + bool "Boot without initramfs if skip_initramfs in cmdline" + depends on BLK_DEV_INITRD + default y + help + Adds handling of "skip_initramfs" cmdline. Allows bootloader to + force booting from root partition. 
+ config INITRAMFS_FORCE bool "Ignore the initramfs passed by the bootloader" depends on BLK_DEV_INITRD From 2851eae75104f505b41243ff46211db3448aa324 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 09:46:06 +0300 Subject: [PATCH 236/439] initramfs: integrate magiskinit64 Signed-off-by: Denis Efremov --- usr/magisk/.gitignore | 3 +++ usr/magisk/backup_magisk | 3 +++ usr/magisk/initramfs_list | 7 +++++ usr/magisk/update_magisk.sh | 54 +++++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+) create mode 100644 usr/magisk/.gitignore create mode 100644 usr/magisk/backup_magisk create mode 100644 usr/magisk/initramfs_list create mode 100755 usr/magisk/update_magisk.sh diff --git a/usr/magisk/.gitignore b/usr/magisk/.gitignore new file mode 100644 index 000000000000..6efeeffea17a --- /dev/null +++ b/usr/magisk/.gitignore @@ -0,0 +1,3 @@ +magiskinit +magiskinit64 +magisk_version diff --git a/usr/magisk/backup_magisk b/usr/magisk/backup_magisk new file mode 100644 index 000000000000..8070a531554f --- /dev/null +++ b/usr/magisk/backup_magisk @@ -0,0 +1,3 @@ +KEEPVERITY=true +KEEPFORCEENCRYPT=true +RECOVERYMODE=false diff --git a/usr/magisk/initramfs_list b/usr/magisk/initramfs_list new file mode 100644 index 000000000000..6fc1b73bfd2d --- /dev/null +++ b/usr/magisk/initramfs_list @@ -0,0 +1,7 @@ +dir /.backup 0705 0 0 +file /init usr/magisk/magiskinit 0755 0 0 +file /.backup/.magisk usr/magisk/backup_magisk 0705 0 0 +dir /overlay.d 0750 0 0 +dir /overlay.d/sbin 0750 0 0 +file /overlay.d/sbin/magisk32.xz usr/magisk/magisk32.xz 0644 0 0 +file /overlay.d/sbin/magisk64.xz usr/magisk/magisk64.xz 0644 0 0 diff --git a/usr/magisk/update_magisk.sh b/usr/magisk/update_magisk.sh new file mode 100755 index 000000000000..7bf57a36b086 --- /dev/null +++ b/usr/magisk/update_magisk.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +ver="$(cat "$DIR/magisk_version" 2>/dev/null || echo -n 
'none')" + +if [ "x$1" = "xcanary" ] +then + nver="canary" + magisk_link="https://github.com/topjohnwu/magisk-files/raw/${nver}/app-debug.apk" +elif [ "x$1" = "xalpha" ] +then + nver="alpha" + magisk_link="https://github.com/vvb2060/magisk_files/raw/${nver}/app-release.apk" +else + if [ "x$1" = "x" ]; then + nver="$(curl -s https://github.com/topjohnwu/Magisk/releases | grep -m 1 -Poe 'Magisk v[\d\.]+' | cut -d ' ' -f 2)" + else + nver="$1" + fi + magisk_link="https://github.com/topjohnwu/Magisk/releases/download/${nver}/Magisk-${nver}.apk" +fi + +if [ \( -n "$nver" \) -a \( "$nver" != "$ver" \) -o ! \( -f "$DIR/magiskinit" \) -o \( "$nver" = "canary" \) -o \( "$nver" = "alpha" \) ] +then + echo "Updating Magisk from $ver to $nver" + curl -s --output "$DIR/magisk.zip" -L "$magisk_link" + if fgrep 'Not Found' "$DIR/magisk.zip"; then + curl -s --output "$DIR/magisk.zip" -L "${magisk_link%.apk}.zip" + fi + if unzip -o "$DIR/magisk.zip" arm/magiskinit64 -d "$DIR"; then + mv -f "$DIR/arm/magiskinit64" "$DIR/magiskinit" + : > "$DIR/magisk32.xz" + : > "$DIR/magisk64.xz" + elif unzip -o "$DIR/magisk.zip" lib/armeabi-v7a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/armeabi-v7a/libmagisk64.so -d "$DIR"; then + mv -f "$DIR/lib/armeabi-v7a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/armeabi-v7a/libmagisk64.so" "$DIR/magisk64" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + else + unzip -o "$DIR/magisk.zip" lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so -d "$DIR" + mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/arm64-v8a/libmagisk64.so" "$DIR/magisk64" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + fi + echo -n "$nver" > "$DIR/magisk_version" + rm "$DIR/magisk.zip" + touch "$DIR/initramfs_list" +else + echo "Nothing to be done: Magisk 
version $nver" +fi From 47a0a2f4461bf84c75d94956d9baa2bb8b02b7ed Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 30 Sep 2020 13:32:47 +0300 Subject: [PATCH 237/439] usr/Makefile: support relative paths for objtree Signed-off-by: Denis Efremov --- scripts/gen_initramfs_list.sh | 12 +++++++++++- usr/Makefile | 4 +++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index 86a3c0e5cfbc..a9c0936f752c 100755 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh @@ -203,11 +203,21 @@ input_file() { fi if [ -z ${dep_list} ]; then print_mtime "$1" >> ${output} - cat "$1" >> ${output} + cat "$1" | while read type dir file perm ; do + if [ "$type" = "file" ]; then + if [ "$1" != "${1#/}" ]; then + file="$(readlink -f "${srctree}/${file}")" + fi + fi + echo $type "${dir}" "${file}" $perm >> ${output} + done else echo "$1 \\" cat "$1" | while read type dir file perm ; do if [ "$type" = "file" ]; then + if [ "$1" != "${1#/}" ]; then + file="$(readlink -f "${srctree}/${file}")" + fi echo "$file \\"; fi done diff --git a/usr/Makefile b/usr/Makefile index 237a028693ce..024ca58c0e39 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -26,7 +26,9 @@ $(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE hostprogs-y := gen_init_cpio initramfs := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \ - $(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d) + $(if $(patsubst /%,,$(CONFIG_INITRAMFS_SOURCE)), \ + $(abspath $(srctree)/$(CONFIG_INITRAMFS_SOURCE)), \ + $(CONFIG_INITRAMFS_SOURCE)),-d) ramfs-args := \ $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \ $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) From 9e84ba6ee2631053d14b21718622f8f97984f009 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 18:36:48 +0300 Subject: [PATCH 238/439] drivers: cpufreq: replace performance 
with schedutil as fallback sched Default fallback scheduler for ondemand and conservative schedulers is performance one. This commit makes schedutil the default fallback scheduler. Signed-off-by: Denis Efremov --- drivers/cpufreq/Kconfig | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2057f36da919..bf084b3a6715 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -80,7 +80,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" select CPU_FREQ_GOV_ONDEMAND - select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_SCHEDUTIL help Use the CPUFreq governor 'ondemand' as default. This allows you to get a full dynamic frequency capable system by simply @@ -92,7 +92,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE bool "conservative" select CPU_FREQ_GOV_CONSERVATIVE - select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_SCHEDUTIL help Use the CPUFreq governor 'conservative' as default. This allows you to get a full dynamic frequency capable system by simply @@ -105,7 +105,6 @@ config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL bool "schedutil" depends on SMP select CPU_FREQ_GOV_SCHEDUTIL - select CPU_FREQ_GOV_PERFORMANCE help Use the 'schedutil' CPUFreq governor by default. If unsure, have a look at the help section of that governor. 
The fallback From 79e44f77abc8af79d4f4c4d053bfb706f10270af Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 18 Mar 2020 09:49:27 +0300 Subject: [PATCH 239/439] Makefile: add KCONFIG_BUILTINCONFIG Signed-off-by: Denis Efremov --- Makefile | 2 ++ kernel/Makefile | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cce28dd381d6..9190284c413b 100644 --- a/Makefile +++ b/Makefile @@ -350,7 +350,9 @@ endif hdr-arch := $(SRCARCH) KCONFIG_CONFIG ?= .config +KCONFIG_BUILTINCONFIG ?= $(KCONFIG_CONFIG) export KCONFIG_CONFIG +export KCONFIG_BUILTINCONFIG # SHELL used by kbuild CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ diff --git a/kernel/Makefile b/kernel/Makefile index 1d5db8de62a8..919682930875 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -122,7 +122,7 @@ obj-$(CONFIG_INTELLIGENCE) += intelligence.o $(obj)/configs.o: $(obj)/config_data.h targets += config_data.gz -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE +$(obj)/config_data.gz: $(KCONFIG_BUILTINCONFIG) FORCE $(call if_changed,gzip) filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/basic/bin2c; echo "MAGIC_END;") From fa075cf6dba4fd332d45243bc4fcce86091d1ee0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 30 Nov 2020 21:10:56 +0300 Subject: [PATCH 240/439] HZ: add 50hz config Signed-off-by: Denis Efremov --- kernel/Kconfig.hz | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 2a202a846757..2305717d4057 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -15,6 +15,8 @@ choice environment leading to NR_CPUS * HZ number of timer interrupts per second. 
+ config HZ_50 + bool "50 HZ" config HZ_100 bool "100 HZ" @@ -49,6 +51,7 @@ endchoice config HZ int + default 50 if HZ_50 default 100 if HZ_100 default 250 if HZ_250 default 300 if HZ_300 From 758e0da8f440d9e6128d46aa985800853c8cd339 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 3 Jan 2021 16:50:35 +0300 Subject: [PATCH 241/439] HZ: add 25hz config Signed-off-by: Denis Efremov --- kernel/Kconfig.hz | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 2305717d4057..13424b6381ea 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -15,6 +15,9 @@ choice environment leading to NR_CPUS * HZ number of timer interrupts per second. + config HZ_25 + bool "25 HZ" + config HZ_50 bool "50 HZ" @@ -51,6 +54,7 @@ endchoice config HZ int + default 25 if HZ_25 default 50 if HZ_50 default 100 if HZ_100 default 250 if HZ_250 From e8933c1d0639b6895937c7c9c362437bfb9b0ffe Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Sat, 21 Apr 2018 00:08:51 -0700 Subject: [PATCH 242/439] battery: sec_battery: export {CURRENT/VOLTAGE}_MAX to sysfs --- drivers/battery_v2/sec_battery.c | 36 +++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/battery_v2/sec_battery.c b/drivers/battery_v2/sec_battery.c index 5eb41950e4c8..8a69238af42e 100644 --- a/drivers/battery_v2/sec_battery.c +++ b/drivers/battery_v2/sec_battery.c @@ -55,16 +55,22 @@ static enum power_supply_property sec_battery_props[] = { static enum power_supply_property sec_power_props[] = { POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static enum power_supply_property sec_wireless_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static enum power_supply_property sec_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static 
enum power_supply_property sec_ps_props[] = { @@ -6675,8 +6681,20 @@ static int sec_usb_get_property(struct power_supply *psy, { struct sec_battery_info *battery = power_supply_get_drvdata(psy); - if (psp != POWER_SUPPLY_PROP_ONLINE) + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; + default: return -EINVAL; + } if ((battery->health == POWER_SUPPLY_HEALTH_OVERVOLTAGE) || (battery->health == POWER_SUPPLY_HEALTH_UNDERVOLTAGE)) { @@ -6753,6 +6771,14 @@ static int sec_ac_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TEMP: val->intval = battery->chg_temp; break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; case POWER_SUPPLY_PROP_MAX ... 
POWER_SUPPLY_EXT_PROP_MAX: switch (ext_psp) { case POWER_SUPPLY_EXT_PROP_WATER_DETECT: @@ -6801,6 +6827,14 @@ static int sec_wireless_get_property(struct power_supply *psy, else val->intval = 0; break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; default: return -EINVAL; } From a4aa53940e8e6d07eecbd7923579f13069b8fe7e Mon Sep 17 00:00:00 2001 From: Paul Keith Date: Fri, 2 Mar 2018 04:51:53 +0100 Subject: [PATCH 243/439] fs: sdfat: Add config option to register sdFAT for exFAT Change-Id: Id57abf0a4bd0b433fecc622eecb383cd4ea29d17 Signed-off-by: Paul Keith --- fs/sdfat/Kconfig | 7 +++++++ fs/sdfat/sdfat.c | 26 +++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index e849b25af347..62eb87bab6f6 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -15,6 +15,13 @@ config SDFAT_FS To compile this as a module, choose M here: the module will be called sdfat_core and sdfat_fs. +config SDFAT_USE_FOR_EXFAT + bool "Register sdFAT as exFAT" + default y + depends on SDFAT_FS && !EXFAT_FS + help + If you want to register sdFAT as available for exFAT, say Y. 
+ config SDFAT_DELAYED_META_DIRTY bool "Enable delayed metadata dirty" default y diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 516e15129cf5..464402aab7b5 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -5177,6 +5177,20 @@ static struct file_system_type sdfat_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT +static struct file_system_type exfat_fs_type = { + .owner = THIS_MODULE, + .name = "exfat", + .mount = sdfat_fs_mount, +#ifdef CONFIG_SDFAT_DBG_IOCTL + .kill_sb = sdfat_debug_kill_sb, +#else + .kill_sb = kill_block_super, +#endif /* CONFIG_SDFAT_DBG_IOCTL */ + .fs_flags = FS_REQUIRES_DEV, +}; +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ + static int __init init_sdfat_fs(void) { int err; @@ -5219,6 +5233,14 @@ static int __init init_sdfat_fs(void) goto error; } +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT + err = register_filesystem(&exfat_fs_type); + if (err) { + pr_err("[SDFAT] failed to register for exfat filesystem\n"); + goto error; + } +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ + return 0; error: sdfat_uevent_uninit(); @@ -5257,7 +5279,9 @@ static void __exit exit_sdfat_fs(void) sdfat_destroy_inodecache(); unregister_filesystem(&sdfat_fs_type); - +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT + unregister_filesystem(&exfat_fs_type); +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ fsapi_shutdown(); } From 46145dce4b5e27f6ad824e109e079e36371c9b07 Mon Sep 17 00:00:00 2001 From: Paul Keith Date: Fri, 2 Mar 2018 05:10:27 +0100 Subject: [PATCH 244/439] fs: sdfat: Add config option to register sdFAT for VFAT --- fs/sdfat/Kconfig | 7 +++++++ fs/sdfat/sdfat.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index 62eb87bab6f6..bcad29f51b3e 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -22,6 +22,13 @@ config SDFAT_USE_FOR_EXFAT help If you want to register sdFAT as available for exFAT, say Y. 
+config SDFAT_USE_FOR_VFAT + bool "Register sdFAT as VFAT" + default y + depends on SDFAT_FS && !VFAT_FS + help + If you want to register sdFAT as available for VFAT, say Y. + config SDFAT_DELAYED_META_DIRTY bool "Enable delayed metadata dirty" default y diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 464402aab7b5..041087b36c3f 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -4644,6 +4644,12 @@ enum { Opt_discard, Opt_fs, Opt_adj_req, +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + Opt_shortname_lower, + Opt_shortname_win95, + Opt_shortname_winnt, + Opt_shortname_mixed, +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ }; static const match_table_t sdfat_tokens = { @@ -4672,6 +4678,12 @@ static const match_table_t sdfat_tokens = { {Opt_discard, "discard"}, {Opt_fs, "fs=%s"}, {Opt_adj_req, "adj_req"}, +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + {Opt_shortname_lower, "shortname=lower"}, + {Opt_shortname_win95, "shortname=win95"}, + {Opt_shortname_winnt, "shortname=winnt"}, + {Opt_shortname_mixed, "shortname=mixed"}, +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ {Opt_err, NULL} }; @@ -4838,6 +4850,14 @@ static int parse_options(struct super_block *sb, char *options, int silent, IMSG("adjust request config is not enabled. ignore\n"); #endif break; +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + case Opt_shortname_lower: + case Opt_shortname_win95: + case Opt_shortname_mixed: + pr_warn("[SDFAT] DRAGONS AHEAD! 
sdFAT only understands \"shortname=winnt\"!\n"); + case Opt_shortname_winnt: + break; +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ default: if (!silent) { sdfat_msg(sb, KERN_ERR, @@ -5191,6 +5211,20 @@ static struct file_system_type exfat_fs_type = { }; #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT +static struct file_system_type vfat_fs_type = { + .owner = THIS_MODULE, + .name = "vfat", + .mount = sdfat_fs_mount, +#ifdef CONFIG_SDFAT_DBG_IOCTL + .kill_sb = sdfat_debug_kill_sb, +#else + .kill_sb = kill_block_super, +#endif /* CONFIG_SDFAT_DBG_IOCTL */ + .fs_flags = FS_REQUIRES_DEV, +}; +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ + static int __init init_sdfat_fs(void) { int err; @@ -5241,6 +5275,14 @@ static int __init init_sdfat_fs(void) } #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + err = register_filesystem(&vfat_fs_type); + if (err) { + pr_err("[SDFAT] failed to register for vfat filesystem\n"); + goto error; + } +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ + return 0; error: sdfat_uevent_uninit(); @@ -5282,6 +5324,9 @@ static void __exit exit_sdfat_fs(void) #ifdef CONFIG_SDFAT_USE_FOR_EXFAT unregister_filesystem(&exfat_fs_type); #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + unregister_filesystem(&vfat_fs_type); +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ fsapi_shutdown(); } From 2af94dd8ee315ccc7b18fa5f260c476a11b2b6d3 Mon Sep 17 00:00:00 2001 From: Paul Keith Date: Wed, 28 Mar 2018 19:52:29 +0200 Subject: [PATCH 245/439] fs: sdfat: Add MODULE_ALIAS_FS for supported filesystems --- fs/sdfat/sdfat.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 041087b36c3f..9b7a63581160 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -5196,6 +5196,7 @@ static struct file_system_type sdfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("sdfat"); #ifdef CONFIG_SDFAT_USE_FOR_EXFAT static struct 
file_system_type exfat_fs_type = { @@ -5209,6 +5210,7 @@ static struct file_system_type exfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("exfat"); #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ #ifdef CONFIG_SDFAT_USE_FOR_VFAT @@ -5223,6 +5225,7 @@ static struct file_system_type vfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("vfat"); #endif /* CONFIG_SDFAT_USE_FOR_VFAT */ static int __init init_sdfat_fs(void) From 22628fe884f7bfbcd6cc6d176ea1396f983cf794 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 00:28:34 +0300 Subject: [PATCH 246/439] fs: sdfat: don't use sdfat for exfat/vfat by default Signed-off-by: Denis Efremov --- fs/sdfat/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index bcad29f51b3e..7b0e7777ea5c 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -17,14 +17,14 @@ config SDFAT_FS config SDFAT_USE_FOR_EXFAT bool "Register sdFAT as exFAT" - default y + default n depends on SDFAT_FS && !EXFAT_FS help If you want to register sdFAT as available for exFAT, say Y. config SDFAT_USE_FOR_VFAT bool "Register sdFAT as VFAT" - default y + default n depends on SDFAT_FS && !VFAT_FS help If you want to register sdFAT as available for VFAT, say Y. 
From 26d847fe9520fe0f7ab6ff9f89f183be6d6bd8d9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 00:39:44 +0300 Subject: [PATCH 247/439] net: wireguard: add wireguard module Signed-off-by: Denis Efremov --- net/Kconfig | 1 + net/Makefile | 1 + net/wireguard/Kconfig | 33 + net/wireguard/Makefile | 15 + net/wireguard/allowedips.c | 386 + net/wireguard/allowedips.h | 59 + net/wireguard/compat/Makefile.include | 111 + .../compat/checksum/checksum_partial_compat.h | 208 + net/wireguard/compat/compat-asm.h | 86 + net/wireguard/compat/compat.h | 1199 +++ net/wireguard/compat/dst_cache/dst_cache.c | 177 + .../compat/dst_cache/include/net/dst_cache.h | 97 + .../dstmetadata/include/net/dst_metadata.h | 3 + .../compat/fpu-x86/include/asm/fpu/api.h | 1 + .../include/asm/intel-family.h | 73 + net/wireguard/compat/memneq/include.h | 5 + net/wireguard/compat/memneq/memneq.c | 170 + .../compat/neon-arm/include/asm/neon.h | 7 + .../compat/ptr_ring/include/linux/ptr_ring.h | 674 ++ .../compat/simd-asm/include/asm/simd.h | 21 + .../compat/simd/include/linux/simd.h | 69 + .../compat/siphash/include/linux/siphash.h | 134 + net/wireguard/compat/siphash/siphash.c | 539 + .../skb_array/include/linux/skb_array.h | 11 + .../udp_tunnel/include/net/udp_tunnel.h | 94 + net/wireguard/compat/udp_tunnel/udp_tunnel.c | 396 + .../udp_tunnel/udp_tunnel_partial_compat.h | 226 + net/wireguard/compat/version/linux/version.h | 10 + net/wireguard/cookie.c | 236 + net/wireguard/cookie.h | 59 + net/wireguard/crypto/Makefile.include | 57 + net/wireguard/crypto/include/zinc/blake2s.h | 56 + net/wireguard/crypto/include/zinc/chacha20.h | 70 + .../crypto/include/zinc/chacha20poly1305.h | 50 + .../crypto/include/zinc/curve25519.h | 28 + net/wireguard/crypto/include/zinc/poly1305.h | 31 + net/wireguard/crypto/zinc.h | 15 + .../crypto/zinc/blake2s/blake2s-x86_64-glue.c | 72 + .../crypto/zinc/blake2s/blake2s-x86_64.S | 258 + net/wireguard/crypto/zinc/blake2s/blake2s.c | 271 + 
.../crypto/zinc/chacha20/chacha20-arm-glue.c | 98 + .../crypto/zinc/chacha20/chacha20-arm.pl | 1227 +++ .../crypto/zinc/chacha20/chacha20-arm64.pl | 1163 +++ .../crypto/zinc/chacha20/chacha20-mips-glue.c | 27 + .../crypto/zinc/chacha20/chacha20-mips.S | 424 + .../zinc/chacha20/chacha20-unrolled-arm.S | 461 + .../zinc/chacha20/chacha20-x86_64-glue.c | 105 + .../crypto/zinc/chacha20/chacha20-x86_64.pl | 4106 ++++++++ net/wireguard/crypto/zinc/chacha20/chacha20.c | 191 + net/wireguard/crypto/zinc/chacha20poly1305.c | 398 + .../zinc/curve25519/curve25519-arm-glue.c | 43 + .../crypto/zinc/curve25519/curve25519-arm.S | 2064 ++++ .../zinc/curve25519/curve25519-fiat32.c | 860 ++ .../zinc/curve25519/curve25519-hacl64.c | 779 ++ .../zinc/curve25519/curve25519-x86_64-glue.c | 44 + .../zinc/curve25519/curve25519-x86_64.c | 1580 +++ .../crypto/zinc/curve25519/curve25519.c | 109 + .../crypto/zinc/poly1305/poly1305-arm-glue.c | 140 + .../crypto/zinc/poly1305/poly1305-arm.pl | 1276 +++ .../crypto/zinc/poly1305/poly1305-arm64.pl | 974 ++ .../crypto/zinc/poly1305/poly1305-donna32.c | 205 + .../crypto/zinc/poly1305/poly1305-donna64.c | 182 + .../crypto/zinc/poly1305/poly1305-mips-glue.c | 37 + .../crypto/zinc/poly1305/poly1305-mips.S | 407 + .../crypto/zinc/poly1305/poly1305-mips64.pl | 467 + .../zinc/poly1305/poly1305-x86_64-glue.c | 156 + .../crypto/zinc/poly1305/poly1305-x86_64.pl | 4266 ++++++++ net/wireguard/crypto/zinc/poly1305/poly1305.c | 165 + net/wireguard/crypto/zinc/selftest/blake2s.c | 2090 ++++ net/wireguard/crypto/zinc/selftest/chacha20.c | 2698 +++++ .../crypto/zinc/selftest/chacha20poly1305.c | 9076 +++++++++++++++++ .../crypto/zinc/selftest/curve25519.c | 1315 +++ net/wireguard/crypto/zinc/selftest/poly1305.c | 1107 ++ net/wireguard/crypto/zinc/selftest/run.h | 48 + net/wireguard/device.c | 475 + net/wireguard/device.h | 62 + net/wireguard/main.c | 84 + net/wireguard/messages.h | 128 + net/wireguard/netlink.c | 658 ++ net/wireguard/netlink.h | 12 + 
net/wireguard/noise.c | 830 ++ net/wireguard/noise.h | 135 + net/wireguard/peer.c | 240 + net/wireguard/peer.h | 86 + net/wireguard/peerlookup.c | 226 + net/wireguard/peerlookup.h | 64 + net/wireguard/queueing.c | 108 + net/wireguard/queueing.h | 217 + net/wireguard/ratelimiter.c | 235 + net/wireguard/ratelimiter.h | 19 + net/wireguard/receive.c | 602 ++ net/wireguard/selftest/allowedips.c | 676 ++ net/wireguard/selftest/counter.c | 111 + net/wireguard/selftest/ratelimiter.c | 226 + net/wireguard/send.c | 420 + net/wireguard/socket.c | 437 + net/wireguard/socket.h | 44 + net/wireguard/timers.c | 243 + net/wireguard/timers.h | 31 + net/wireguard/uapi/wireguard.h | 196 + net/wireguard/version.h | 3 + 101 files changed, 50865 insertions(+) create mode 100644 net/wireguard/Kconfig create mode 100644 net/wireguard/Makefile create mode 100644 net/wireguard/allowedips.c create mode 100644 net/wireguard/allowedips.h create mode 100644 net/wireguard/compat/Makefile.include create mode 100644 net/wireguard/compat/checksum/checksum_partial_compat.h create mode 100644 net/wireguard/compat/compat-asm.h create mode 100644 net/wireguard/compat/compat.h create mode 100644 net/wireguard/compat/dst_cache/dst_cache.c create mode 100644 net/wireguard/compat/dst_cache/include/net/dst_cache.h create mode 100644 net/wireguard/compat/dstmetadata/include/net/dst_metadata.h create mode 100644 net/wireguard/compat/fpu-x86/include/asm/fpu/api.h create mode 100644 net/wireguard/compat/intel-family-x86/include/asm/intel-family.h create mode 100644 net/wireguard/compat/memneq/include.h create mode 100644 net/wireguard/compat/memneq/memneq.c create mode 100644 net/wireguard/compat/neon-arm/include/asm/neon.h create mode 100644 net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h create mode 100644 net/wireguard/compat/simd-asm/include/asm/simd.h create mode 100644 net/wireguard/compat/simd/include/linux/simd.h create mode 100644 net/wireguard/compat/siphash/include/linux/siphash.h create mode 
100644 net/wireguard/compat/siphash/siphash.c create mode 100644 net/wireguard/compat/skb_array/include/linux/skb_array.h create mode 100644 net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h create mode 100644 net/wireguard/compat/udp_tunnel/udp_tunnel.c create mode 100644 net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h create mode 100644 net/wireguard/compat/version/linux/version.h create mode 100644 net/wireguard/cookie.c create mode 100644 net/wireguard/cookie.h create mode 100644 net/wireguard/crypto/Makefile.include create mode 100644 net/wireguard/crypto/include/zinc/blake2s.h create mode 100644 net/wireguard/crypto/include/zinc/chacha20.h create mode 100644 net/wireguard/crypto/include/zinc/chacha20poly1305.h create mode 100644 net/wireguard/crypto/include/zinc/curve25519.h create mode 100644 net/wireguard/crypto/include/zinc/poly1305.h create mode 100644 net/wireguard/crypto/zinc.h create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-mips.S create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20.c create mode 100644 net/wireguard/crypto/zinc/chacha20poly1305.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-arm.S 
create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips.S create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305.c create mode 100644 net/wireguard/crypto/zinc/selftest/blake2s.c create mode 100644 net/wireguard/crypto/zinc/selftest/chacha20.c create mode 100644 net/wireguard/crypto/zinc/selftest/chacha20poly1305.c create mode 100644 net/wireguard/crypto/zinc/selftest/curve25519.c create mode 100644 net/wireguard/crypto/zinc/selftest/poly1305.c create mode 100644 net/wireguard/crypto/zinc/selftest/run.h create mode 100644 net/wireguard/device.c create mode 100644 net/wireguard/device.h create mode 100644 net/wireguard/main.c create mode 100644 net/wireguard/messages.h create mode 100644 net/wireguard/netlink.c create mode 100644 net/wireguard/netlink.h create mode 100644 net/wireguard/noise.c create mode 100644 net/wireguard/noise.h create mode 100644 net/wireguard/peer.c create mode 100644 net/wireguard/peer.h create mode 
100644 net/wireguard/peerlookup.c create mode 100644 net/wireguard/peerlookup.h create mode 100644 net/wireguard/queueing.c create mode 100644 net/wireguard/queueing.h create mode 100644 net/wireguard/ratelimiter.c create mode 100644 net/wireguard/ratelimiter.h create mode 100644 net/wireguard/receive.c create mode 100644 net/wireguard/selftest/allowedips.c create mode 100644 net/wireguard/selftest/counter.c create mode 100644 net/wireguard/selftest/ratelimiter.c create mode 100644 net/wireguard/send.c create mode 100644 net/wireguard/socket.c create mode 100644 net/wireguard/socket.h create mode 100644 net/wireguard/timers.c create mode 100644 net/wireguard/timers.h create mode 100644 net/wireguard/uapi/wireguard.h create mode 100644 net/wireguard/version.h diff --git a/net/Kconfig b/net/Kconfig index dd36e445c7a1..c22a07b94c7e 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -85,6 +85,7 @@ config INET Short answer: say Y. if INET +source "net/wireguard/Kconfig" source "net/ipv4/Kconfig" source "net/ipv6/Kconfig" source "net/netlabel/Kconfig" diff --git a/net/Makefile b/net/Makefile index 864f6593220e..26bddd8997e2 100644 --- a/net/Makefile +++ b/net/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET) += $(tmp-y) obj-$(CONFIG_LLC) += llc/ obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ bpf/ obj-$(CONFIG_NETFILTER) += netfilter/ +obj-$(CONFIG_WIREGUARD) += wireguard/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ diff --git a/net/wireguard/Kconfig b/net/wireguard/Kconfig new file mode 100644 index 000000000000..156e9dbfc051 --- /dev/null +++ b/net/wireguard/Kconfig @@ -0,0 +1,33 @@ +config WIREGUARD + tristate "IP: WireGuard secure network tunnel" + depends on NET && INET + depends on IPV6 || !IPV6 + select NET_UDP_TUNNEL + select DST_CACHE + select CRYPTO + select CRYPTO_ALGAPI + select VFP + select VFPv3 if CPU_V7 + select NEON if CPU_V7 + select KERNEL_MODE_NEON if CPU_V7 + default m + help + WireGuard is a secure, fast, and easy to 
use replacement for IPsec + that uses modern cryptography and clever networking tricks. It's + designed to be fairly general purpose and abstract enough to fit most + use cases, while at the same time remaining extremely simple to + configure. See www.wireguard.com for more info. + + It's safe to say Y or M here, as the driver is very lightweight and + is only in use when an administrator chooses to add an interface. + +config WIREGUARD_DEBUG + bool "Debugging checks and verbose messages" + depends on WIREGUARD + help + This will write log messages for handshake and other events + that occur for a WireGuard interface. It will also perform some + extra validation checks and unit tests at various points. This is + only useful for debugging. + + Say N here unless you know what you're doing. diff --git a/net/wireguard/Makefile b/net/wireguard/Makefile new file mode 100644 index 000000000000..c17546eaeedc --- /dev/null +++ b/net/wireguard/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + +ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' +ccflags-y += -Wframe-larger-than=2048 +ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -g +ccflags-$(if $(WIREGUARD_VERSION),y,) += -D'WIREGUARD_VERSION="$(WIREGUARD_VERSION)"' + +wireguard-y := main.o noise.o device.o peer.o timers.o queueing.o send.o receive.o socket.o peerlookup.o allowedips.o ratelimiter.o cookie.o netlink.o + +include $(src)/crypto/Makefile.include +include $(src)/compat/Makefile.include + +obj-$(if $(KBUILD_EXTMOD),m,$(CONFIG_WIREGUARD)) := wireguard.o diff --git a/net/wireguard/allowedips.c b/net/wireguard/allowedips.c new file mode 100644 index 000000000000..9a4c8ff32d9d --- /dev/null +++ b/net/wireguard/allowedips.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "allowedips.h" +#include "peer.h" + +static struct kmem_cache *node_cache; + +static void swap_endian(u8 *dst, const u8 *src, u8 bits) +{ + if (bits == 32) { + *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); + } else if (bits == 128) { + ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); + ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); + } +} + +static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, + u8 cidr, u8 bits) +{ + node->cidr = cidr; + node->bit_at_a = cidr / 8U; +#ifdef __LITTLE_ENDIAN + node->bit_at_a ^= (bits / 8U - 1U) % 8U; +#endif + node->bit_at_b = 7U - (cidr % 8U); + node->bitlen = bits; + memcpy(node->bits, src, bits / 8U); +} + +static inline u8 choose(struct allowedips_node *node, const u8 *key) +{ + return (key[node->bit_at_a] >> node->bit_at_b) & 1; +} + +static void push_rcu(struct allowedips_node **stack, + struct allowedips_node __rcu *p, unsigned int *len) +{ + if (rcu_access_pointer(p)) { + WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); + stack[(*len)++] = rcu_dereference_raw(p); + } +} + +static void node_free_rcu(struct rcu_head *rcu) +{ + kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); +} + +static void root_free_rcu(struct rcu_head *rcu) +{ + struct allowedips_node *node, *stack[128] = { + container_of(rcu, struct allowedips_node, rcu) }; + unsigned int len = 1; + + while (len > 0 && (node = stack[--len])) { + push_rcu(stack, node->bit[0], &len); + push_rcu(stack, node->bit[1], &len); + kmem_cache_free(node_cache, node); + } +} + +static void root_remove_peer_lists(struct allowedips_node *root) +{ + struct allowedips_node *node, *stack[128] = { root }; + unsigned int len = 1; + + while (len > 0 && (node = stack[--len])) { + push_rcu(stack, node->bit[0], &len); + push_rcu(stack, node->bit[1], &len); + if (rcu_access_pointer(node->peer)) + list_del(&node->peer_list); + } +} + +static unsigned int fls128(u64 a, u64 b) +{ + return a ? 
fls64(a) + 64U : fls64(b); +} + +static u8 common_bits(const struct allowedips_node *node, const u8 *key, + u8 bits) +{ + if (bits == 32) + return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); + else if (bits == 128) + return 128U - fls128( + *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], + *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); + return 0; +} + +static bool prefix_matches(const struct allowedips_node *node, const u8 *key, + u8 bits) +{ + /* This could be much faster if it actually just compared the common + * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and + * the rest, but it turns out that common_bits is already super fast on + * modern processors, even taking into account the unfortunate bswap. + * So, we just inline it like this instead. + */ + return common_bits(node, key, bits) >= node->cidr; +} + +static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, + const u8 *key) +{ + struct allowedips_node *node = trie, *found = NULL; + + while (node && prefix_matches(node, key, bits)) { + if (rcu_access_pointer(node->peer)) + found = node; + if (node->cidr == bits) + break; + node = rcu_dereference_bh(node->bit[choose(node, key)]); + } + return found; +} + +/* Returns a strong reference to a peer */ +static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, + const void *be_ip) +{ + /* Aligned so it can be passed to fls/fls64 */ + u8 ip[16] __aligned(__alignof(u64)); + struct allowedips_node *node; + struct wg_peer *peer = NULL; + + swap_endian(ip, be_ip, bits); + + rcu_read_lock_bh(); +retry: + node = find_node(rcu_dereference_bh(root), bits, ip); + if (node) { + peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); + if (!peer) + goto retry; + } + rcu_read_unlock_bh(); + return peer; +} + +static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, + u8 cidr, u8 bits, struct allowedips_node **rnode, + struct mutex *lock) +{ + struct 
allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); + struct allowedips_node *parent = NULL; + bool exact = false; + + while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { + parent = node; + if (parent->cidr == cidr) { + exact = true; + break; + } + node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); + } + *rnode = parent; + return exact; +} + +static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) +{ + node->parent_bit_packed = (unsigned long)parent | bit; + rcu_assign_pointer(*parent, node); +} + +static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) +{ + u8 bit = choose(parent, node->bits); + connect_node(&parent->bit[bit], bit, node); +} + +static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + struct allowedips_node *node, *parent, *down, *newnode; + + if (unlikely(cidr > bits || !peer)) + return -EINVAL; + + if (!rcu_access_pointer(*trie)) { + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!node)) + return -ENOMEM; + RCU_INIT_POINTER(node->peer, peer); + list_add_tail(&node->peer_list, &peer->allowedips_list); + copy_and_assign_cidr(node, key, cidr, bits); + connect_node(trie, 2, node); + return 0; + } + if (node_placement(*trie, key, cidr, bits, &node, lock)) { + rcu_assign_pointer(node->peer, peer); + list_move_tail(&node->peer_list, &peer->allowedips_list); + return 0; + } + + newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!newnode)) + return -ENOMEM; + RCU_INIT_POINTER(newnode->peer, peer); + list_add_tail(&newnode->peer_list, &peer->allowedips_list); + copy_and_assign_cidr(newnode, key, cidr, bits); + + if (!node) { + down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); + } else { + const u8 bit = choose(node, key); + down = 
rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); + if (!down) { + connect_node(&node->bit[bit], bit, newnode); + return 0; + } + } + cidr = min(cidr, common_bits(down, key, bits)); + parent = node; + + if (newnode->cidr == cidr) { + choose_and_connect_node(newnode, down); + if (!parent) + connect_node(trie, 2, newnode); + else + choose_and_connect_node(parent, newnode); + return 0; + } + + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!node)) { + list_del(&newnode->peer_list); + kmem_cache_free(node_cache, newnode); + return -ENOMEM; + } + INIT_LIST_HEAD(&node->peer_list); + copy_and_assign_cidr(node, newnode->bits, cidr, bits); + + choose_and_connect_node(node, down); + choose_and_connect_node(node, newnode); + if (!parent) + connect_node(trie, 2, node); + else + choose_and_connect_node(parent, node); + return 0; +} + +void wg_allowedips_init(struct allowedips *table) +{ + table->root4 = table->root6 = NULL; + table->seq = 1; +} + +void wg_allowedips_free(struct allowedips *table, struct mutex *lock) +{ + struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; + + ++table->seq; + RCU_INIT_POINTER(table->root4, NULL); + RCU_INIT_POINTER(table->root6, NULL); + if (rcu_access_pointer(old4)) { + struct allowedips_node *node = rcu_dereference_protected(old4, + lockdep_is_held(lock)); + + root_remove_peer_lists(node); + call_rcu(&node->rcu, root_free_rcu); + } + if (rcu_access_pointer(old6)) { + struct allowedips_node *node = rcu_dereference_protected(old6, + lockdep_is_held(lock)); + + root_remove_peer_lists(node); + call_rcu(&node->rcu, root_free_rcu); + } +} + +int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + /* Aligned so it can be passed to fls */ + u8 key[4] __aligned(__alignof(u32)); + + ++table->seq; + swap_endian(key, (const u8 *)ip, 32); + return add(&table->root4, 32, key, cidr, peer, lock); +} + +int 
wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + /* Aligned so it can be passed to fls64 */ + u8 key[16] __aligned(__alignof(u64)); + + ++table->seq; + swap_endian(key, (const u8 *)ip, 128); + return add(&table->root6, 128, key, cidr, peer, lock); +} + +void wg_allowedips_remove_by_peer(struct allowedips *table, + struct wg_peer *peer, struct mutex *lock) +{ + struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; + bool free_parent; + + if (list_empty(&peer->allowedips_list)) + return; + ++table->seq; + list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { + list_del_init(&node->peer_list); + RCU_INIT_POINTER(node->peer, NULL); + if (node->bit[0] && node->bit[1]) + continue; + child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], + lockdep_is_held(lock)); + if (child) + child->parent_bit_packed = node->parent_bit_packed; + parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); + *parent_bit = child; + parent = (void *)parent_bit - + offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); + free_parent = !rcu_access_pointer(node->bit[0]) && + !rcu_access_pointer(node->bit[1]) && + (node->parent_bit_packed & 3) <= 1 && + !rcu_access_pointer(parent->peer); + if (free_parent) + child = rcu_dereference_protected( + parent->bit[!(node->parent_bit_packed & 1)], + lockdep_is_held(lock)); + call_rcu(&node->rcu, node_free_rcu); + if (!free_parent) + continue; + if (child) + child->parent_bit_packed = parent->parent_bit_packed; + *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; + call_rcu(&parent->rcu, node_free_rcu); + } +} + +int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) +{ + const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); + swap_endian(ip, node->bits, node->bitlen); + memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); + if 
(node->cidr) + ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); + + *cidr = node->cidr; + return node->bitlen == 32 ? AF_INET : AF_INET6; +} + +/* Returns a strong reference to a peer */ +struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, + struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return lookup(table->root4, 32, &ip_hdr(skb)->daddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); + return NULL; +} + +/* Returns a strong reference to a peer */ +struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return lookup(table->root4, 32, &ip_hdr(skb)->saddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); + return NULL; +} + +int __init wg_allowedips_slab_init(void) +{ + node_cache = KMEM_CACHE(allowedips_node, 0); + return node_cache ? 0 : -ENOMEM; +} + +void wg_allowedips_slab_uninit(void) +{ + rcu_barrier(); + kmem_cache_destroy(node_cache); +} + +#include "selftest/allowedips.c" diff --git a/net/wireguard/allowedips.h b/net/wireguard/allowedips.h new file mode 100644 index 000000000000..2346c797eb4d --- /dev/null +++ b/net/wireguard/allowedips.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_ALLOWEDIPS_H +#define _WG_ALLOWEDIPS_H + +#include +#include +#include + +struct wg_peer; + +struct allowedips_node { + struct wg_peer __rcu *peer; + struct allowedips_node __rcu *bit[2]; + u8 cidr, bit_at_a, bit_at_b, bitlen; + u8 bits[16] __aligned(__alignof(u64)); + + /* Keep rarely used members at bottom to be beyond cache line. 
*/ + unsigned long parent_bit_packed; + union { + struct list_head peer_list; + struct rcu_head rcu; + }; +}; + +struct allowedips { + struct allowedips_node __rcu *root4; + struct allowedips_node __rcu *root6; + u64 seq; +} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */ + +void wg_allowedips_init(struct allowedips *table); +void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); +int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock); +int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock); +void wg_allowedips_remove_by_peer(struct allowedips *table, + struct wg_peer *peer, struct mutex *lock); +/* The ip input pointer should be __aligned(__alignof(u64))) */ +int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr); + +/* These return a strong reference to a peer: */ +struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, + struct sk_buff *skb); +struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + struct sk_buff *skb); + +#ifdef DEBUG +bool wg_allowedips_selftest(void); +#endif + +int wg_allowedips_slab_init(void); +void wg_allowedips_slab_uninit(void); + +#endif /* _WG_ALLOWEDIPS_H */ diff --git a/net/wireguard/compat/Makefile.include b/net/wireguard/compat/Makefile.include new file mode 100644 index 000000000000..df7670ae8d6c --- /dev/null +++ b/net/wireguard/compat/Makefile.include @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ +kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) + +ccflags-y += -include $(kbuild-dir)/compat/compat.h +asflags-y += -include $(kbuild-dir)/compat/compat-asm.h +LINUXINCLUDE := -DCOMPAT_VERSION=$(VERSION) -DCOMPAT_PATCHLEVEL=$(PATCHLEVEL) -DCOMPAT_SUBLEVEL=$(SUBLEVEL) -I$(kbuild-dir)/compat/version $(LINUXINCLUDE) + +ifeq ($(wildcard $(srctree)/include/linux/ptr_ring.h),) +ccflags-y += -I$(kbuild-dir)/compat/ptr_ring/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/skb_array.h),) +ccflags-y += -I$(kbuild-dir)/compat/skb_array/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/siphash.h),) +ccflags-y += -I$(kbuild-dir)/compat/siphash/include +wireguard-y += compat/siphash/siphash.o +endif + +ifeq ($(wildcard $(srctree)/include/net/dst_cache.h),) +ccflags-y += -I$(kbuild-dir)/compat/dst_cache/include +wireguard-y += compat/dst_cache/dst_cache.o +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/intel-family.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/intel-family-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/fpu/api.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/fpu-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/simd.h)$(shell grep -s -F "generic-y += simd.h" "$(srctree)/arch/$(SRCARCH)/Makefile" "$(srctree)/arch/$(SRCARCH)/Makefile"),) +ccflags-y += -I$(kbuild-dir)/compat/simd-asm/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/simd.h),) +ccflags-y += -I$(kbuild-dir)/compat/simd/include +endif + +ifeq ($(wildcard $(srctree)/include/net/udp_tunnel.h),) +ccflags-y += -I$(kbuild-dir)/compat/udp_tunnel/include +wireguard-y += compat/udp_tunnel/udp_tunnel.o +endif + +ifeq ($(shell grep -s -F "int crypto_memneq" "$(srctree)/include/crypto/algapi.h"),) +ccflags-y += -include $(kbuild-dir)/compat/memneq/include.h +wireguard-y += compat/memneq/memneq.o +endif + +ifeq ($(shell grep -s -F "addr_gen_mode" "$(srctree)/include/linux/ipv6.h"),) 
+ccflags-y += -DCOMPAT_CANNOT_USE_DEV_CNF +endif + +ifdef CONFIG_HZ +ifeq ($(wildcard $(CURDIR)/include/generated/timeconst.h),) +ccflags-y += $(shell bash -c '((a=$(CONFIG_HZ), b=1000000)); while ((b > 0)); do ((t=b, b=a%b, a=t)); done; echo "-DHZ_TO_USEC_NUM=$$((1000000/a)) -DHZ_TO_USEC_DEN=$$(($(CONFIG_HZ)/a))";') +endif +endif + +ifeq ($(wildcard $(srctree)/arch/arm/include/asm/neon.h)$(CONFIG_ARM),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif +ifeq ($(wildcard $(srctree)/arch/arm64/include/asm/neon.h)$(CONFIG_ARM64),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif + +ifeq ($(wildcard $(srctree)/include/net/dst_metadata.h),) +ccflags-y += -I$(kbuild-dir)/compat/dstmetadata/include +endif + +ifeq ($(CONFIG_X86_64),y) + ifeq ($(ssse3_instr),) + ssse3_instr := $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) + ccflags-y += $(ssse3_instr) + asflags-y += $(ssse3_instr) + endif + ifeq ($(avx_instr),) + avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) + ccflags-y += $(avx_instr) + asflags-y += $(avx_instr) + endif + ifeq ($(avx2_instr),) + avx2_instr := $(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) + ccflags-y += $(avx2_instr) + asflags-y += $(avx2_instr) + endif + ifeq ($(avx512_instr),) + avx512_instr := $(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) + ccflags-y += $(avx512_instr) + asflags-y += $(avx512_instr) + endif + ifeq ($(bmi2_instr),) + bmi2_instr :=$(call as-instr,mulx %rax$(comma)%rax$(comma)%rax,-DCONFIG_AS_BMI2=1) + ccflags-y += $(bmi2_instr) + asflags-y += $(bmi2_instr) + endif + ifeq ($(adx_instr),) + adx_instr :=$(call as-instr,adcx %rax$(comma)%rax,-DCONFIG_AS_ADX=1) + ccflags-y += $(adx_instr) + asflags-y += $(adx_instr) + endif +endif + +ifneq ($(shell grep -s -F "\#define LINUX_PACKAGE_ID \" Debian " "$(CURDIR)/include/generated/package.h"),) +ccflags-y += -DISDEBIAN +endif diff --git 
a/net/wireguard/compat/checksum/checksum_partial_compat.h b/net/wireguard/compat/checksum/checksum_partial_compat.h new file mode 100644 index 000000000000..3dfe5397a94f --- /dev/null +++ b/net/wireguard/compat/checksum/checksum_partial_compat.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include +#include +#include +#include +#include + +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 +static inline int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, unsigned int max) +{ + if (skb_headlen(skb) >= len) + return 0; + if (max > skb->len) + max = skb->len; + if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) + return -ENOMEM; + if (skb_headlen(skb) < len) + return -EPROTO; + return 0; +} +#define MAX_IP_HDR_LEN 128 +static inline int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate) +{ + unsigned int off; + bool fragment; + int err; + fragment = false; + err = skb_maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) + fragment = true; + off = ip_hdrlen(skb); + err = -EPROTO; + if (fragment) + goto out; + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + 
ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +#define MAX_IPV6_HDR_LEN 256 +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) +static inline int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) +{ + int err; + u8 nexthdr; + unsigned int off; + unsigned int len; + bool fragment; + bool done; + fragment = false; + done = false; + off = sizeof(struct ipv6hdr); + err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + nexthdr = ipv6_hdr(skb)->nexthdr; + len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); + while (off <= len && !done) { + switch (nexthdr) { + case IPPROTO_DSTOPTS: + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: { + struct ipv6_opt_hdr *hp; + + err = skb_maybe_pull_tail(skb, off + sizeof(struct ipv6_opt_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); + nexthdr = hp->nexthdr; + off += ipv6_optlen(hp); + break; + } + case IPPROTO_FRAGMENT: { + struct frag_hdr *hp; + err = skb_maybe_pull_tail(skb, off + sizeof(struct frag_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct frag_hdr, skb, off); + if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) + fragment = true; + nexthdr = hp->nexthdr; + off += sizeof(struct frag_hdr); + break; + } + default: + done = true; + break; + } + } + err = -EPROTO; + if (!done || fragment) + goto out; + switch (nexthdr) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + 
MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +static inline int skb_checksum_setup(struct sk_buff *skb, bool recalculate) +{ + int err; + switch (skb->protocol) { + case htons(ETH_P_IP): + err = skb_checksum_setup_ip(skb, recalculate); + break; + + case htons(ETH_P_IPV6): + err = skb_checksum_setup_ipv6(skb, recalculate); + break; + default: + err = -EPROTO; + break; + } + return err; +} diff --git a/net/wireguard/compat/compat-asm.h b/net/wireguard/compat/compat-asm.h new file mode 100644 index 000000000000..345087bf0de8 --- /dev/null +++ b/net/wireguard/compat/compat-asm.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _WG_COMPATASM_H +#define _WG_COMPATASM_H + +#include +#include +#include + +#ifdef RHEL_MAJOR +#if RHEL_MAJOR == 7 +#define ISRHEL7 +#elif RHEL_MAJOR == 8 +#define ISRHEL8 +#endif +#endif + +/* PaX compatibility */ +#if defined(RAP_PLUGIN) && defined(RAP_ENTRY) +#undef ENTRY +#define ENTRY RAP_ENTRY +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro ret\c, reg +#if __LINUX_ARM_ARCH__ < 6 + mov\c pc, \reg +#else + .ifeqs "\reg", "lr" + bx\c \reg + .else + mov\c pc, \reg + .endif +#endif + .endm + .endr +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#include +#define lspush push +#define lspull pull +#undef push +#undef pull +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISRHEL8) && !defined(SYM_FUNC_START) +#define SYM_FUNC_START ENTRY +#define SYM_FUNC_END ENDPROC +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 +#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon 
zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#endif /* _WG_COMPATASM_H */ diff --git a/net/wireguard/compat/compat.h b/net/wireguard/compat/compat.h new file mode 100644 index 000000000000..69dada89494f --- /dev/null +++ b/net/wireguard/compat/compat.h @@ -0,0 +1,1199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_COMPAT_H +#define _WG_COMPAT_H + +#include +#include +#include +#include + +#ifdef RHEL_MAJOR +#if RHEL_MAJOR == 7 +#define ISRHEL7 +#elif RHEL_MAJOR == 8 +#define ISRHEL8 +#endif +#endif +#ifdef UTS_UBUNTU_RELEASE_ABI +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +#define ISUBUNTU1604 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +#define ISUBUNTU1804 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define ISUBUNTU1904 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) +#define ISUBUNTU1910 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) +#error "WireGuard requires Linux >= 3.10" +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +#error "WireGuard has been merged into Linux >= 5.6 and therefore this compatibility module is no longer required." 
+#endif + +#if defined(ISRHEL7) +#include +#define headers_end headers_start +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) +#define headers_start data +#define headers_end data +#endif + +#include +#include +#ifndef __ro_after_init +#define __ro_after_init __read_mostly +#endif + +#include +#ifndef READ_ONCE +#define READ_ONCE ACCESS_ONCE +#endif +#ifndef WRITE_ONCE +#ifdef ACCESS_ONCE_RW +#define WRITE_ONCE(p, v) (ACCESS_ONCE_RW(p) = (v)) +#else +#define WRITE_ONCE(p, v) (ACCESS_ONCE(p) = (v)) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include "udp_tunnel/udp_tunnel_partial_compat.h" +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(DEBUG) && defined(net_dbg_ratelimited) +#undef net_dbg_ratelimited +#define net_dbg_ratelimited(fmt, ...) do { if (0) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include +#ifndef RCU_LOCKDEP_WARN +#define RCU_LOCKDEP_WARN(cond, message) rcu_lockdep_assert(!(cond), message) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) +#define ipv6_dst_lookup(a, b, c, d) ipv6_dst_lookup(b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 83) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup_flow(b, c, d) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) && !defined(ISUBUNTU1904)) || (!defined(ISRHEL8) && !defined(ISDEBIAN) && !defined(ISUBUNTU1804) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) && !defined(ISUBUNTU1604) && !defined(ISRHEL7)) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include +struct ipv6_stub_type { + void *udpv6_encap_enable; + int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); +}; +static const struct ipv6_stub_type ipv6_stub_impl = { + .udpv6_encap_enable = (void *)1, + .ipv6_dst_lookup = ip6_dst_lookup +}; +static const struct ipv6_stub_type *ipv6_stub = &ipv6_stub_impl; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include +static inline bool ipv6_mod_enabled(void) +{ + return ipv6_stub != NULL && ipv6_stub->udpv6_encap_enable != NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(ISRHEL7) +#include +static inline void skb_reset_tc(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + skb->tc_verd = 0; +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include +#include +static inline u32 __compat_get_random_u32(void) +{ + static siphash_key_t key; + static u32 counter = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + static bool has_seeded = false; + if (unlikely(!has_seeded)) { + get_random_bytes(&key, sizeof(key)); + has_seeded = true; + } +#else + get_random_once(&key, sizeof(key)); +#endif + return siphash_2u32(counter++, get_random_int(), &key); +} +#define get_random_u32 __compat_get_random_u32 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(ISRHEL7) +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; +} +#define COMPAT_CANNOT_USE_CSUM_LEVEL +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include +#ifndef netdev_alloc_pcpu_stats +#define pcpu_sw_netstats 
pcpu_tstats +#endif +#ifndef netdev_alloc_pcpu_stats +#define netdev_alloc_pcpu_stats alloc_percpu +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#include +#ifndef netdev_alloc_pcpu_stats +#define netdev_alloc_pcpu_stats(type) \ +({ \ + typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ + if (pcpu_stats) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ + typeof(type) *stat; \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ + u64_stats_init(&stat->syncp); \ + } \ + } \ + pcpu_stats; \ +}) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include "checksum/checksum_partial_compat.h" +static inline void *__compat_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) +{ + if (tail != skb) { + skb->data_len += len; + skb->len += len; + } + return skb_put(tail, len); +} +#define pskb_put __compat_pskb_put +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#include +static inline void skb_scrub_packet(struct sk_buff *skb, bool xnet) +{ +#ifdef CONFIG_CAVIUM_OCTEON_IPFWD_OFFLOAD + memset(&skb->cvm_info, 0, sizeof(skb->cvm_info)); + skb->cvm_reserved = 0; +#endif + skb->tstamp.tv64 = 0; + skb->pkt_type = PACKET_HOST; + skb->skb_iif = 0; + skb_dst_drop(skb); + secpath_reset(skb); + nf_reset(skb); + nf_reset_trace(skb); + if (!xnet) + return; + skb_orphan(skb); + skb->mark = 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) +#define skb_scrub_packet(a, b) skb_scrub_packet(a) +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 63)) && !defined(ISRHEL7) +#include +static inline u32 __compat_prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64)prandom_u32() * ep_ro) >> 32); +} +#define prandom_u32_max __compat_prandom_u32_max +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include 
+#ifndef U8_MAX +#define U8_MAX ((u8)~0U) +#endif +#ifndef S8_MAX +#define S8_MAX ((s8)(U8_MAX >> 1)) +#endif +#ifndef S8_MIN +#define S8_MIN ((s8)(-S8_MAX - 1)) +#endif +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif +#ifndef S16_MAX +#define S16_MAX ((s16)(U16_MAX >> 1)) +#endif +#ifndef S16_MIN +#define S16_MIN ((s16)(-S16_MAX - 1)) +#endif +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif +#ifndef S32_MAX +#define S32_MAX ((s32)(U32_MAX >> 1)) +#endif +#ifndef S32_MIN +#define S32_MIN ((s32)(-S32_MAX - 1)) +#endif +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif +#ifndef S64_MAX +#define S64_MAX ((s64)(U64_MAX >> 1)) +#endif +#ifndef S64_MIN +#define S64_MIN ((s64)(-S64_MAX - 1)) +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 3) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 35) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 24) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 33) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 60) && !defined(ISRHEL7)) +static inline void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + barrier(); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(ISRHEL7) +static const struct in6_addr __compat_in6addr_any = IN6ADDR_ANY_INIT; +#define in6addr_any __compat_in6addr_any +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +#include +#include +#include +struct rng_initializer { + struct completion done; + struct random_ready_callback cb; +}; +static inline void rng_initialized_callback(struct random_ready_callback *cb) +{ + complete(&container_of(cb, struct rng_initializer, cb)->done); +} +static 
inline int wait_for_random_bytes(void) +{ + static bool rng_is_initialized = false; + int ret; + if (unlikely(!rng_is_initialized)) { + struct rng_initializer rng = { + .done = COMPLETION_INITIALIZER(rng.done), + .cb = { .owner = THIS_MODULE, .func = rng_initialized_callback } + }; + ret = add_random_ready_callback(&rng.cb); + if (!ret) { + ret = wait_for_completion_interruptible(&rng.done); + if (ret) { + del_random_ready_callback(&rng.cb); + return ret; + } + } else if (ret != -EALREADY) + return ret; + rng_is_initialized = true; + } + return 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... */ +static inline int wait_for_random_bytes(void) +{ + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) && !defined(ISRHEL8) +#include +#include +struct rng_is_initialized_callback { + struct random_ready_callback cb; + atomic_t *rng_state; +}; +static inline void rng_is_initialized_callback(struct random_ready_callback *cb) +{ + struct rng_is_initialized_callback *rdy = container_of(cb, struct rng_is_initialized_callback, cb); + atomic_set(rdy->rng_state, 2); + kfree(rdy); +} +static inline bool rng_is_initialized(void) +{ + static atomic_t rng_state = ATOMIC_INIT(0); + + if (atomic_read(&rng_state) == 2) + return true; + + if (atomic_cmpxchg(&rng_state, 0, 1) == 0) { + int ret; + struct rng_is_initialized_callback *rdy = kmalloc(sizeof(*rdy), GFP_ATOMIC); + if (!rdy) { + atomic_set(&rng_state, 0); + return false; + } + rdy->cb.owner = THIS_MODULE; + rdy->cb.func = rng_is_initialized_callback; + rdy->rng_state = &rng_state; + ret = 
add_random_ready_callback(&rdy->cb); + if (ret) + kfree(rdy); + if (ret == -EALREADY) { + atomic_set(&rng_state, 2); + return true; + } else if (ret) + atomic_set(&rng_state, 0); + return false; + } + return false; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... */ +static inline bool rng_is_initialized(void) +{ + return true; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +static inline int get_random_bytes_wait(void *buf, int nbytes) +{ + int ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; + get_random_bytes(buf, nbytes); + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#define system_power_efficient_wq system_unbound_wq +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#include +#ifndef ktime_get_real_ts64 +#define timespec64 timespec +#define ktime_get_real_ts64 ktime_get_real_ts +#endif +#else +#include +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +static inline u64 __compat_jiffies64_to_nsecs(u64 j) +{ +#if !(NSEC_PER_SEC % HZ) + return (NSEC_PER_SEC / HZ) * j; +#else + return div_u64(j * HZ_TO_USEC_NUM, HZ_TO_USEC_DEN) * 1000; +#endif +} +#define jiffies64_to_nsecs __compat_jiffies64_to_nsecs +#endif +static inline u64 ktime_get_coarse_boottime_ns(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + return ktime_to_ns(ktime_get_boottime()); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 12) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 53) + return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT)); +#else + return 
ktime_to_ns(ktime_get_coarse_boottime()); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include +static inline __be32 __compat_confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + int same = 0; + __be32 addr = 0; + for_ifa(in_dev) { + if (!addr && (local == ifa->ifa_local || !local) && ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + if (same) + break; + } + if (!same) { + same = (!local || inet_ifa_match(local, ifa)) && (!dst || inet_ifa_match(dst, ifa)); + if (same && addr) { + if (local || !dst) + break; + if (inet_ifa_match(addr, ifa)) + break; + if (ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + break; + } + same = 0; + } + } + } endfor_ifa(in_dev); + return same ? addr : 0; +} +static inline __be32 __compat_inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + __be32 addr = 0; + struct net_device *dev; + if (in_dev) + return __compat_confirm_addr_indev(in_dev, dst, local, scope); + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + in_dev = __in_dev_get_rcu(dev); + if (in_dev) { + addr = __compat_confirm_addr_indev(in_dev, dst, local, scope); + if (addr) + break; + } + } + rcu_read_unlock(); + return addr; +} +#define inet_confirm_addr __compat_inet_confirm_addr +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include +#include +#include +static inline void *__compat_kvmalloc(size_t size, gfp_t flags) +{ + gfp_t kmalloc_flags = flags; + void *ret; + if (size > PAGE_SIZE) { + kmalloc_flags |= __GFP_NOWARN; + if (!(kmalloc_flags & __GFP_REPEAT) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + kmalloc_flags |= __GFP_NORETRY; + } + ret = kmalloc(size, kmalloc_flags); + if (ret || size <= PAGE_SIZE) + return ret; + return __vmalloc(size, flags, PAGE_KERNEL); +} +static inline void *__compat_kvzalloc(size_t size, gfp_t flags) +{ + return __compat_kvmalloc(size, flags | __GFP_ZERO); +} +#define kvmalloc __compat_kvmalloc 
+#define kvzalloc __compat_kvzalloc +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) +#include +#include +static inline void __compat_kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} +#define kvfree __compat_kvfree +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include +#include +static inline void *__compat_kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (n != 0 && SIZE_MAX / n < size) + return NULL; + return kvmalloc(n * size, flags); +} +#define kvmalloc_array __compat_kvmalloc_array +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +#include +#include +static inline void *__compat_kvcalloc(size_t n, size_t size, gfp_t flags) +{ + return kvmalloc_array(n, size, flags | __GFP_ZERO); +} +#define kvcalloc __compat_kvcalloc +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9) +#include +#define priv_destructor destructor +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +#define wg_newlink(a,b,c,d,e) wg_newlink(a,b,c,d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include +#include +#define nlmsg_parse(a, b, c, d, e, f) nlmsg_parse(a, b, c, d, e) +#define nla_parse_nested(a, b, c, d, e) nla_parse_nested(a, b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +static inline struct nlattr **genl_family_attrbuf(const struct genl_family *family) +{ + return family->attrbuf; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define PTR_ERR_OR_ZERO(p) PTR_RET(p) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) +#include +#define nla_put_u64_64bit(a, b, c, d) nla_put_u64(a, b, c) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +#include +#ifndef GENL_UNS_ADMIN_PERM +#define GENL_UNS_ADMIN_PERM GENL_ADMIN_PERM +#endif +#endif + +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops, ARRAY_SIZE(genl_ops)) +#define COMPAT_CANNOT_USE_CONST_GENL_OPS +#else +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops) +#endif +#define COMPAT_CANNOT_USE_GENL_NOPS +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 2) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 16) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 65) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 101) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 84) +#define __COMPAT_NETLINK_DUMP_BLOCK { \ + int ret; \ + skb->end -= nlmsg_total_size(sizeof(int)); \ + ret = wg_get_device_dump_real(skb, cb); \ + skb->end += nlmsg_total_size(sizeof(int)); \ + return ret; \ +} +#define __COMPAT_NETLINK_DUMP_OVERRIDE +#else +#define __COMPAT_NETLINK_DUMP_BLOCK return wg_get_device_dump_real(skb, cb); +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 25) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 87) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + struct wg_device *wg = (struct wg_device *)cb->args[0]; \ + if (!wg) { \ + int ret = wg_get_device_start(cb); \ + if (ret) \ + return ret; \ + } \ + __COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#define COMPAT_CANNOT_USE_NETLINK_START +#elif defined(__COMPAT_NETLINK_DUMP_OVERRIDE) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + 
__COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define COMPAT_CANNOT_USE_IN6_DEV_GET +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#define COMPAT_CANNOT_USE_IFF_NO_QUEUE +#endif + +#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#include +#include +static inline int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name) +{ + return boot_cpu_has(X86_FEATURE_XSAVE) && xgetbv(XCR_XFEATURE_ENABLED_MASK) & xfeatures_needed; +} +#endif +#ifndef XFEATURE_MASK_YMM +#define XFEATURE_MASK_YMM XSTATE_YMM +#endif +#ifndef XFEATURE_MASK_SSE +#define XFEATURE_MASK_SSE XSTATE_SSE +#endif +#ifndef XSTATE_AVX512 +#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) +#endif +#ifndef XFEATURE_MASK_AVX512 +#define XFEATURE_MASK_AVX512 XSTATE_AVX512 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && defined(CONFIG_X86_64) +/* This is incredibly dumb and reckless, but as it turns out, there's + * not really hardware Linux runs properly on that supports F but not BW + * and VL, so in practice this isn't so bad. Plus, this is compat layer, + * so the bar remains fairly low. 
+ */ +#include +#ifndef X86_FEATURE_AVX512BW +#define X86_FEATURE_AVX512BW X86_FEATURE_AVX512F +#endif +#ifndef X86_FEATURE_AVX512VL +#define X86_FEATURE_AVX512VL X86_FEATURE_AVX512F +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +struct __compat_dummy_container { char dev; }; +#define netdev_notifier_info net_device *)data); __attribute((unused)) char __compat_dummy_variable = ((struct __compat_dummy_container +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#define from_timer(var, callback_timer, timer_fieldname) container_of(callback_timer, typeof(*var), timer_fieldname) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 3) +#define COMPAT_CANNOT_USE_AVX512 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include +#define genl_dump_check_consistent(a, b) genl_dump_check_consistent(a, b, &genl_family) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && !defined(ISRHEL7) +static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) +{ + void *tmp = skb_put(skb, len); + memcpy(tmp, data, len); + return tmp; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#define napi_complete_done(n, work_done) napi_complete(n) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#include +/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe. + * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to + * napi_hash_add inside of netif_napi_add. 
+ */ +#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) ({ int __compat_p1 = atomic_read(v); smp_rmb(); __compat_p1; }) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) ({ smp_wmb(); atomic_set(v, i); }) +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + if (unlikely(dst != src1)) + memmove(dst, src1, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + crypto_xor(dst, src2, size); +#else + __crypto_xor(dst, src2, size); +#endif + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define read_cpuid_part() read_cpuid_part_number() 
+#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define hlist_add_behind(a, b) hlist_add_after(b, a) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISRHEL8) +#define totalram_pages() totalram_pages +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) +#include +#ifdef __kernel_timespec +#undef __kernel_timespec +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include +#define skb_probe_transport_header(a) skb_probe_transport_header(a, 0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && !defined(ISRHEL7) +#define ignore_df local_df +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +/* Note that all intentional uses of the non-_bh variety need to explicitly + * undef these, conditionalized on COMPAT_CANNOT_DEPRECIATE_BH_RCU. 
+ */ +#include +static __always_inline void old_synchronize_rcu(void) +{ + synchronize_rcu(); +} +static __always_inline void old_call_rcu(void *a, void *b) +{ + call_rcu(a, b); +} +static __always_inline void old_rcu_barrier(void) +{ + rcu_barrier(); +} +#ifdef synchronize_rcu +#undef synchronize_rcu +#endif +#ifdef call_rcu +#undef call_rcu +#endif +#ifdef rcu_barrier +#undef rcu_barrier +#endif +#define synchronize_rcu synchronize_rcu_bh +#define call_rcu call_rcu_bh +#define rcu_barrier rcu_barrier_bh +#define COMPAT_CANNOT_DEPRECIATE_BH_RCU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) && !defined(ISRHEL8)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 217) +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8) +#include +#ifndef NLA_POLICY_EXACT_LEN +#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#endif +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8) +#include +#ifndef NLA_POLICY_MIN_LEN +#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#endif +#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && defined(__aarch64__) +#define cpu_have_named_feature(name) (elf_hwcap & (HWCAP_ ## name)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +#include +#ifndef offsetofend +#define offsetofend(TYPE, MEMBER) (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) && !defined(ISRHEL8) +#define genl_dumpit_info(cb) ({ \ + struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \ + BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \ + a->attrs = genl_family_attrbuf(&genl_family); \ + 
if (nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, a->attrs, genl_family.maxattr, device_policy, NULL) < 0) \ + memset(a->attrs, 0, (genl_family.maxattr + 1) * sizeof(struct nlattr *)); \ + a; \ +}) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +#include +#ifndef skb_list_walk_safe +#define skb_list_walk_safe(first, skb, next) \ + for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next), (next) = (skb) ? (skb)->next : NULL) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 200) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 249)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 285)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 320)) +#define blake2s_init zinc_blake2s_init +#define blake2s_init_key zinc_blake2s_init_key +#define blake2s_update zinc_blake2s_update +#define blake2s_final zinc_blake2s_final +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_hmac zinc_blake2s_hmac +#define chacha20 zinc_chacha20 +#define hchacha20 zinc_hchacha20 +#define chacha20poly1305_encrypt zinc_chacha20poly1305_encrypt +#define chacha20poly1305_encrypt_sg_inplace zinc_chacha20poly1305_encrypt_sg_inplace +#define chacha20poly1305_decrypt zinc_chacha20poly1305_decrypt +#define chacha20poly1305_decrypt_sg_inplace zinc_chacha20poly1305_decrypt_sg_inplace +#define xchacha20poly1305_encrypt zinc_xchacha20poly1305_encrypt +#define xchacha20poly1305_decrypt zinc_xchacha20poly1305_decrypt +#define curve25519 zinc_curve25519 +#define curve25519_generate_secret zinc_curve25519_generate_secret +#define curve25519_generate_public zinc_curve25519_generate_public +#define poly1305_init zinc_poly1305_init +#define poly1305_update zinc_poly1305_update +#define poly1305_final zinc_poly1305_final +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 
+#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#include +static inline int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 102) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 178) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 223) && LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 259) || defined(ISRHEL8) || defined(ISUBUNTU1804) +#include +#include +#if IS_ENABLED(CONFIG_NF_NAT) +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include +#endif +static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct iphdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct iphdr)))) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); + ip_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct in6_addr orig_ip; + struct nf_conn *ct; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > + skb_tail_pointer(skb_in) || 
skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) + goto out; + + orig_ip = ipv6_hdr(skb_in)->saddr; + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); + ipv6_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +#else +static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); +} +static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); +} +#endif +#define icmp_ndo_send __compat_icmp_ndo_send +#define icmpv6_ndo_send __compat_icmpv6_ndo_send +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#define COMPAT_CANNOT_USE_MAX_MTU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910) && !defined(ISUBUNTU1904) && !defined(ISRHEL8)) +#include +#include +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_SCHED + skb_reset_tc(skb); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#define skb_get_hash skb_get_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#define hash rxhash +#define l4_hash l4_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define sw_hash ignore_df = 0; skb->nf_trace = skb->ooo_okay +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#define pre_exit exit +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) +#include +#include +#include +static inline __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) +{ + if (skb_network_header(skb) >= 
skb->head && + (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && + ip_hdr(skb)->version == 4) + return htons(ETH_P_IP); + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && + ipv6_hdr(skb)->version == 6) + return htons(ETH_P_IPV6); + return 0; +} +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(ISRHEL8) +static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; +#else +#define header_ops hard_header_len +#define ip_tunnel_header_ops *(char *)0 - (char *)0 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) +#define kfree_sensitive(a) kzfree(a) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) +#define xchg_release xchg +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include +#ifndef smp_load_acquire +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) +#include +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; +#define COMPAT_HAS_DEFINED_DST_CACHE_PCPU +static inline void dst_cache_reset_now(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + dst_cache->reset_ts = jiffies; + for_each_possible_cpu(i) { + struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); + struct dst_entry *dst = idst->dst; + + idst->cookie = 0; + idst->dst = NULL; + dst_release(dst); + } +} +#endif + +#if defined(ISUBUNTU1604) || defined(ISRHEL7) +#include +#ifndef _WG_LINUX_SIPHASH_H +#define hsiphash_1u32 siphash_1u32 +#define hsiphash_2u32 siphash_2u32 +#define hsiphash_3u32 siphash_3u32 +#define hsiphash_key_t siphash_key_t +#endif +#endif + +#ifdef CONFIG_VE +#include +#ifdef 
NETIF_F_VIRTUAL +#undef NETIF_F_LLTX +#define NETIF_F_LLTX (__NETIF_F(LLTX) | __NETIF_F(VIRTUAL)) +#endif +#endif + +/* https://github.com/ClangBuiltLinux/linux/issues/7 */ +#if defined( __clang__) && (!defined(CONFIG_CLANG_VERSION) || CONFIG_CLANG_VERSION < 80000) +#include +#undef BUILD_BUG_ON +#define BUILD_BUG_ON(x) +#endif + +/* PaX compatibility */ +#ifdef CONSTIFY_PLUGIN +#include +#undef __read_mostly +#define __read_mostly +#endif +#if (defined(CONFIG_PAX) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include +#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer) +#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer) +#define wg_expired_new_handshake(a) wg_expired_new_handshake(unsigned long timer) +#define wg_expired_zero_key_material(a) wg_expired_zero_key_material(unsigned long timer) +#define wg_expired_send_persistent_keepalive(a) wg_expired_send_persistent_keepalive(unsigned long timer) +#undef timer_setup +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#undef from_timer +#define from_timer(var, callback_timer, timer_fieldname) container_of((struct timer_list *)callback_timer, typeof(*var), timer_fieldname) +#endif + +#endif /* _WG_COMPAT_H */ diff --git a/net/wireguard/compat/dst_cache/dst_cache.c b/net/wireguard/compat/dst_cache/dst_cache.c new file mode 100644 index 000000000000..f74c43c550eb --- /dev/null +++ b/net/wireguard/compat/dst_cache/dst_cache.c @@ -0,0 +1,177 @@ +/* + * net/core/dst_cache.c - dst entry cache + * + * Copyright (c) 2016 Paolo Abeni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_IPV6) +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 50) +static inline u32 rt6_get_cookie(const struct rt6_info *rt) +{ + if ((unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from)) + rt = (struct rt6_info *)(rt->dst.from); + + return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; +} +#endif +#endif +#include + +#ifndef COMPAT_HAS_DEFINED_DST_CACHE_PCPU +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; +#endif + +static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, + struct dst_entry *dst, u32 cookie) +{ + dst_release(dst_cache->dst); + if (dst) + dst_hold(dst); + + dst_cache->cookie = cookie; + dst_cache->dst = dst; +} + +static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache, + struct dst_cache_pcpu *idst) +{ + struct dst_entry *dst; + + dst = idst->dst; + if (!dst) + goto fail; + + /* the cache already hold a dst reference; it can't go away */ + dst_hold(dst); + + if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) || + (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) { + dst_cache_per_cpu_dst_set(idst, NULL, 0); + dst_release(dst); + goto fail; + } + return dst; + +fail: + idst->refresh_ts = jiffies; + return NULL; +} + +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache) +{ + if (!dst_cache->cache) + return NULL; + + return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache)); +} + +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = 
idst->in_saddr.s_addr; + return container_of(dst, struct rtable, dst); +} + +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(idst, dst, 0); + idst->in_saddr.s_addr = saddr; +} + +#if IS_ENABLED(CONFIG_IPV6) +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst, + rt6_get_cookie((struct rt6_info *)dst)); + idst->in6_saddr = *addr; +} + +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = idst->in6_saddr; + return dst; +} +#endif + +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) + BUG_ON(gfp & GFP_ATOMIC); + dst_cache->cache = alloc_percpu(struct dst_cache_pcpu); +#else + dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu, + gfp | __GFP_ZERO); +#endif + if (!dst_cache->cache) + return -ENOMEM; + + dst_cache_reset(dst_cache); + return 0; +} + +void dst_cache_destroy(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + for_each_possible_cpu(i) + dst_release(per_cpu_ptr(dst_cache->cache, i)->dst); + + free_percpu(dst_cache->cache); +} diff --git a/net/wireguard/compat/dst_cache/include/net/dst_cache.h b/net/wireguard/compat/dst_cache/include/net/dst_cache.h new file mode 100644 index 000000000000..48021c0d6be1 --- /dev/null +++ b/net/wireguard/compat/dst_cache/include/net/dst_cache.h @@ -0,0 
+1,97 @@ +#ifndef _WG_NET_DST_CACHE_H +#define _WG_NET_DST_CACHE_H + +#include +#include +#if IS_ENABLED(CONFIG_IPV6) +#include +#endif + +struct dst_cache { + struct dst_cache_pcpu __percpu *cache; + unsigned long reset_ts; +}; + +/** + * dst_cache_get - perform cache lookup + * @dst_cache: the cache + * + * The caller should use dst_cache_get_ip4() if it need to retrieve the + * source address to be used when xmitting to the cached dst. + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache); + +/** + * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr); + +/** + * dst_cache_set_ip4 - store the ipv4 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr); + +#if IS_ENABLED(CONFIG_IPV6) + +/** + * dst_cache_set_ip6 - store the ipv6 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr); + +/** + * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr); +#endif + +/** + * dst_cache_reset - invalidate the cache contents + * @dst_cache: the cache + * + * This do not free the cached dst to avoid races and contentions. 
+ * the dst will be freed on later cache lookup. + */ +static inline void dst_cache_reset(struct dst_cache *dst_cache) +{ + dst_cache->reset_ts = jiffies; +} + +/** + * dst_cache_init - initialize the cache, allocating the required storage + * @dst_cache: the cache + * @gfp: allocation flags + */ +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp); + +/** + * dst_cache_destroy - empty the cache and free the allocated storage + * @dst_cache: the cache + * + * No synchronization is enforced: it must be called only when the cache + * is unused. + */ +void dst_cache_destroy(struct dst_cache *dst_cache); + +#endif /* _WG_NET_DST_CACHE_H */ diff --git a/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h b/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h new file mode 100644 index 000000000000..995094d4f099 --- /dev/null +++ b/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h @@ -0,0 +1,3 @@ +#ifndef skb_valid_dst +#define skb_valid_dst(skb) (!!skb_dst(skb)) +#endif diff --git a/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h b/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h new file mode 100644 index 000000000000..f3f9117bcb88 --- /dev/null +++ b/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h @@ -0,0 +1 @@ +#include diff --git a/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h b/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h new file mode 100644 index 000000000000..35a6bc4da8ad --- /dev/null +++ b/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INTEL_FAMILY_H +#define _ASM_X86_INTEL_FAMILY_H + +/* + * "Big Core" Processors (Branded as Core, Xeon, etc...) + * + * The "_X" parts are generally the EP and EX Xeons, or the + * "Extreme" ones, like Broadwell-E. + * + * Things ending in "2" are usually because we have no better + * name for them. There's no processor called "SILVERMONT2". 
+ */ + +#define INTEL_FAM6_CORE_YONAH 0x0E + +#define INTEL_FAM6_CORE2_MEROM 0x0F +#define INTEL_FAM6_CORE2_MEROM_L 0x16 +#define INTEL_FAM6_CORE2_PENRYN 0x17 +#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D + +#define INTEL_FAM6_NEHALEM 0x1E +#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ +#define INTEL_FAM6_NEHALEM_EP 0x1A +#define INTEL_FAM6_NEHALEM_EX 0x2E + +#define INTEL_FAM6_WESTMERE 0x25 +#define INTEL_FAM6_WESTMERE_EP 0x2C +#define INTEL_FAM6_WESTMERE_EX 0x2F + +#define INTEL_FAM6_SANDYBRIDGE 0x2A +#define INTEL_FAM6_SANDYBRIDGE_X 0x2D +#define INTEL_FAM6_IVYBRIDGE 0x3A +#define INTEL_FAM6_IVYBRIDGE_X 0x3E + +#define INTEL_FAM6_HASWELL_CORE 0x3C +#define INTEL_FAM6_HASWELL_X 0x3F +#define INTEL_FAM6_HASWELL_ULT 0x45 +#define INTEL_FAM6_HASWELL_GT3E 0x46 + +#define INTEL_FAM6_BROADWELL_CORE 0x3D +#define INTEL_FAM6_BROADWELL_GT3E 0x47 +#define INTEL_FAM6_BROADWELL_X 0x4F +#define INTEL_FAM6_BROADWELL_XEON_D 0x56 + +#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E +#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E +#define INTEL_FAM6_SKYLAKE_X 0x55 +#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E +#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E + +/* "Small Core" Processors (Atom) */ + +#define INTEL_FAM6_ATOM_PINEVIEW 0x1C +#define INTEL_FAM6_ATOM_LINCROFT 0x26 +#define INTEL_FAM6_ATOM_PENWELL 0x27 +#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 +#define INTEL_FAM6_ATOM_CEDARVIEW 0x36 +#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ +#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ +#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ +#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ +#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ +#define INTEL_FAM6_ATOM_GOLDMONT 0x5C +#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ +#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A + +/* Xeon Phi */ + +#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ +#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ + +#endif /* 
_ASM_X86_INTEL_FAMILY_H */ diff --git a/net/wireguard/compat/memneq/include.h b/net/wireguard/compat/memneq/include.h new file mode 100644 index 000000000000..2d18acd9b6c8 --- /dev/null +++ b/net/wireguard/compat/memneq/include.h @@ -0,0 +1,5 @@ +extern noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} diff --git a/net/wireguard/compat/memneq/memneq.c b/net/wireguard/compat/memneq/memneq.c new file mode 100644 index 000000000000..1c427d405537 --- /dev/null +++ b/net/wireguard/compat/memneq/memneq.c @@ -0,0 +1,170 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan + * Daniel Borkmann + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ +#define COMPILER_OPTIMIZER_HIDE_VAR(var) asm("" : "=r" (var) : "0" (var)) + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= 
*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/net/wireguard/compat/neon-arm/include/asm/neon.h b/net/wireguard/compat/neon-arm/include/asm/neon.h new file mode 100644 index 000000000000..980d831e201a --- /dev/null +++ b/net/wireguard/compat/neon-arm/include/asm/neon.h @@ -0,0 +1,7 @@ +#ifndef _ARCH_ARM_ASM_NEON +#define _ARCH_ARM_ASM_NEON +#define kernel_neon_begin() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#define kernel_neon_end() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#endif diff --git a/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h new file mode 100644 index 000000000000..9d514bac1388 --- /dev/null +++ b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. 
+ */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ + int consumer_tail; /* next entry to invalidate */ + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + int batch; /* number of entries to consume in a batch */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. + */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must hold producer_lock. 
+ * Callers are responsible for making sure pointer that is being queued + * points to a valid data. + */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + + WRITE_ONCE(r->queue[r->producer++], ptr); + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return READ_ONCE(r->queue[r->consumer_head]); + return NULL; +} + +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. 
+ * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring enteries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + /* Fundamentally, what we want to do is update consumer + * index and zero out the entry so producer can reuse it. 
+ * Doing it naively at each consume would be as simple as: + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; + * but that is suboptimal when the ring is full as producer is writing + * out new entries in the same cache line. Defer these updates until a + * batch of entries has been consumed. + */ + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; + + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { + /* Zero out entries in the reverse order: this way we touch the + * cache line that producer might currently be reading the last; + * producer won't make progress and touch other cache lines + * besides the first one until we write out all entries. + */ + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = consumer_head; + } + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; + r->consumer_tail = 0; + } + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + /* The READ_ONCE in __ptr_ring_peek doesn't imply a barrier on old kernels. 
*/ + smp_read_barrier_depends(); +#endif + + return ptr; +} + +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. + */ +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, 
flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. + */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See + * documentation for vmalloc for which of them are legal. 
+ */ +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +{ + if (size > KMALLOC_MAX_SIZE / sizeof(void *)) + return NULL; + return kvmalloc(size * sizeof(void *), gfp | __GFP_ZERO); +} + +static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) +{ + r->size = size; + r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); + /* We need to set batch at least to 1 to make logic + * in __ptr_ring_discard_one work correctly. + * Batching too much (because ring is small) would cause a lot of + * burstiness. Needs tuning, for now disable batching. + */ + if (r->batch > r->size / 2 || !r->batch) + r->batch = 1; +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + __ptr_ring_set_size(r, size); + r->producer = r->consumer_head = r->consumer_tail = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. 
+ */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = __ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + if (producer >= size) + producer = 0; + __ptr_ring_set_size(r, size); + r->producer = producer; + r->consumer_head = 0; + r->consumer_tail = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. 
+ */ +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); + + kvfree(old); + + return 0; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc_array(nrings, sizeof(*queues), gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kvfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kvfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kvfree(r->queue); +} + +#endif /* _LINUX_PTR_RING_H */ diff --git a/net/wireguard/compat/simd-asm/include/asm/simd.h 
b/net/wireguard/compat/simd-asm/include/asm/simd.h new file mode 100644 index 000000000000..a975b38b5578 --- /dev/null +++ b/net/wireguard/compat/simd-asm/include/asm/simd.h @@ -0,0 +1,21 @@ +#ifndef _COMPAT_ASM_SIMD_H +#define _COMPAT_ASM_SIMD_H + +#if defined(CONFIG_X86_64) +#include +#endif + +static __must_check inline bool may_use_simd(void) +{ +#if defined(CONFIG_X86_64) + return irq_fpu_usable(); +#elif defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON) + return true; +#elif defined(CONFIG_ARM) && defined(CONFIG_KERNEL_MODE_NEON) + return !in_nmi() && !in_irq() && !in_serving_softirq(); +#else + return false; +#endif +} + +#endif diff --git a/net/wireguard/compat/simd/include/linux/simd.h b/net/wireguard/compat/simd/include/linux/simd.h new file mode 100644 index 000000000000..e7f2550320c7 --- /dev/null +++ b/net/wireguard/compat/simd/include/linux/simd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_SIMD_H +#define _WG_SIMD_H + +#include +#include +#if defined(CONFIG_X86_64) +#include +#elif defined(CONFIG_KERNEL_MODE_NEON) +#include +#endif + +typedef enum { + HAVE_NO_SIMD = 1 << 0, + HAVE_FULL_SIMD = 1 << 1, + HAVE_SIMD_IN_USE = 1 << 31 +} simd_context_t; + +#define DONT_USE_SIMD ((simd_context_t []){ HAVE_NO_SIMD }) + +static inline void simd_get(simd_context_t *ctx) +{ + *ctx = !IS_ENABLED(CONFIG_PREEMPT_RT) && !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && may_use_simd() ? 
HAVE_FULL_SIMD : HAVE_NO_SIMD; +} + +static inline void simd_put(simd_context_t *ctx) +{ +#if defined(CONFIG_X86_64) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_fpu_end(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_neon_end(); +#endif + *ctx = HAVE_NO_SIMD; +} + +static inline bool simd_relax(simd_context_t *ctx) +{ +#ifdef CONFIG_PREEMPT + if ((*ctx & HAVE_SIMD_IN_USE) && need_resched()) { + simd_put(ctx); + simd_get(ctx); + return true; + } +#endif + return false; +} + +static __must_check inline bool simd_use(simd_context_t *ctx) +{ + if (!(*ctx & HAVE_FULL_SIMD)) + return false; + if (*ctx & HAVE_SIMD_IN_USE) + return true; +#if defined(CONFIG_X86_64) + kernel_fpu_begin(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + kernel_neon_begin(); +#endif + *ctx |= HAVE_SIMD_IN_USE; + return true; +} + +#endif /* _WG_SIMD_H */ diff --git a/net/wireguard/compat/siphash/include/linux/siphash.h b/net/wireguard/compat/siphash/include/linux/siphash.h new file mode 100644 index 000000000000..3b30b3c47778 --- /dev/null +++ b/net/wireguard/compat/siphash/include/linux/siphash.h @@ -0,0 +1,134 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. 
+ */ + +#ifndef _WG_LINUX_SIPHASH_H +#define _WG_LINUX_SIPHASH_H + +#include +#include + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * 
@size: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); + return ___siphash_aligned(data, len, key); +} + +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return 
__hsiphash_unaligned(data, len, key); + return ___hsiphash_aligned(data, len, key); +} + +#endif /* _WG_LINUX_SIPHASH_H */ diff --git a/net/wireguard/compat/siphash/siphash.c b/net/wireguard/compat/siphash/siphash.c new file mode 100644 index 000000000000..7dc72cb4a710 --- /dev/null +++ b/net/wireguard/compat/siphash/siphash.c @@ -0,0 +1,539 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. + */ + +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#ifdef __LITTLE_ENDIAN +#define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) +#else +#define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) +#endif +#endif + +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 +#include +#include +#endif + +#define SIPROUND \ + do { \ + v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ + v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ + v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ + v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ + } while (0) + +#define PREAMBLE(len) \ + u64 v0 = 0x736f6d6570736575ULL; \ + u64 v1 = 0x646f72616e646f6dULL; \ + u64 v2 = 0x6c7967656e657261ULL; \ + u64 v3 = 0x7465646279746573ULL; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & 
(sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +#endif + +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} + +/** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 + * @first: first u64 + * @key: the siphash key + */ +u64 siphash_1u64(const u64 first, const siphash_key_t *key) +{ + PREAMBLE(8) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + POSTAMBLE +} + +/** + * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 + * @first: first u64 + * @second: second u64 + * @key: the siphash key + */ +u64 
siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} + +/** + * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @key: the siphash key + */ +u64 siphash_3u64(const u64 first, const u64 second, const u64 third, + const siphash_key_t *key) +{ + PREAMBLE(24) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + POSTAMBLE +} + +/** + * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @forth: forth u64 + * @key: the siphash key + */ +u64 siphash_4u64(const u64 first, const u64 second, const u64 third, + const u64 forth, const siphash_key_t *key) +{ + PREAMBLE(32) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + v3 ^= forth; + SIPROUND; + SIPROUND; + v0 ^= forth; + POSTAMBLE +} + +u64 siphash_1u32(const u32 first, const siphash_key_t *key) +{ + PREAMBLE(4) + b |= first; + POSTAMBLE +} + +u64 siphash_3u32(const u32 first, const u32 second, const u32 third, + const siphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + PREAMBLE(12) + v3 ^= combined; + SIPROUND; + SIPROUND; + v0 ^= combined; + b |= third; + POSTAMBLE +} + +#if BITS_PER_LONG == 64 +/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for + * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. 
+ */ + +#define HSIPROUND SIPROUND +#define HPREAMBLE(len) PREAMBLE(len) +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +#endif + +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} + 
+/** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + b |= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(8) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(12) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + b |= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(16) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + combined = (u64)forth << 32 | third; + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} +#else +#define HSIPROUND \ + do { \ + v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ + v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ + v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ + v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ + } while (0) + +#define HPREAMBLE(len) \ + u32 v0 = 0; \ + u32 v1 = 0; \ + u32 v2 = 0x6c796765U; \ + u32 v3 = 0x74656462U; \ + u32 b = ((u32)(len)) << 24; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= 
key->key[1]; \ + v0 ^= key->key[0]; + +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return v1 ^ v3; + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = le32_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +#endif + +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = get_unaligned_le32(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} + +/** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + v3 ^= first; + HSIPROUND; + v0 ^= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + HPREAMBLE(8) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const 
u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + HPREAMBLE(12) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + HPREAMBLE(16) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + v3 ^= forth; + HSIPROUND; + v0 ^= forth; + HPOSTAMBLE +} +#endif diff --git a/net/wireguard/compat/skb_array/include/linux/skb_array.h b/net/wireguard/compat/skb_array/include/linux/skb_array.h new file mode 100644 index 000000000000..c91fedcdbfc6 --- /dev/null +++ b/net/wireguard/compat/skb_array/include/linux/skb_array.h @@ -0,0 +1,11 @@ +#ifndef _WG_SKB_ARRAY_H +#define _WG_SKB_ARRAY_H + +#include + +static void __skb_array_destroy_skb(void *ptr) +{ + kfree_skb(ptr); +} + +#endif diff --git a/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h b/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h new file mode 100644 index 000000000000..8999527d6952 --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h @@ -0,0 +1,94 @@ +#ifndef _WG_NET_UDP_TUNNEL_H +#define _WG_NET_UDP_TUNNEL_H + +#include +#include + +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#endif + +struct udp_port_cfg { + u8 family; + + /* Used only for kernel-created sockets */ + union { + struct in_addr local_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr local_ip6; +#endif + }; + + union { + struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr peer_ip6; +#endif + }; + + __be16 local_udp_port; + __be16 peer_udp_port; + unsigned int use_udp_checksums:1, + 
use_udp6_tx_checksums:1, + use_udp6_rx_checksums:1, + ipv6_v6only:1; +}; + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. 
*/ +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +#endif /* _WG_NET_UDP_TUNNEL_H */ diff --git a/net/wireguard/compat/udp_tunnel/udp_tunnel.c b/net/wireguard/compat/udp_tunnel/udp_tunnel.c new file mode 100644 index 000000000000..d287b917be84 --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/udp_tunnel.c @@ -0,0 +1,396 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) +#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) +#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) +#endif + +/* This is global so, uh, only one real call site... This is the kind of horrific hack you'd expect to see in compat code. 
*/ +static udp_tunnel_encap_rcv_t encap_rcv = NULL; +static void __compat_sk_data_ready(struct sock *sk +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) + ,int unused_vulnerable_length_param +#endif + ) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + skb_orphan(skb); + sk_mem_reclaim(sk); + encap_rcv(sk, skb); + } +} + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + int err; + struct socket *sock = NULL; + struct sockaddr_in udp_addr; + + err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + sk_change_net(sock->sk, net); + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto error; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) + sock->sk->sk_no_check = !cfg->use_udp_checksums; +#else + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; +#endif + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + inet_sk(sock->sk)->mc_loop = 0; + encap_rcv = cfg->encap_rcv; + rcu_assign_sk_user_data(sock->sk, cfg->sk_user_data); + /* We force the cast in this awful way, due to various Android kernels + * backporting things stupidly. 
*/ + *(void **)&sock->sk->sk_data_ready = (void *)__compat_sk_data_ready; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) +static inline __sum16 udp_v4_check(int len, __be32 saddr, + __be32 daddr, __wsum base) +{ + return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base); +} + +static void udp_set_csum(bool nocheck, struct sk_buff *skb, + __be32 saddr, __be32 daddr, int len) +{ + struct udphdr *uh = udp_hdr(skb); + + if (nocheck) + uh->check = 0; + else if (skb_is_gso(skb)) + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + else if (skb_dst(skb) && skb_dst(skb)->dev && + (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + } else { + __wsum csum; + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + uh->check = 0; + csum = skb_checksum(skb, 0, len, 0); + uh->check = udp_v4_check(len, saddr, daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +#endif + +static void __compat_fake_destructor(struct sk_buff *skb) +{ +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +static void __compat_iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df, bool xnet) +{ + struct iphdr *iph; + struct pcpu_tstats *tstats = this_cpu_ptr(skb->dev->tstats); + + skb_scrub_packet(skb, xnet); + + skb->rxhash = 0; + skb_dst_set(skb, &rt->dst); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + + /* Push down and install the IP header. 
*/ + skb_push(skb, sizeof(struct iphdr)); + skb_reset_network_header(skb); + + iph = ip_hdr(skb); + + iph->version = 4; + iph->ihl = sizeof(struct iphdr) >> 2; + iph->frag_off = df; + iph->protocol = proto; + iph->tos = tos; + iph->daddr = dst; + iph->saddr = src; + iph->ttl = ttl; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 53) + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); +#else + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1); +#endif + + iptunnel_xmit(skb, skb->dev); + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes -= 8; + u64_stats_update_end(&tstats->syncp); +} +#define iptunnel_xmit __compat_iptunnel_xmit +#endif + +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck) +{ + struct udphdr *uh; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + struct net_device *dev = skb->dev; + int ret; +#endif + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + + udp_set_csum(nocheck, skb, src, dst, skb->len); + + if (!skb->sk) + skb->sk = sk; + if (!skb->destructor) + skb->destructor = __compat_fake_destructor; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + ret = +#endif + iptunnel_xmit( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + sk, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + dev_net(dev), +#endif + rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) + , xnet +#endif + ); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + if (ret) + iptunnel_xmit_stats(ret - 8, &dev->stats, dev->tstats); +#endif +} + +void 
udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} + +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + struct sockaddr_in6 udp6_addr; + int err; + struct socket *sock = NULL; + + err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + sk_change_net(sock->sk, net); + + if (cfg->ipv6_v6only) { + int val = 1; + + err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, + (char *) &val, sizeof(val)); + if (err < 0) + goto error; + } + + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->peer_udp_port; + err = kernel_connect(sock, + (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr), 0); + } + if (err < 0) + goto error; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) + sock->sk->sk_no_check = !cfg->use_udp_checksums; +#else + udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); + udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); +#endif + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) +static inline __sum16 udp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, 
IPPROTO_UDP, base); +} +static void udp6_set_csum(bool nocheck, struct sk_buff *skb, + const struct in6_addr *saddr, + const struct in6_addr *daddr, int len) +{ + struct udphdr *uh = udp_hdr(skb); + + if (nocheck) + uh->check = 0; + else if (skb_is_gso(skb)) + uh->check = ~udp_v6_check(len, saddr, daddr, 0); + else if (skb_dst(skb) && skb_dst(skb)->dev && + (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~udp_v6_check(len, saddr, daddr, 0); + } else { + __wsum csum; + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + uh->check = 0; + csum = skb_checksum(skb, 0, len, 0); + uh->check = udp_v6_check(len, saddr, daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} +#endif + +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = htons(skb->len); + + skb_dst_set(skb, dst); + + udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); + + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, label); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + if (!skb->sk) + skb->sk = sk; + if (!skb->destructor) + skb->destructor = __compat_fake_destructor; + + ip6tunnel_xmit(skb, dev); + return 0; +} +#endif diff --git 
a/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h b/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h new file mode 100644 index 000000000000..0605896e902f --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) +#define udp_sock_create4 udp_sock_create +#define udp_sock_create6 udp_sock_create +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#include +#include +#include +#endif +static inline void __compat_fake_destructor(struct sk_buff *skb) +{ +} +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +struct udp_tunnel_sock_cfg { + void *sk_user_data; + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; +}; +/* This is global so, uh, only one real call site... This is the kind of horrific hack you'd expect to see in compat code. 
*/ +static udp_tunnel_encap_rcv_t encap_rcv = NULL; +static void __compat_sk_data_ready(struct sock *sk) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + skb_orphan(skb); + sk_mem_reclaim(sk); + encap_rcv(sk, skb); + } +} +static inline void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + struct sock *sk = sock->sk; + inet_sk(sk)->mc_loop = 0; + encap_rcv = cfg->encap_rcv; + rcu_assign_sk_user_data(sk, cfg->sk_user_data); + sk->sk_data_ready = __compat_sk_data_ready; +} +static inline void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} +static inline int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet) +{ + struct udphdr *uh; + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len); + return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP, + tos, ttl, df, xnet); +} +#if IS_ENABLED(CONFIG_IPV6) +static inline int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, + __be16 dst_port) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + struct sock *sk = sock->sk; + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED + | IPSKB_REROUTED); + skb_dst_set(skb, dst); + 
udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, + &sk->sk_v6_daddr, skb->len); + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, htonl(0)); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + ip6tunnel_xmit(skb, dev); + return 0; +} +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include +#include +#include +#include +#include +#include +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; ret__ = udp_tunnel_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, i, j, k); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#if IS_ENABLED(CONFIG_IPV6) +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, j, k); +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) +static inline void __compat_fake_destructor(struct sk_buff *skb) +{ +} +#endif +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; if (!(c)->destructor) (c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); ret__ = udp_tunnel_xmit_skb(a, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#if IS_ENABLED(CONFIG_IPV6) +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { if (!(c)->destructor) (c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); udp_tunnel6_xmit_skb(a, c, d, e, f, g, h, j, k, l); } while(0) +#endif +#else + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE 
>= KERNEL_VERSION(3, 17, 0) +#include +#include +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) +#include +#include +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); iptunnel_xmit_stats(ret__, &dev__->stats, dev__->tstats); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && IS_ENABLED(CONFIG_IPV6) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include +#include +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, j, k, l) +#endif + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include +#include +#include +struct __compat_udp_port_cfg { + u8 family; + union { + struct in_addr local_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr local_ip6; +#endif + }; + union { + struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr peer_ip6; +#endif + }; + __be16 local_udp_port; + __be16 peer_udp_port; + unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, ipv6_v6only:1; +}; +static inline int __maybe_unused __compat_udp_sock_create(struct net *net, struct __compat_udp_port_cfg *cfg, struct socket **sockp) +{ + struct udp_port_cfg old_cfg = { + .family = cfg->family, + .local_ip = cfg->local_ip, +#if IS_ENABLED(CONFIG_IPV6) + .local_ip6 = cfg->local_ip6, +#endif + .peer_ip = cfg->peer_ip, +#if IS_ENABLED(CONFIG_IPV6) + .peer_ip6 = cfg->peer_ip6, +#endif + .local_udp_port = cfg->local_udp_port, + .peer_udp_port = cfg->peer_udp_port, + 
.use_udp_checksums = cfg->use_udp_checksums, + .use_udp6_tx_checksums = cfg->use_udp6_tx_checksums, + .use_udp6_rx_checksums = cfg->use_udp6_rx_checksums + }; + if (cfg->family == AF_INET) + return udp_sock_create4(net, &old_cfg, sockp); + +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->family == AF_INET6) { + int ret; + int old_bindv6only; + struct net *nobns; + + if (cfg->ipv6_v6only) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) + nobns = &init_net; +#else + nobns = net; +#endif + /* Since udp_port_cfg only learned of ipv6_v6only in 4.3, we do this horrible + * hack here and set the sysctl variable temporarily to something that will + * set the right option for us in sock_create. It's super racey! */ + old_bindv6only = nobns->ipv6.sysctl.bindv6only; + nobns->ipv6.sysctl.bindv6only = 1; + } + ret = udp_sock_create6(net, &old_cfg, sockp); + if (cfg->ipv6_v6only) + nobns->ipv6.sysctl.bindv6only = old_bindv6only; + return ret; + } +#endif + return -EPFNOSUPPORT; +} +#define udp_port_cfg __compat_udp_port_cfg +#define udp_sock_create(a, b, c) __compat_udp_sock_create(a, b, c) +#endif diff --git a/net/wireguard/compat/version/linux/version.h b/net/wireguard/compat/version/linux/version.h new file mode 100644 index 000000000000..90988b37aed6 --- /dev/null +++ b/net/wireguard/compat/version/linux/version.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2021 Jason A. Donenfeld . All Rights Reserved. + */ + +#include_next +#undef KERNEL_VERSION +#define KERNEL_VERSION(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#undef LINUX_VERSION_CODE +#define LINUX_VERSION_CODE KERNEL_VERSION(COMPAT_VERSION, COMPAT_PATCHLEVEL, COMPAT_SUBLEVEL) diff --git a/net/wireguard/cookie.c b/net/wireguard/cookie.c new file mode 100644 index 000000000000..8b7d1fe0cdf4 --- /dev/null +++ b/net/wireguard/cookie.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "cookie.h" +#include "peer.h" +#include "device.h" +#include "messages.h" +#include "ratelimiter.h" +#include "timers.h" + +#include +#include + +#include +#include + +void wg_cookie_checker_init(struct cookie_checker *checker, + struct wg_device *wg) +{ + init_rwsem(&checker->secret_lock); + checker->secret_birthdate = ktime_get_coarse_boottime_ns(); + get_random_bytes(checker->secret, NOISE_HASH_LEN); + checker->device = wg; +} + +enum { COOKIE_KEY_LABEL_LEN = 8 }; +static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----"; +static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--"; + +static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 pubkey[NOISE_PUBLIC_KEY_LEN], + const u8 label[COOKIE_KEY_LABEL_LEN]) +{ + struct blake2s_state blake; + + blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); + blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); + blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN); + blake2s_final(&blake, key); +} + +/* Must hold peer->handshake.static_identity->lock */ +void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker) +{ + if (likely(checker->device->static_identity.has_identity)) { + precompute_key(checker->cookie_encryption_key, + checker->device->static_identity.static_public, + cookie_key_label); + precompute_key(checker->message_mac1_key, + checker->device->static_identity.static_public, + mac1_key_label); + } else { + memset(checker->cookie_encryption_key, 0, + NOISE_SYMMETRIC_KEY_LEN); + memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN); + } +} + +void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer) +{ + precompute_key(peer->latest_cookie.cookie_decryption_key, + peer->handshake.remote_static, cookie_key_label); + precompute_key(peer->latest_cookie.message_mac1_key, + peer->handshake.remote_static, mac1_key_label); +} + +void wg_cookie_init(struct cookie *cookie) +{ + memset(cookie, 0, sizeof(*cookie)); + init_rwsem(&cookie->lock); +} 
+ +static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, + const u8 key[NOISE_SYMMETRIC_KEY_LEN]) +{ + len = len - sizeof(struct message_macs) + + offsetof(struct message_macs, mac1); + blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); +} + +static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, + const u8 cookie[COOKIE_LEN]) +{ + len = len - sizeof(struct message_macs) + + offsetof(struct message_macs, mac2); + blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); +} + +static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, + struct cookie_checker *checker) +{ + struct blake2s_state state; + + if (wg_birthdate_has_expired(checker->secret_birthdate, + COOKIE_SECRET_MAX_AGE)) { + down_write(&checker->secret_lock); + checker->secret_birthdate = ktime_get_coarse_boottime_ns(); + get_random_bytes(checker->secret, NOISE_HASH_LEN); + up_write(&checker->secret_lock); + } + + down_read(&checker->secret_lock); + + blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); + if (skb->protocol == htons(ETH_P_IP)) + blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, + sizeof(struct in_addr)); + else if (skb->protocol == htons(ETH_P_IPV6)) + blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); + blake2s_final(&state, cookie); + + up_read(&checker->secret_lock); +} + +enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, + struct sk_buff *skb, + bool check_cookie) +{ + struct message_macs *macs = (struct message_macs *) + (skb->data + skb->len - sizeof(*macs)); + enum cookie_mac_state ret; + u8 computed_mac[COOKIE_LEN]; + u8 cookie[COOKIE_LEN]; + + ret = INVALID_MAC; + compute_mac1(computed_mac, skb->data, skb->len, + checker->message_mac1_key); + if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN)) + goto out; + + ret = VALID_MAC_BUT_NO_COOKIE; + + if 
(!check_cookie) + goto out; + + make_cookie(cookie, skb, checker); + + compute_mac2(computed_mac, skb->data, skb->len, cookie); + if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN)) + goto out; + + ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED; + if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev))) + goto out; + + ret = VALID_MAC_WITH_COOKIE; + +out: + return ret; +} + +void wg_cookie_add_mac_to_packet(void *message, size_t len, + struct wg_peer *peer) +{ + struct message_macs *macs = (struct message_macs *) + ((u8 *)message + len - sizeof(*macs)); + + down_write(&peer->latest_cookie.lock); + compute_mac1(macs->mac1, message, len, + peer->latest_cookie.message_mac1_key); + memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN); + peer->latest_cookie.have_sent_mac1 = true; + up_write(&peer->latest_cookie.lock); + + down_read(&peer->latest_cookie.lock); + if (peer->latest_cookie.is_valid && + !wg_birthdate_has_expired(peer->latest_cookie.birthdate, + COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY)) + compute_mac2(macs->mac2, message, len, + peer->latest_cookie.cookie); + else + memset(macs->mac2, 0, COOKIE_LEN); + up_read(&peer->latest_cookie.lock); +} + +void wg_cookie_message_create(struct message_handshake_cookie *dst, + struct sk_buff *skb, __le32 index, + struct cookie_checker *checker) +{ + struct message_macs *macs = (struct message_macs *) + ((u8 *)skb->data + skb->len - sizeof(*macs)); + u8 cookie[COOKIE_LEN]; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE); + dst->receiver_index = index; + get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN); + + make_cookie(cookie, skb, checker); + xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN, + macs->mac1, COOKIE_LEN, dst->nonce, + checker->cookie_encryption_key); +} + +void wg_cookie_message_consume(struct message_handshake_cookie *src, + struct wg_device *wg) +{ + struct wg_peer *peer = NULL; + u8 cookie[COOKIE_LEN]; + bool ret; + + if 
(unlikely(!wg_index_hashtable_lookup(wg->index_hashtable, + INDEX_HASHTABLE_HANDSHAKE | + INDEX_HASHTABLE_KEYPAIR, + src->receiver_index, &peer))) + return; + + down_read(&peer->latest_cookie.lock); + if (unlikely(!peer->latest_cookie.have_sent_mac1)) { + up_read(&peer->latest_cookie.lock); + goto out; + } + ret = xchacha20poly1305_decrypt( + cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie), + peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce, + peer->latest_cookie.cookie_decryption_key); + up_read(&peer->latest_cookie.lock); + + if (ret) { + down_write(&peer->latest_cookie.lock); + memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN); + peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns(); + peer->latest_cookie.is_valid = true; + peer->latest_cookie.have_sent_mac1 = false; + up_write(&peer->latest_cookie.lock); + } else { + net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n", + wg->dev->name); + } + +out: + wg_peer_put(peer); +} diff --git a/net/wireguard/cookie.h b/net/wireguard/cookie.h new file mode 100644 index 000000000000..c4bd61ca03f2 --- /dev/null +++ b/net/wireguard/cookie.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _WG_COOKIE_H +#define _WG_COOKIE_H + +#include "messages.h" +#include + +struct wg_peer; + +struct cookie_checker { + u8 secret[NOISE_HASH_LEN]; + u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN]; + u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; + u64 secret_birthdate; + struct rw_semaphore secret_lock; + struct wg_device *device; +}; + +struct cookie { + u64 birthdate; + bool is_valid; + u8 cookie[COOKIE_LEN]; + bool have_sent_mac1; + u8 last_mac1_sent[COOKIE_LEN]; + u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN]; + u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; + struct rw_semaphore lock; +}; + +enum cookie_mac_state { + INVALID_MAC, + VALID_MAC_BUT_NO_COOKIE, + VALID_MAC_WITH_COOKIE_BUT_RATELIMITED, + VALID_MAC_WITH_COOKIE +}; + +void wg_cookie_checker_init(struct cookie_checker *checker, + struct wg_device *wg); +void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker); +void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer); +void wg_cookie_init(struct cookie *cookie); + +enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, + struct sk_buff *skb, + bool check_cookie); +void wg_cookie_add_mac_to_packet(void *message, size_t len, + struct wg_peer *peer); + +void wg_cookie_message_create(struct message_handshake_cookie *src, + struct sk_buff *skb, __le32 index, + struct cookie_checker *checker); +void wg_cookie_message_consume(struct message_handshake_cookie *src, + struct wg_device *wg); + +#endif /* _WG_COOKIE_H */ diff --git a/net/wireguard/crypto/Makefile.include b/net/wireguard/crypto/Makefile.include new file mode 100644 index 000000000000..f2a312e96d88 --- /dev/null +++ b/net/wireguard/crypto/Makefile.include @@ -0,0 +1,57 @@ +ifeq ($(CONFIG_X86_64)$(if $(CONFIG_UML),y,n),yn) +CONFIG_ZINC_ARCH_X86_64 := y +endif +ifeq ($(CONFIG_ARM)$(if $(CONFIG_CPU_32v3),y,n),yn) +CONFIG_ZINC_ARCH_ARM := y +endif +ifeq ($(CONFIG_ARM64),y) +CONFIG_ZINC_ARCH_ARM64 := y +endif +ifeq 
($(CONFIG_MIPS)$(CONFIG_CPU_MIPS32_R2),yy) +CONFIG_ZINC_ARCH_MIPS := y +endif +ifeq ($(CONFIG_MIPS)$(CONFIG_64BIT),yy) +CONFIG_ZINC_ARCH_MIPS64 := y +endif + +zinc-y += chacha20/chacha20.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += chacha20/chacha20-x86_64.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += chacha20/chacha20-arm.o chacha20/chacha20-unrolled-arm.o +zinc-$(CONFIG_ZINC_ARCH_ARM64) += chacha20/chacha20-arm64.o +zinc-$(CONFIG_ZINC_ARCH_MIPS) += chacha20/chacha20-mips.o +AFLAGS_chacha20-mips.o += -O2 # This is required to fill the branch delay slots + +zinc-y += poly1305/poly1305.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += poly1305/poly1305-x86_64.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += poly1305/poly1305-arm.o +zinc-$(CONFIG_ZINC_ARCH_ARM64) += poly1305/poly1305-arm64.o +zinc-$(CONFIG_ZINC_ARCH_MIPS) += poly1305/poly1305-mips.o +AFLAGS_poly1305-mips.o += -O2 # This is required to fill the branch delay slots +zinc-$(CONFIG_ZINC_ARCH_MIPS64) += poly1305/poly1305-mips64.o + +zinc-y += chacha20poly1305.o + +zinc-y += blake2s/blake2s.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += blake2s/blake2s-x86_64.o + +zinc-y += curve25519/curve25519.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += curve25519/curve25519-arm.o + +quiet_cmd_perlasm = PERLASM $@ + cmd_perlasm = $(PERL) $< > $@ +$(obj)/%.S: $(src)/%.pl FORCE + $(call if_changed,perlasm) +kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) +targets := $(patsubst $(kbuild-dir)/%.pl,%.S,$(wildcard $(patsubst %.o,$(kbuild-dir)/crypto/zinc/%.pl,$(zinc-y) $(zinc-m) $(zinc-)))) + +# Old kernels don't set this, which causes trouble. 
+.SECONDARY: + +wireguard-y += $(addprefix crypto/zinc/,$(zinc-y)) +ccflags-y += -I$(kbuild-dir)/crypto/include +ccflags-$(CONFIG_ZINC_ARCH_X86_64) += -DCONFIG_ZINC_ARCH_X86_64 +ccflags-$(CONFIG_ZINC_ARCH_ARM) += -DCONFIG_ZINC_ARCH_ARM +ccflags-$(CONFIG_ZINC_ARCH_ARM64) += -DCONFIG_ZINC_ARCH_ARM64 +ccflags-$(CONFIG_ZINC_ARCH_MIPS) += -DCONFIG_ZINC_ARCH_MIPS +ccflags-$(CONFIG_ZINC_ARCH_MIPS64) += -DCONFIG_ZINC_ARCH_MIPS64 +ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DCONFIG_ZINC_SELFTEST diff --git a/net/wireguard/crypto/include/zinc/blake2s.h b/net/wireguard/crypto/include/zinc/blake2s.h new file mode 100644 index 000000000000..2ca0bc30750d --- /dev/null +++ b/net/wireguard/crypto/include/zinc/blake2s.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _ZINC_BLAKE2S_H +#define _ZINC_BLAKE2S_H + +#include +#include +#include + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32 +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[BLAKE2S_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +void blake2s_init(struct blake2s_state *state, const size_t outlen); +void blake2s_init_key(struct blake2s_state *state, const size_t outlen, + const void *key, const size_t keylen); +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + +static inline void blake2s(u8 *out, const u8 *in, const u8 *key, + const size_t outlen, const size_t inlen, + const size_t keylen) +{ + struct blake2s_state state; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || + (!key && keylen))); + + if (keylen) + blake2s_init_key(&state, outlen, key, keylen); + else + blake2s_init(&state, outlen); + + blake2s_update(&state, in, inlen); + blake2s_final(&state, 
out); +} + +void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen, + const size_t inlen, const size_t keylen); + +#endif /* _ZINC_BLAKE2S_H */ diff --git a/net/wireguard/crypto/include/zinc/chacha20.h b/net/wireguard/crypto/include/zinc/chacha20.h new file mode 100644 index 000000000000..1b0083d871fb --- /dev/null +++ b/net/wireguard/crypto/include/zinc/chacha20.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _ZINC_CHACHA20_H +#define _ZINC_CHACHA20_H + +#include +#include +#include +#include + +enum chacha20_lengths { + CHACHA20_NONCE_SIZE = 16, + CHACHA20_KEY_SIZE = 32, + CHACHA20_KEY_WORDS = CHACHA20_KEY_SIZE / sizeof(u32), + CHACHA20_BLOCK_SIZE = 64, + CHACHA20_BLOCK_WORDS = CHACHA20_BLOCK_SIZE / sizeof(u32), + HCHACHA20_NONCE_SIZE = CHACHA20_NONCE_SIZE, + HCHACHA20_KEY_SIZE = CHACHA20_KEY_SIZE +}; + +enum chacha20_constants { /* expand 32-byte k */ + CHACHA20_CONSTANT_EXPA = 0x61707865U, + CHACHA20_CONSTANT_ND_3 = 0x3320646eU, + CHACHA20_CONSTANT_2_BY = 0x79622d32U, + CHACHA20_CONSTANT_TE_K = 0x6b206574U +}; + +struct chacha20_ctx { + union { + u32 state[16]; + struct { + u32 constant[4]; + u32 key[8]; + u32 counter[4]; + }; + }; +}; + +static inline void chacha20_init(struct chacha20_ctx *ctx, + const u8 key[CHACHA20_KEY_SIZE], + const u64 nonce) +{ + ctx->constant[0] = CHACHA20_CONSTANT_EXPA; + ctx->constant[1] = CHACHA20_CONSTANT_ND_3; + ctx->constant[2] = CHACHA20_CONSTANT_2_BY; + ctx->constant[3] = CHACHA20_CONSTANT_TE_K; + ctx->key[0] = get_unaligned_le32(key + 0); + ctx->key[1] = get_unaligned_le32(key + 4); + ctx->key[2] = get_unaligned_le32(key + 8); + ctx->key[3] = get_unaligned_le32(key + 12); + ctx->key[4] = get_unaligned_le32(key + 16); + ctx->key[5] = get_unaligned_le32(key + 20); + ctx->key[6] = get_unaligned_le32(key + 24); + ctx->key[7] = get_unaligned_le32(key + 28); + ctx->counter[0] = 0; + ctx->counter[1] = 0; + 
ctx->counter[2] = nonce & U32_MAX; + ctx->counter[3] = nonce >> 32; +} +void chacha20(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 len, + simd_context_t *simd_context); + +void hchacha20(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], simd_context_t *simd_context); + +#endif /* _ZINC_CHACHA20_H */ diff --git a/net/wireguard/crypto/include/zinc/chacha20poly1305.h b/net/wireguard/crypto/include/zinc/chacha20poly1305.h new file mode 100644 index 000000000000..e3339f02996e --- /dev/null +++ b/net/wireguard/crypto/include/zinc/chacha20poly1305.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _ZINC_CHACHA20POLY1305_H +#define _ZINC_CHACHA20POLY1305_H + +#include +#include + +struct scatterlist; + +enum chacha20poly1305_lengths { + XCHACHA20POLY1305_NONCE_SIZE = 24, + CHACHA20POLY1305_KEY_SIZE = 32, + CHACHA20POLY1305_AUTHTAG_SIZE = 16 +}; + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check chacha20poly1305_encrypt_sg_inplace( + struct scatterlist *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], simd_context_t *simd_context); + +bool __must_check +chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check chacha20poly1305_decrypt_sg_inplace( + struct scatterlist *src, size_t src_len, const u8 *ad, + const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], simd_context_t *simd_context); + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 
nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check xchacha20poly1305_decrypt( + u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +#endif /* _ZINC_CHACHA20POLY1305_H */ diff --git a/net/wireguard/crypto/include/zinc/curve25519.h b/net/wireguard/crypto/include/zinc/curve25519.h new file mode 100644 index 000000000000..127d8a3a1c82 --- /dev/null +++ b/net/wireguard/crypto/include/zinc/curve25519.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _ZINC_CURVE25519_H +#define _ZINC_CURVE25519_H + +#include + +enum curve25519_lengths { + CURVE25519_KEY_SIZE = 32 +}; + +bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]); +void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]); +bool __must_check curve25519_generate_public( + u8 pub[CURVE25519_KEY_SIZE], const u8 secret[CURVE25519_KEY_SIZE]); + +static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + secret[0] &= 248; + secret[31] = (secret[31] & 127) | 64; +} + +#endif /* _ZINC_CURVE25519_H */ diff --git a/net/wireguard/crypto/include/zinc/poly1305.h b/net/wireguard/crypto/include/zinc/poly1305.h new file mode 100644 index 000000000000..8a16d19f8177 --- /dev/null +++ b/net/wireguard/crypto/include/zinc/poly1305.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _ZINC_POLY1305_H +#define _ZINC_POLY1305_H + +#include +#include + +enum poly1305_lengths { + POLY1305_BLOCK_SIZE = 16, + POLY1305_KEY_SIZE = 32, + POLY1305_MAC_SIZE = 16 +}; + +struct poly1305_ctx { + u8 opaque[24 * sizeof(u64)]; + u32 nonce[4]; + u8 data[POLY1305_BLOCK_SIZE]; + size_t num; +} __aligned(8); + +void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE]); +void poly1305_update(struct poly1305_ctx *ctx, const u8 *input, size_t len, + simd_context_t *simd_context); +void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], + simd_context_t *simd_context); + +#endif /* _ZINC_POLY1305_H */ diff --git a/net/wireguard/crypto/zinc.h b/net/wireguard/crypto/zinc.h new file mode 100644 index 000000000000..9aa1e8d59bf5 --- /dev/null +++ b/net/wireguard/crypto/zinc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_ZINC_H +#define _WG_ZINC_H + +int chacha20_mod_init(void); +int poly1305_mod_init(void); +int chacha20poly1305_mod_init(void); +int blake2s_mod_init(void); +int curve25519_mod_init(void); + +#endif diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c new file mode 100644 index 000000000000..f8cda59bf297 --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include +#include +#include + +asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, + const u8 *block, const size_t nblocks, + const u32 inc); +asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, + const u8 *block, const size_t nblocks, + const u32 inc); + +static bool blake2s_use_ssse3 __ro_after_init; +static bool blake2s_use_avx512 __ro_after_init; +static bool *const blake2s_nobs[] __initconst = { &blake2s_use_ssse3, + &blake2s_use_avx512 }; + +static void __init blake2s_fpu_init(void) +{ + blake2s_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3); +#ifndef COMPAT_CANNOT_USE_AVX512 + blake2s_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + boot_cpu_has(X86_FEATURE_AVX512VL) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL); +#endif +} + +static inline bool blake2s_compress_arch(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + simd_context_t simd_context; + bool used_arch = false; + + /* SIMD disables preemption, so relax after processing each page. 
*/ + BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); + + simd_get(&simd_context); + + if (!IS_ENABLED(CONFIG_AS_SSSE3) || !blake2s_use_ssse3 || + !simd_use(&simd_context)) + goto out; + used_arch = true; + + for (;;) { + const size_t blocks = min_t(size_t, nblocks, + PAGE_SIZE / BLAKE2S_BLOCK_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && blake2s_use_avx512) + blake2s_compress_avx512(state, block, blocks, inc); + else + blake2s_compress_ssse3(state, block, blocks, inc); + + nblocks -= blocks; + if (!nblocks) + break; + block += blocks * BLAKE2S_BLOCK_SIZE; + simd_relax(&simd_context); + } +out: + simd_put(&simd_context); + return used_arch; +} diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S new file mode 100644 index 000000000000..24910b766bdd --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * Copyright (C) 2017-2019 Samuel Neves . All Rights Reserved. 
+ */ + +#include + +.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32 +.align 32 +IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667 + .octa 0x5BE0CD191F83D9AB9B05688C510E527F +.section .rodata.cst16.ROT16, "aM", @progbits, 16 +.align 16 +ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302 +.section .rodata.cst16.ROR328, "aM", @progbits, 16 +.align 16 +ROR328: .octa 0x0C0F0E0D080B0A090407060500030201 +.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160 +.align 64 +SIGMA: +.byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 +.byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7 +.byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1 +.byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0 +.byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8 +.byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14 +.byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2 +.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 +.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 +.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 +#ifdef CONFIG_AS_AVX512 +.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640 +.align 64 +SIGMA2: +.long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 +.long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7 +.long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9 +.long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5 +.long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12 +.long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9 +.long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0 +.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10 +.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 +.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 +#endif /* CONFIG_AS_AVX512 */ + +.text +#ifdef CONFIG_AS_SSSE3 +SYM_FUNC_START(blake2s_compress_ssse3) + testq %rdx,%rdx + je .Lendofloop + movdqu (%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqa ROT16(%rip),%xmm12 
+ movdqa ROR328(%rip),%xmm13 + movdqu 0x20(%rdi),%xmm14 + movq %rcx,%xmm15 + leaq SIGMA+0xa0(%rip),%r8 + jmp .Lbeginofloop + .align 32 +.Lbeginofloop: + movdqa %xmm0,%xmm10 + movdqa %xmm1,%xmm11 + paddq %xmm15,%xmm14 + movdqa IV(%rip),%xmm2 + movdqa %xmm14,%xmm3 + pxor IV+0x10(%rip),%xmm3 + leaq SIGMA(%rip),%rcx +.Lroundloop: + movzbl (%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0x1(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + movzbl 0x2(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x3(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + punpckldq %xmm5,%xmm4 + punpckldq %xmm7,%xmm6 + punpcklqdq %xmm6,%xmm4 + paddd %xmm4,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm12,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0xc,%xmm1 + pslld $0x14,%xmm8 + por %xmm8,%xmm1 + movzbl 0x4(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + movzbl 0x5(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x6(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0x7(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + punpckldq %xmm6,%xmm5 + punpckldq %xmm4,%xmm7 + punpcklqdq %xmm7,%xmm5 + paddd %xmm5,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm13,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0x7,%xmm1 + pslld $0x19,%xmm8 + por %xmm8,%xmm1 + pshufd $0x93,%xmm0,%xmm0 + pshufd $0x4e,%xmm3,%xmm3 + pshufd $0x39,%xmm2,%xmm2 + movzbl 0x8(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x9(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0xa(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0xb(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + punpckldq %xmm7,%xmm6 + punpckldq %xmm5,%xmm4 + punpcklqdq %xmm4,%xmm6 + paddd %xmm6,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm12,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0xc,%xmm1 + pslld $0x14,%xmm8 + por %xmm8,%xmm1 + movzbl 0xc(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0xd(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0xe(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + movzbl 
0xf(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + punpckldq %xmm4,%xmm7 + punpckldq %xmm6,%xmm5 + punpcklqdq %xmm5,%xmm7 + paddd %xmm7,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm13,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0x7,%xmm1 + pslld $0x19,%xmm8 + por %xmm8,%xmm1 + pshufd $0x39,%xmm0,%xmm0 + pshufd $0x4e,%xmm3,%xmm3 + pshufd $0x93,%xmm2,%xmm2 + addq $0x10,%rcx + cmpq %r8,%rcx + jnz .Lroundloop + pxor %xmm2,%xmm0 + pxor %xmm3,%xmm1 + pxor %xmm10,%xmm0 + pxor %xmm11,%xmm1 + addq $0x40,%rsi + decq %rdx + jnz .Lbeginofloop + movdqu %xmm0,(%rdi) + movdqu %xmm1,0x10(%rdi) + movdqu %xmm14,0x20(%rdi) +.Lendofloop: + ret +SYM_FUNC_END(blake2s_compress_ssse3) +#endif /* CONFIG_AS_SSSE3 */ + +#ifdef CONFIG_AS_AVX512 +SYM_FUNC_START(blake2s_compress_avx512) + vmovdqu (%rdi),%xmm0 + vmovdqu 0x10(%rdi),%xmm1 + vmovdqu 0x20(%rdi),%xmm4 + vmovq %rcx,%xmm5 + vmovdqa IV(%rip),%xmm14 + vmovdqa IV+16(%rip),%xmm15 + jmp .Lblake2s_compress_avx512_mainloop +.align 32 +.Lblake2s_compress_avx512_mainloop: + vmovdqa %xmm0,%xmm10 + vmovdqa %xmm1,%xmm11 + vpaddq %xmm5,%xmm4,%xmm4 + vmovdqa %xmm14,%xmm2 + vpxor %xmm15,%xmm4,%xmm3 + vmovdqu (%rsi),%ymm6 + vmovdqu 0x20(%rsi),%ymm7 + addq $0x40,%rsi + leaq SIGMA2(%rip),%rax + movb $0xa,%cl +.Lblake2s_compress_avx512_roundloop: + addq $0x40,%rax + vmovdqa -0x40(%rax),%ymm8 + vmovdqa -0x20(%rax),%ymm9 + vpermi2d %ymm7,%ymm6,%ymm8 + vpermi2d %ymm7,%ymm6,%ymm9 + vmovdqa %ymm8,%ymm6 + vmovdqa %ymm9,%ymm7 + vpaddd %xmm8,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x10,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0xc,%xmm1,%xmm1 + vextracti128 $0x1,%ymm8,%xmm8 + vpaddd %xmm8,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x8,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0x7,%xmm1,%xmm1 + vpshufd $0x93,%xmm0,%xmm0 + vpshufd $0x4e,%xmm3,%xmm3 + vpshufd $0x39,%xmm2,%xmm2 + vpaddd 
%xmm9,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x10,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0xc,%xmm1,%xmm1 + vextracti128 $0x1,%ymm9,%xmm9 + vpaddd %xmm9,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x8,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0x7,%xmm1,%xmm1 + vpshufd $0x39,%xmm0,%xmm0 + vpshufd $0x4e,%xmm3,%xmm3 + vpshufd $0x93,%xmm2,%xmm2 + decb %cl + jne .Lblake2s_compress_avx512_roundloop + vpxor %xmm10,%xmm0,%xmm0 + vpxor %xmm11,%xmm1,%xmm1 + vpxor %xmm2,%xmm0,%xmm0 + vpxor %xmm3,%xmm1,%xmm1 + decq %rdx + jne .Lblake2s_compress_avx512_mainloop + vmovdqu %xmm0,(%rdi) + vmovdqu %xmm1,0x10(%rdi) + vmovdqu %xmm4,0x20(%rdi) + vzeroupper + retq +SYM_FUNC_END(blake2s_compress_avx512) +#endif /* CONFIG_AS_AVX512 */ diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s.c b/net/wireguard/crypto/zinc/blake2s/blake2s.c new file mode 100644 index 000000000000..a9b8a8d95998 --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is an implementation of the BLAKE2s hash and PRF functions. 
+ * + * Information: https://blake2.net/ + * + */ + +#include +#include "../selftest/run.h" + +#include +#include +#include +#include +#include +#include +#include + +static const u32 blake2s_iv[8] = { + 0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL, + 0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL +}; + +static const u8 blake2s_sigma[10][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, +}; + +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + +static inline void blake2s_increment_counter(struct blake2s_state *state, + const u32 inc) +{ + state->t[0] += inc; + state->t[1] += (state->t[0] < inc); +} + +static inline void blake2s_init_param(struct blake2s_state *state, + const u32 param) +{ + int i; + + memset(state, 0, sizeof(*state)); + for (i = 0; i < 8; ++i) + state->h[i] = blake2s_iv[i]; + state->h[0] ^= param; +} + +void blake2s_init(struct blake2s_state *state, const size_t outlen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE)); + blake2s_init_param(state, 0x01010000 | outlen); + state->outlen = outlen; +} + +void blake2s_init_key(struct blake2s_state *state, const size_t outlen, + const void *key, const size_t keylen) +{ + u8 block[BLAKE2S_BLOCK_SIZE] = { 0 }; + + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || + !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); + blake2s_init_param(state, 0x01010000 
| keylen << 8 | outlen); + state->outlen = outlen; + memcpy(block, key, keylen); + blake2s_update(state, block, BLAKE2S_BLOCK_SIZE); + memzero_explicit(block, BLAKE2S_BLOCK_SIZE); +} + +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "blake2s-x86_64-glue.c" +#else +static bool *const blake2s_nobs[] __initconst = { }; +static void __init blake2s_fpu_init(void) +{ +} +static inline bool blake2s_compress_arch(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + return false; +} +#endif + +static inline void blake2s_compress(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + u32 m[16]; + u32 v[16]; + int i; + + WARN_ON(IS_ENABLED(DEBUG) && + (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); + + if (blake2s_compress_arch(state, block, nblocks, inc)) + return; + + while (nblocks > 0) { + blake2s_increment_counter(state, inc); + memcpy(m, block, BLAKE2S_BLOCK_SIZE); + le32_to_cpu_array(m, ARRAY_SIZE(m)); + memcpy(v, state->h, 32); + v[ 8] = blake2s_iv[0]; + v[ 9] = blake2s_iv[1]; + v[10] = blake2s_iv[2]; + v[11] = blake2s_iv[3]; + v[12] = blake2s_iv[4] ^ state->t[0]; + v[13] = blake2s_iv[5] ^ state->t[1]; + v[14] = blake2s_iv[6] ^ state->f[0]; + v[15] = blake2s_iv[7] ^ state->f[1]; + +#define G(r, i, a, b, c, d) do { \ + a += b + m[blake2s_sigma[r][2 * i + 0]]; \ + d = ror32(d ^ a, 16); \ + c += d; \ + b = ror32(b ^ c, 12); \ + a += b + m[blake2s_sigma[r][2 * i + 1]]; \ + d = ror32(d ^ a, 8); \ + c += d; \ + b = ror32(b ^ c, 7); \ +} while (0) + +#define ROUND(r) do { \ + G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ + G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ + G(r, 2, v[2], v[ 6], v[10], v[14]); \ + G(r, 3, v[3], v[ 7], v[11], v[15]); \ + G(r, 4, v[0], v[ 5], v[10], v[15]); \ + G(r, 5, v[1], v[ 6], v[11], v[12]); \ + G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ + G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ +} while (0) + ROUND(0); + ROUND(1); + ROUND(2); + ROUND(3); + ROUND(4); + ROUND(5); + ROUND(6); + ROUND(7); + ROUND(8); + 
ROUND(9); + +#undef G +#undef ROUND + + for (i = 0; i < 8; ++i) + state->h[i] ^= v[i] ^ v[i + 8]; + + block += BLAKE2S_BLOCK_SIZE; + --nblocks; + } +} + +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) +{ + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +void blake2s_final(struct blake2s_state *state, u8 *out) +{ + WARN_ON(IS_ENABLED(DEBUG) && !out); + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); + memzero_explicit(state, sizeof(*state)); +} + +void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen, + const size_t inlen, const size_t keylen) +{ + struct blake2s_state state; + u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; + u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); + int i; + + if (keylen > BLAKE2S_BLOCK_SIZE) { + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, key, keylen); + blake2s_final(&state, x_key); + } else + memcpy(x_key, key, keylen); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + 
blake2s_update(&state, in, inlen); + blake2s_final(&state, i_hash); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x5c ^ 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); + blake2s_final(&state, i_hash); + + memcpy(out, i_hash, outlen); + memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); + memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); +} + +#include "../selftest/blake2s.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init blake2s_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + blake2s_fpu_init(); + if (!selftest_run("blake2s", blake2s_selftest, blake2s_nobs, + ARRAY_SIZE(blake2s_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("BLAKE2s hash function"); +MODULE_AUTHOR("Jason A. Donenfeld "); +#endif diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c new file mode 100644 index 000000000000..41e2e79abb2b --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include +#if defined(CONFIG_ZINC_ARCH_ARM) +#include +#include +#endif + +asmlinkage void chacha20_arm(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void hchacha20_arm(const u32 state[16], u32 out[8]); +asmlinkage void chacha20_neon(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); + +static bool chacha20_use_neon __ro_after_init; +static bool *const chacha20_nobs[] __initconst = { &chacha20_use_neon }; +static void __init chacha20_fpu_init(void) +{ +#if defined(CONFIG_ZINC_ARCH_ARM64) + chacha20_use_neon = cpu_have_named_feature(ASIMD); +#elif defined(CONFIG_ZINC_ARCH_ARM) + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A7: + case ARM_CPU_PART_CORTEX_A5: + /* The Cortex-A7 and Cortex-A5 do not perform well with the NEON + * implementation but do incredibly with the scalar one and use + * less power. + */ + break; + default: + chacha20_use_neon = elf_hwcap & HWCAP_NEON; + } +#endif +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. 
*/ + BUILD_BUG_ON(PAGE_SIZE < CHACHA20_BLOCK_SIZE || + PAGE_SIZE % CHACHA20_BLOCK_SIZE); + + for (;;) { + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && chacha20_use_neon && + len >= CHACHA20_BLOCK_SIZE * 3 && simd_use(simd_context)) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + chacha20_neon(dst, src, bytes, ctx->key, ctx->counter); + ctx->counter[0] += (bytes + 63) / 64; + len -= bytes; + if (!len) + break; + dst += bytes; + src += bytes; + simd_relax(simd_context); + } else { + chacha20_arm(dst, src, len, ctx->key, ctx->counter); + ctx->counter[0] += (len + 63) / 64; + break; + } + } + + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM)) { + u32 x[] = { CHACHA20_CONSTANT_EXPA, + CHACHA20_CONSTANT_ND_3, + CHACHA20_CONSTANT_2_BY, + CHACHA20_CONSTANT_TE_K, + get_unaligned_le32(key + 0), + get_unaligned_le32(key + 4), + get_unaligned_le32(key + 8), + get_unaligned_le32(key + 12), + get_unaligned_le32(key + 16), + get_unaligned_le32(key + 20), + get_unaligned_le32(key + 24), + get_unaligned_le32(key + 28), + get_unaligned_le32(nonce + 0), + get_unaligned_le32(nonce + 4), + get_unaligned_le32(nonce + 8), + get_unaligned_le32(nonce + 12) + }; + hchacha20_arm(x, derived_key); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl b/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl new file mode 100644 index 000000000000..6785383ab7bb --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl @@ -0,0 +1,1227 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. 
+# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# December 2014 +# +# ChaCha20 for ARMv4. +# +# September 2018 +# +# Improve scalar performance per Eric Biggers' suggestion to eliminate +# separate rotates. This requires b[0..3] and d[0..3] to be maintained +# pre-rotated, hence odd twists prior inner loop and when accumulating +# key material. Since amount of instructions is reduced as result, even +# NEON performance is improved somewhat, most notably by ~9% on low-end +# Cortex-A5/A7. Full unroll was shown to provide even better scalar +# performance on Cortex-A5/A7, naturally at the cost of manyfold size +# increase. We let it be. Oversized code works in benchmarks, but is not +# necessarily optimal in real life, when it's likely to be out-of-cache +# upon entry and evict significant part of cache upon completion. +# +# Performance in cycles per byte out of large buffer. 
+# +# IALU/gcc-4.4 1xNEON 3xNEON+1xIALU +# +# Cortex-A5 14.2(*)/+160% 21.8 12.9(**) +# Cortex-A8 10.2(*)/+190% 13.9 6.10 +# Cortex-A9 10.8(*)/+150% 14.3 6.50 +# Cortex-A15 11.0/+40% 16.0 4.90 +# Snapdragon S4 13.9(***)/+90% 13.6 4.90 +# +# (*) most "favourable" result for aligned data on little-endian +# processor, result for misaligned data is 10-15% lower; +# (**) pure 4xNEON [with "vertical" layout] was shown to provide ~8% +# better performance on Cortex-A5/A7, but not on others; +# (***) it's 17% slower than original, trade-off is considered +# acceptable, because of improvement on others, specifically +# +36% on Cortex-A5/A7 and +20% on Cortex-A9; + +$flavour = shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +sub AUTOLOAD() # thunk [simplified] x86-style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; + my $arg = pop; + $arg = "#$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; +} + +my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x")); +my @t=map("r$_",(8..11)); + +sub ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my $odd = $d0&1; +my ($xc,$xc_) = (@t[0..1]); +my ($xd,$xd_) = $odd ? 
(@t[2],@x[$d1]) : (@x[$d0],@t[2]); +my @ret; + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' are permanently allocated in registers, @x[0..7], + # while 'c's and pair of 'd's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. If you observe 'd' column, you'll + # notice that 15 and 13 are reused in next pair of rounds. + # This is why these two are chosen for offloading to memory, + # to make loads count more. + push @ret,( + "&add (@x[$a0],@x[$a0],@x[$b0],'ror#13')", + "&add (@x[$a1],@x[$a1],@x[$b1],'ror#13')", + "&eor ($xd,@x[$a0],$xd,'ror#24')", + "&eor ($xd_,@x[$a1],$xd_,'ror#24')", + + "&add ($xc,$xc,$xd,'ror#16')", + "&add ($xc_,$xc_,$xd_,'ror#16')", + "&eor (@x[$b0],$xc, @x[$b0],'ror#13')", + "&eor (@x[$b1],$xc_,@x[$b1],'ror#13')", + + "&add (@x[$a0],@x[$a0],@x[$b0],'ror#20')", + "&add (@x[$a1],@x[$a1],@x[$b1],'ror#20')", + "&eor ($xd,@x[$a0],$xd,'ror#16')", + "&eor ($xd_,@x[$a1],$xd_,'ror#16')" ); + push @ret,( + "&str ($xd,'[sp,#4*(16+$d0)]')" ) if ($odd); + push @ret,( + "&add ($xc,$xc,$xd,'ror#24')" ); + push @ret,( + "&ldr ($xd,'[sp,#4*(16+$d2)]')" ) if ($odd); + push @ret,( + "&str ($xd_,'[sp,#4*(16+$d1)]')" ) if (!$odd); + push @ret,( + "&add ($xc_,$xc_,$xd_,'ror#24')" ); + push @ret,( + "&ldr ($xd_,'[sp,#4*(16+$d3)]')" ) if (!$odd); + push @ret,( + "&str ($xc,'[sp,#4*(16+$c0)]')", + "&eor (@x[$b0],@x[$b0],$xc,'ror#12')", + "&str ($xc_,'[sp,#4*(16+$c1)]')", + "&eor (@x[$b1],@x[$b1],$xc_,'ror#12')" ); + + $xd=@x[$d2] if (!$odd); + $xd_=@x[$d3] if ($odd); + push @ret,( + "&ldr ($xc,'[sp,#4*(16+$c2)]')", + "&add 
(@x[$a2],@x[$a2],@x[$b2],'ror#13')", + "&ldr ($xc_,'[sp,#4*(16+$c3)]')", + "&add (@x[$a3],@x[$a3],@x[$b3],'ror#13')", + "&eor ($xd,@x[$a2],$xd,'ror#24')", + "&eor ($xd_,@x[$a3],$xd_,'ror#24')", + + "&add ($xc,$xc,$xd,'ror#16')", + "&add ($xc_,$xc_,$xd_,'ror#16')", + "&eor (@x[$b2],$xc, @x[$b2],'ror#13')", + "&eor (@x[$b3],$xc_,@x[$b3],'ror#13')", + + "&add (@x[$a2],@x[$a2],@x[$b2],'ror#20')", + "&add (@x[$a3],@x[$a3],@x[$b3],'ror#20')", + "&eor ($xd,@x[$a2],$xd,'ror#16')", + "&eor ($xd_,@x[$a3],$xd_,'ror#16')", + + "&add ($xc,$xc,$xd,'ror#24')", + "&add ($xc_,$xc_,$xd_,'ror#24')", + "&eor (@x[$b2],@x[$b2],$xc,'ror#12')", + "&eor (@x[$b3],@x[$b3],$xc_,'ror#12')" ); + + @ret; +} + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +#else +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ +# define ChaCha20_ctr32 chacha20_arm_cryptogams +# define ChaCha20_neon chacha20_neon +#endif + +.text +#if defined(__thumb2__) || defined(__clang__) +.syntax unified +# define ldrhsb ldrbhs +#endif +#if defined(__thumb2__) +.thumb +#else +.code 32 +#endif + +.align 5 +.Lsigma: +.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral +.Lone: +.long 1,0,0,0 +.Lrot8: +.long 0x02010003,0x06050407 +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-.LChaCha20_ctr32 +#else +.word -1 +#endif + +.globl ChaCha20_ctr32 +.type ChaCha20_ctr32,%function +.align 5 +ChaCha20_ctr32: +.LChaCha20_ctr32: + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} +#if __ARM_ARCH__<7 && !defined(__thumb2__) + sub r14,pc,#16 @ ChaCha20_ctr32 +#else + adr r14,.LChaCha20_ctr32 +#endif + cmp r2,#0 @ len==0? 
+#ifdef __thumb2__ + itt eq +#endif + addeq sp,sp,#4*3 + beq .Lno_data +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + cmp r2,#192 @ test len + bls .Lshort + ldr r4,[r14,#-24] + ldr r4,[r14,r4] +# ifdef __APPLE__ + ldr r4,[r4] +# endif + tst r4,#ARMV7_NEON + bne .LChaCha20_neon +.Lshort: +#endif + ldmia r12,{r4-r7} @ load counter and nonce + sub sp,sp,#4*(16) @ off-load area + sub r14,r14,#64 @ .Lsigma + stmdb sp!,{r4-r7} @ copy counter and nonce + ldmia r3,{r4-r11} @ load key + ldmia r14,{r0-r3} @ load sigma + stmdb sp!,{r4-r11} @ copy key + stmdb sp!,{r0-r3} @ copy sigma + str r10,[sp,#4*(16+10)] @ off-load "@x[10]" + str r11,[sp,#4*(16+11)] @ off-load "@x[11]" + b .Loop_outer_enter + +.align 4 +.Loop_outer: + ldmia sp,{r0-r9} @ load key material + str @t[3],[sp,#4*(32+2)] @ save len + str r12, [sp,#4*(32+1)] @ save inp + str r14, [sp,#4*(32+0)] @ save out +.Loop_outer_enter: + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(16+15)] + mov @t[3],#10 + b .Loop + +.align 4 +.Loop: + subs @t[3],@t[3],#1 +___ + foreach (&ROUND(0, 4, 8,12)) { eval; } + foreach (&ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + bne .Loop + + ldr @t[3],[sp,#4*(32+2)] @ load len + + str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store + str @t[1], [sp,#4*(16+9)] + str @x[12],[sp,#4*(16+12)] + str @t[2], [sp,#4*(16+13)] + str @x[14],[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ @x[0-7] and second half at sp+4*(16+8) + + cmp @t[3],#64 @ done yet? +#ifdef __thumb2__ + itete lo +#endif + addlo r12,sp,#4*(0) @ shortcut or ... + ldrhs r12,[sp,#4*(32+1)] @ ... load inp + addlo r14,sp,#4*(0) @ shortcut or ... 
+ ldrhs r14,[sp,#4*(32+0)] @ ... load out + + ldr @t[0],[sp,#4*(0)] @ load key material + ldr @t[1],[sp,#4*(1)] + +#if __ARM_ARCH__>=6 || !defined(__ARMEB__) +# if __ARM_ARCH__<7 + orr @t[2],r12,r14 + tst @t[2],#3 @ are input and output aligned? + ldr @t[2],[sp,#4*(2)] + bne .Lunaligned + cmp @t[3],#64 @ restore flags +# else + ldr @t[2],[sp,#4*(2)] +# endif + ldr @t[3],[sp,#4*(3)] + + add @x[0],@x[0],@t[0] @ accumulate key material + add @x[1],@x[1],@t[1] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[0],@x[0],@t[0] @ xor with input + eorhs @x[1],@x[1],@t[1] + add @t[0],sp,#4*(4) + str @x[0],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[2],@x[2],@t[2] + eorhs @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[1],[r14,#-12] + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + add @x[5],@t[1],@x[5],ror#13 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#13 + add @x[7],@t[3],@x[7],ror#13 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[4],@x[4],@t[0] + eorhs @x[5],@x[5],@t[1] + add @t[0],sp,#4*(8) + str @x[4],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[6],@x[6],@t[2] + eorhs @x[7],@x[7],@t[3] + str @x[5],[r14,#-12] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[6],[r14,#-8] + add 
@x[0],sp,#4*(16+8) + str @x[7],[r14,#-4] + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + add @x[1],@x[1],@t[1] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] +# ifdef __thumb2__ + itt hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[0],@x[0],@t[0] + eorhs @x[1],@x[1],@t[1] + add @t[0],sp,#4*(12) + str @x[0],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[2],@x[2],@t[2] + eorhs @x[3],@x[3],@t[3] + str @x[1],[r14,#-12] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @x[5],@t[1],@x[5],ror#24 +# ifdef __thumb2__ + itt hi +# endif + addhi @t[0],@t[0],#1 @ next counter value + strhi @t[0],[sp,#4*(12)] @ save next counter value +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#24 + add @x[7],@t[3],@x[7],ror#24 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[4],@x[4],@t[0] + eorhs @x[5],@x[5],@t[1] +# ifdef __thumb2__ + it ne +# endif + ldrne @t[0],[sp,#4*(32+2)] @ re-load len +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[6],@x[6],@t[2] + eorhs @x[7],@x[7],@t[3] + str @x[4],[r14],#16 @ store output + str @x[5],[r14,#-12] +# ifdef __thumb2__ + it hs +# endif + subhs 
@t[3],@t[0],#64 @ len-=64 + str @x[6],[r14,#-8] + str @x[7],[r14,#-4] + bhi .Loop_outer + + beq .Ldone +# if __ARM_ARCH__<7 + b .Ltail + +.align 4 +.Lunaligned: @ unaligned endian-neutral path + cmp @t[3],#64 @ restore flags +# endif +#endif +#if __ARM_ARCH__<7 + ldr @t[3],[sp,#4*(3)] +___ +for ($i=0;$i<16;$i+=4) { +my $j=$i&0x7; +my $twist=""; +if ($i==4) { $twist = ",ror#13"; } +elsif ($i==12) { $twist = ",ror#24"; } + +$code.=<<___ if ($i==4); + add @x[0],sp,#4*(16+8) +___ +$code.=<<___ if ($i==8); + ldmia @x[0],{@x[0]-@x[7]} @ load second half +# ifdef __thumb2__ + itt hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" +___ +$code.=<<___; + add @x[$j+0],@t[0],@x[$j+0]$twist @ accumulate key material +___ +$code.=<<___ if ($i==12); +# ifdef __thumb2__ + itt hi +# endif + addhi @t[0],@t[0],#1 @ next counter value + strhi @t[0],[sp,#4*(12)] @ save next counter value +___ +$code.=<<___; + add @x[$j+1],@t[1],@x[$j+1]$twist + add @x[$j+2],@t[2],@x[$j+2]$twist +# ifdef __thumb2__ + itete lo +# endif + eorlo @t[0],@t[0],@t[0] @ zero or ... + ldrhsb @t[0],[r12],#16 @ ... 
load input + eorlo @t[1],@t[1],@t[1] + ldrhsb @t[1],[r12,#-12] + + add @x[$j+3],@t[3],@x[$j+3]$twist +# ifdef __thumb2__ + itete lo +# endif + eorlo @t[2],@t[2],@t[2] + ldrhsb @t[2],[r12,#-8] + eorlo @t[3],@t[3],@t[3] + ldrhsb @t[3],[r12,#-4] + + eor @x[$j+0],@t[0],@x[$j+0] @ xor with input (or zero) + eor @x[$j+1],@t[1],@x[$j+1] +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-15] @ load more input + ldrhsb @t[1],[r12,#-11] + eor @x[$j+2],@t[2],@x[$j+2] + strb @x[$j+0],[r14],#16 @ store output + eor @x[$j+3],@t[3],@x[$j+3] +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-7] + ldrhsb @t[3],[r12,#-3] + strb @x[$j+1],[r14,#-12] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+2],[r14,#-8] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-14] @ load more input + ldrhsb @t[1],[r12,#-10] + strb @x[$j+3],[r14,#-4] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+0],[r14,#-15] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-6] + ldrhsb @t[3],[r12,#-2] + strb @x[$j+1],[r14,#-11] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+2],[r14,#-7] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-13] @ load more input + ldrhsb @t[1],[r12,#-9] + strb @x[$j+3],[r14,#-3] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+0],[r14,#-14] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-5] + ldrhsb @t[3],[r12,#-1] + strb @x[$j+1],[r14,#-10] + strb @x[$j+2],[r14,#-6] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+3],[r14,#-2] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 + strb @x[$j+0],[r14,#-13] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+1],[r14,#-9] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 + strb @x[$j+2],[r14,#-5] + strb @x[$j+3],[r14,#-1] +___ +$code.=<<___ if ($i<12); + add @t[0],sp,#4*(4+$i) + ldmia @t[0],{@t[0]-@t[3]} @ load key material +___ +} +$code.=<<___; +# ifdef __thumb2__ + it 
ne +# endif + ldrne @t[0],[sp,#4*(32+2)] @ re-load len +# ifdef __thumb2__ + it hs +# endif + subhs @t[3],@t[0],#64 @ len-=64 + bhi .Loop_outer + + beq .Ldone +#endif + +.Ltail: + ldr r12,[sp,#4*(32+1)] @ load inp + add @t[1],sp,#4*(0) + ldr r14,[sp,#4*(32+0)] @ load out + +.Loop_tail: + ldrb @t[2],[@t[1]],#1 @ read buffer on stack + ldrb @t[3],[r12],#1 @ read input + subs @t[0],@t[0],#1 + eor @t[3],@t[3],@t[2] + strb @t[3],[r14],#1 @ store output + bne .Loop_tail + +.Ldone: + add sp,sp,#4*(32+3) +.Lno_data: +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .long 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size ChaCha20_ctr32,.-ChaCha20_ctr32 +___ + +{{{ +my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) = + map("q$_",(0..15)); + +# This can replace vshr-by-24+vsli-by-8. It gives ~3% improvement on +# Cortex-A5/A7, but hurts Cortex-A9 by 5% and Snapdragon S4 by 14%! +sub vperm() +{ my ($dst,$src,$tbl) = @_; + $code .= " vtbl.8 $dst#lo,{$src#lo},$tbl#lo\n"; + $code .= " vtbl.8 $dst#hi,{$src#hi},$tbl#lo\n"; +} + +sub NEONROUND { +my $odd = pop; +my ($a,$b,$c,$d,$t)=@_; + + ( + "&vadd_i32 ($a,$a,$b)", + "&veor ($d,$d,$a)", + "&vrev32_16 ($d,$d)", # vrot ($d,16) + + "&vadd_i32 ($c,$c,$d)", + "&veor ($t,$b,$c)", + "&vshr_u32 ($b,$t,20)", + "&vsli_32 ($b,$t,12)", + + "&vadd_i32 ($a,$a,$b)", + "&veor ($t,$d,$a)", + "&vshr_u32 ($d,$t,24)", + "&vsli_32 ($d,$t,8)", + #"&vperm ($d,$t,$t3)", + + "&vadd_i32 ($c,$c,$d)", + "&veor ($t,$b,$c)", + "&vshr_u32 ($b,$t,25)", + "&vsli_32 ($b,$t,7)", + + "&vext_8 ($a,$a,$a,$odd?4:12)", + "&vext_8 ($d,$d,$d,8)", + "&vext_8 ($c,$c,$c,$odd?12:4)" + ); +} + +$code.=<<___; +#if (defined(__KERNEL__) && defined(CONFIG_KERNEL_MODE_NEON)) || (!defined(__KERNEL__) && __ARM_MAX_ARCH__>=7) +.arch armv7-a +.fpu neon + +# ifdef __KERNEL__ +.globl ChaCha20_neon +@ For optimal performance it's appropriate for caller to enforce +@ 
minimum input length, 193 bytes is suggested. +# endif +.type ChaCha20_neon,%function +.align 5 +ChaCha20_neon: + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} +.LChaCha20_neon: + adr r14,.Lsigma + vstmdb sp!,{d8-d15} @ ABI spec says so + stmdb sp!,{r0-r3} + + vld1.32 {$b0-$c0},[r3] @ load key + ldmia r3,{r4-r11} @ load key + + sub sp,sp,#4*(16+16) + vld1.32 {$d0},[r12] @ load counter and nonce + add r12,sp,#4*8 + ldmia r14,{r0-r3} @ load sigma + vld1.32 {$a0},[r14]! @ load sigma + vld1.32 {$t0},[r14]! @ one + @ vld1.32 {$t3#lo},[r14] @ rot8 + vst1.32 {$c0-$d0},[r12] @ copy 1/2key|counter|nonce + vst1.32 {$a0-$b0},[sp] @ copy sigma|1/2key + + str r10,[sp,#4*(16+10)] @ off-load "@x[10]" + str r11,[sp,#4*(16+11)] @ off-load "@x[11]" + vshl.i32 $t1#lo,$t0#lo,#1 @ two + vstr $t0#lo,[sp,#4*(16+0)] + vshl.i32 $t2#lo,$t0#lo,#2 @ four + vstr $t1#lo,[sp,#4*(16+2)] + vmov $a1,$a0 + vstr $t2#lo,[sp,#4*(16+4)] + vmov $a2,$a0 + @ vstr $t3#lo,[sp,#4*(16+6)] + vmov $b1,$b0 + vmov $b2,$b0 + b .Loop_neon_enter + +.align 4 +.Loop_neon_outer: + ldmia sp,{r0-r9} @ load key material + cmp @t[3],#64*2 @ if len<=64*2 + bls .Lbreak_neon @ switch to integer-only + @ vldr $t3#lo,[sp,#4*(16+6)] @ rot8 + vmov $a1,$a0 + str @t[3],[sp,#4*(32+2)] @ save len + vmov $a2,$a0 + str r12, [sp,#4*(32+1)] @ save inp + vmov $b1,$b0 + str r14, [sp,#4*(32+0)] @ save out + vmov $b2,$b0 +.Loop_neon_enter: + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + vadd.i32 $d1,$d0,$t0 @ counter+1 + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + vmov $c1,$c0 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + vmov $c2,$c0 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + vadd.i32 $d2,$d1,$t0 @ counter+2 + add @x[12],@x[12],#3 @ counter+3 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(16+15)] + mov @t[3],#10 + b .Loop_neon + +.align 4 
+.Loop_neon: + subs @t[3],@t[3],#1 +___ + my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0); + my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0); + my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0); + my @thread3=&ROUND(0,4,8,12); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } + + @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1); + @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1); + @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1); + @thread3=&ROUND(0,5,10,15); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } +$code.=<<___; + bne .Loop_neon + + add @t[3],sp,#32 + vld1.32 {$t0-$t1},[sp] @ load key material + vld1.32 {$t2-$t3},[@t[3]] + + ldr @t[3],[sp,#4*(32+2)] @ load len + + str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store + str @t[1], [sp,#4*(16+9)] + str @x[12],[sp,#4*(16+12)] + str @t[2], [sp,#4*(16+13)] + str @x[14],[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ @x[0-7] and second half at sp+4*(16+8) + + ldr r12,[sp,#4*(32+1)] @ load inp + ldr r14,[sp,#4*(32+0)] @ load out + + vadd.i32 $a0,$a0,$t0 @ accumulate key material + vadd.i32 $a1,$a1,$t0 + vadd.i32 $a2,$a2,$t0 + vldr $t0#lo,[sp,#4*(16+0)] @ one + + vadd.i32 $b0,$b0,$t1 + vadd.i32 $b1,$b1,$t1 + vadd.i32 $b2,$b2,$t1 + vldr $t1#lo,[sp,#4*(16+2)] @ two + + vadd.i32 $c0,$c0,$t2 + vadd.i32 $c1,$c1,$t2 + vadd.i32 $c2,$c2,$t2 + vadd.i32 $d1#lo,$d1#lo,$t0#lo @ counter+1 + vadd.i32 $d2#lo,$d2#lo,$t1#lo @ counter+2 + + vadd.i32 $d0,$d0,$t3 + vadd.i32 $d1,$d1,$t3 + vadd.i32 $d2,$d2,$t3 + + cmp @t[3],#64*4 + blo .Ltail_neon + + vld1.8 {$t0-$t1},[r12]! @ load input + mov @t[3],sp + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 @ xor with input + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + vst1.8 {$a0-$b0},[r14]! 
@ store output + veor $b1,$b1,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c1,$c1,$t2 + vst1.8 {$c0-$d0},[r14]! + veor $d1,$d1,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a2,$a2,$t0 + vld1.32 {$a0-$b0},[@t[3]]! @ load for next iteration + veor $t0#hi,$t0#hi,$t0#hi + vldr $t0#lo,[sp,#4*(16+4)] @ four + veor $b2,$b2,$t1 + vld1.32 {$c0-$d0},[@t[3]] + veor $c2,$c2,$t2 + vst1.8 {$a1-$b1},[r14]! + veor $d2,$d2,$t3 + vst1.8 {$c1-$d1},[r14]! + + vadd.i32 $d0#lo,$d0#lo,$t0#lo @ next counter value + vldr $t0#lo,[sp,#4*(16+0)] @ one + + ldmia sp,{@t[0]-@t[3]} @ load key material + add @x[0],@x[0],@t[0] @ accumulate key material + ldr @t[0],[r12],#16 @ load input + vst1.8 {$a2-$b2},[r14]! + add @x[1],@x[1],@t[1] + ldr @t[1],[r12,#-12] + vst1.8 {$c2-$d2},[r14]! + add @x[2],@x[2],@t[2] + ldr @t[2],[r12,#-8] + add @x[3],@x[3],@t[3] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif + eor @x[0],@x[0],@t[0] @ xor with input + add @t[0],sp,#4*(4) + eor @x[1],@x[1],@t[1] + str @x[0],[r14],#16 @ store output + eor @x[2],@x[2],@t[2] + str @x[1],[r14,#-12] + eor @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + ldr @t[0],[r12],#16 @ load input + add @x[5],@t[1],@x[5],ror#13 + ldr @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#13 + ldr @t[2],[r12,#-8] + add @x[7],@t[3],@x[7],ror#13 + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + eor @x[4],@x[4],@t[0] + add @t[0],sp,#4*(8) + eor @x[5],@x[5],@t[1] + str @x[4],[r14],#16 @ store output + eor @x[6],@x[6],@t[2] + str @x[5],[r14,#-12] + eor @x[7],@x[7],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[6],[r14,#-8] + add @x[0],sp,#4*(16+8) + str @x[7],[r14,#-4] + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + ldr @t[0],[r12],#16 
@ load input + add @x[1],@x[1],@t[1] + ldr @t[1],[r12,#-12] +# ifdef __thumb2__ + it hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it + add @x[2],@x[2],@t[2] + ldr @t[2],[r12,#-8] +# ifdef __thumb2__ + it hi +# endif + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it + add @x[3],@x[3],@t[3] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif + eor @x[0],@x[0],@t[0] + add @t[0],sp,#4*(12) + eor @x[1],@x[1],@t[1] + str @x[0],[r14],#16 @ store output + eor @x[2],@x[2],@t[2] + str @x[1],[r14,#-12] + eor @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @t[0],@t[0],#4 @ next counter value + add @x[5],@t[1],@x[5],ror#24 + str @t[0],[sp,#4*(12)] @ save next counter value + ldr @t[0],[r12],#16 @ load input + add @x[6],@t[2],@x[6],ror#24 + add @x[4],@x[4],#3 @ counter+3 + ldr @t[1],[r12,#-12] + add @x[7],@t[3],@x[7],ror#24 + ldr @t[2],[r12,#-8] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + eor @x[4],@x[4],@t[0] +# ifdef __thumb2__ + it hi +# endif + ldrhi @t[0],[sp,#4*(32+2)] @ re-load len + eor @x[5],@x[5],@t[1] + eor @x[6],@x[6],@t[2] + str @x[4],[r14],#16 @ store output + eor @x[7],@x[7],@t[3] + str @x[5],[r14,#-12] + sub @t[3],@t[0],#64*4 @ len-=64*4 + str @x[6],[r14,#-8] + str @x[7],[r14,#-4] + bhi .Loop_neon_outer + + b .Ldone_neon + +.align 4 +.Lbreak_neon: + @ harmonize NEON and integer-only stack frames: load data + @ from NEON frame, but save to integer-only one; distance + @ between the two is 4*(32+4+16-32)=4*(20). 
+ + str @t[3], [sp,#4*(20+32+2)] @ save len + add @t[3],sp,#4*(32+4) + str r12, [sp,#4*(20+32+1)] @ save inp + str r14, [sp,#4*(20+32+0)] @ save out + + ldr @x[12],[sp,#4*(16+10)] + ldr @x[14],[sp,#4*(16+11)] + vldmia @t[3],{d8-d15} @ fulfill ABI requirement + str @x[12],[sp,#4*(20+16+10)] @ copy "@x[10]" + str @x[14],[sp,#4*(20+16+11)] @ copy "@x[11]" + + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(20+16+15)] + add @t[3],sp,#4*(20) + vst1.32 {$a0-$b0},[@t[3]]! @ copy key + add sp,sp,#4*(20) @ switch frame + vst1.32 {$c0-$d0},[@t[3]] + mov @t[3],#10 + b .Loop @ go integer-only + +.align 4 +.Ltail_neon: + cmp @t[3],#64*3 + bhs .L192_or_more_neon + cmp @t[3],#64*2 + bhs .L128_or_more_neon + cmp @t[3],#64*1 + bhs .L64_or_more_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a0-$b0},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c0-$d0},[@t[0]] + b .Loop_tail_neon + +.align 4 +.L64_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vst1.8 {$a0-$b0},[r14]! + vst1.8 {$c0-$d0},[r14]! + + beq .Ldone_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a1-$b1},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c1-$d1},[@t[0]] + sub @t[3],@t[3],#64*1 @ len-=64*1 + b .Loop_tail_neon + +.align 4 +.L128_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + veor $b1,$b1,$t1 + vst1.8 {$a0-$b0},[r14]! + veor $c1,$c1,$t2 + vst1.8 {$c0-$d0},[r14]! + veor $d1,$d1,$t3 + vst1.8 {$a1-$b1},[r14]! + vst1.8 {$c1-$d1},[r14]! 
+ + beq .Ldone_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a2-$b2},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c2-$d2},[@t[0]] + sub @t[3],@t[3],#64*2 @ len-=64*2 + b .Loop_tail_neon + +.align 4 +.L192_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + veor $b1,$b1,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c1,$c1,$t2 + vst1.8 {$a0-$b0},[r14]! + veor $d1,$d1,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a2,$a2,$t0 + vst1.8 {$c0-$d0},[r14]! + veor $b2,$b2,$t1 + vst1.8 {$a1-$b1},[r14]! + veor $c2,$c2,$t2 + vst1.8 {$c1-$d1},[r14]! + veor $d2,$d2,$t3 + vst1.8 {$a2-$b2},[r14]! + vst1.8 {$c2-$d2},[r14]! + + beq .Ldone_neon + + ldmia sp,{@t[0]-@t[3]} @ load key material + add @x[0],@x[0],@t[0] @ accumulate key material + add @t[0],sp,#4*(4) + add @x[1],@x[1],@t[1] + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + add @t[0],sp,#4*(8) + add @x[5],@t[1],@x[5],ror#13 + add @x[6],@t[2],@x[6],ror#13 + add @x[7],@t[3],@x[7],ror#13 + ldmia @t[0],{@t[0]-@t[3]} @ load key material +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + stmia sp,{@x[0]-@x[7]} + add @x[0],sp,#4*(16+8) + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + add @t[0],sp,#4*(12) + add @x[1],@x[1],@t[1] + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @t[0],sp,#4*(8) + add @x[5],@t[1],@x[5],ror#24 + add @x[4],@x[4],#3 @ counter+3 + add @x[6],@t[2],@x[6],ror#24 + add @x[7],@t[3],@x[7],ror#24 + ldr @t[3],[sp,#4*(32+2)] @ re-load len +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] 
+ rev @x[2],@x[2] + rev @x[3],@x[3] + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + stmia @t[0],{@x[0]-@x[7]} + add @t[2],sp,#4*(0) + sub @t[3],@t[3],#64*3 @ len-=64*3 + +.Loop_tail_neon: + ldrb @t[0],[@t[2]],#1 @ read buffer on stack + ldrb @t[1],[r12],#1 @ read input + subs @t[3],@t[3],#1 + eor @t[0],@t[0],@t[1] + strb @t[0],[r14],#1 @ store output + bne .Loop_tail_neon + +.Ldone_neon: + add sp,sp,#4*(32+4) + vldmia sp,{d8-d15} + add sp,sp,#4*(16+3) + ldmia sp!,{r4-r11,pc} +.size ChaCha20_neon,.-ChaCha20_neon +# ifndef __KERNEL__ +.comm OPENSSL_armcap_P,4,4 +# endif +#endif +___ +}}} + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/@/ and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo; + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl b/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl new file mode 100644 index 000000000000..ac14a9924165 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl @@ -0,0 +1,1163 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# June 2015 +# +# ChaCha20 for ARMv8. +# +# Performance in cycles per byte out of large buffer. 
+# +# IALU/gcc-4.9 3xNEON+1xIALU 6xNEON+2xIALU(*) +# +# Apple A7 5.50/+49% 3.33 1.70 +# Cortex-A53 8.40/+80% 4.72 4.72(**) +# Cortex-A57 8.06/+43% 4.90 4.43(***) +# Denver 4.50/+82% 2.63 2.67(**) +# X-Gene 9.50/+46% 8.82 8.89(**) +# Mongoose 8.00/+44% 3.64 3.25(***) +# Kryo 8.17/+50% 4.83 4.65(***) +# +# (*) since no non-Apple processor exhibits significantly better +# performance, the code path is #ifdef __APPLE__-ed; +# (**) it's expected that doubling interleave factor doesn't help +# all processors, only those with higher NEON latency and +# higher instruction issue rate; +# (***) expected improvement was actually higher; + +$flavour=shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +sub AUTOLOAD() # thunk [simplified] x86-style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; + my $arg = pop; + $arg = "#$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; +} + +my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4)); + +my @x=map("x$_",(5..17,19..21)); +my @d=map("x$_",(22..28,30)); + +sub ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); + + ( + "&add_32 (@x[$a0],@x[$a0],@x[$b0])", + "&add_32 (@x[$a1],@x[$a1],@x[$b1])", + "&add_32 (@x[$a2],@x[$a2],@x[$b2])", + "&add_32 (@x[$a3],@x[$a3],@x[$b3])", + "&eor_32 (@x[$d0],@x[$d0],@x[$a0])", + "&eor_32 (@x[$d1],@x[$d1],@x[$a1])", + "&eor_32 (@x[$d2],@x[$d2],@x[$a2])", + "&eor_32 
(@x[$d3],@x[$d3],@x[$a3])", + "&ror_32 (@x[$d0],@x[$d0],16)", + "&ror_32 (@x[$d1],@x[$d1],16)", + "&ror_32 (@x[$d2],@x[$d2],16)", + "&ror_32 (@x[$d3],@x[$d3],16)", + + "&add_32 (@x[$c0],@x[$c0],@x[$d0])", + "&add_32 (@x[$c1],@x[$c1],@x[$d1])", + "&add_32 (@x[$c2],@x[$c2],@x[$d2])", + "&add_32 (@x[$c3],@x[$c3],@x[$d3])", + "&eor_32 (@x[$b0],@x[$b0],@x[$c0])", + "&eor_32 (@x[$b1],@x[$b1],@x[$c1])", + "&eor_32 (@x[$b2],@x[$b2],@x[$c2])", + "&eor_32 (@x[$b3],@x[$b3],@x[$c3])", + "&ror_32 (@x[$b0],@x[$b0],20)", + "&ror_32 (@x[$b1],@x[$b1],20)", + "&ror_32 (@x[$b2],@x[$b2],20)", + "&ror_32 (@x[$b3],@x[$b3],20)", + + "&add_32 (@x[$a0],@x[$a0],@x[$b0])", + "&add_32 (@x[$a1],@x[$a1],@x[$b1])", + "&add_32 (@x[$a2],@x[$a2],@x[$b2])", + "&add_32 (@x[$a3],@x[$a3],@x[$b3])", + "&eor_32 (@x[$d0],@x[$d0],@x[$a0])", + "&eor_32 (@x[$d1],@x[$d1],@x[$a1])", + "&eor_32 (@x[$d2],@x[$d2],@x[$a2])", + "&eor_32 (@x[$d3],@x[$d3],@x[$a3])", + "&ror_32 (@x[$d0],@x[$d0],24)", + "&ror_32 (@x[$d1],@x[$d1],24)", + "&ror_32 (@x[$d2],@x[$d2],24)", + "&ror_32 (@x[$d3],@x[$d3],24)", + + "&add_32 (@x[$c0],@x[$c0],@x[$d0])", + "&add_32 (@x[$c1],@x[$c1],@x[$d1])", + "&add_32 (@x[$c2],@x[$c2],@x[$d2])", + "&add_32 (@x[$c3],@x[$c3],@x[$d3])", + "&eor_32 (@x[$b0],@x[$b0],@x[$c0])", + "&eor_32 (@x[$b1],@x[$b1],@x[$c1])", + "&eor_32 (@x[$b2],@x[$b2],@x[$c2])", + "&eor_32 (@x[$b3],@x[$b3],@x[$c3])", + "&ror_32 (@x[$b0],@x[$b0],25)", + "&ror_32 (@x[$b1],@x[$b1],25)", + "&ror_32 (@x[$b2],@x[$b2],25)", + "&ror_32 (@x[$b3],@x[$b3],25)" + ); +} + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +.extern OPENSSL_armcap_P +#else +# define ChaCha20_ctr32 chacha20_arm +# define ChaCha20_neon chacha20_neon +#endif + +.text + +.align 5 +.Lsigma: +.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral +.Lone: +.long 1,0,0,0 +#ifndef __KERNEL__ +.LOPENSSL_armcap_P: +# ifdef __ILP32__ +.long OPENSSL_armcap_P-. +# else +.quad OPENSSL_armcap_P-. 
+# endif +#endif + +.globl ChaCha20_ctr32 +.type ChaCha20_ctr32,%function +.align 5 +ChaCha20_ctr32: + cbz $len,.Labort +#ifndef __KERNEL__ + adr @x[0],.LOPENSSL_armcap_P + cmp $len,#192 + b.lo .Lshort +# ifdef __ILP32__ + ldrsw @x[1],[@x[0]] +# else + ldr @x[1],[@x[0]] +# endif + ldr w17,[@x[1],@x[0]] + tst w17,#ARMV7_NEON + b.ne ChaCha20_neon + +.Lshort: +#endif + stp x29,x30,[sp,#-96]! + add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] + sub sp,sp,#64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ldp @d[6],@d[7],[$ctr] // load counter +#ifdef __AARCH64EB__ + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +#endif + +.Loop_outer: + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + mov.32 @x[6],@d[3] + lsr @x[7],@d[3],#32 + mov.32 @x[8],@d[4] + lsr @x[9],@d[4],#32 + mov.32 @x[10],@d[5] + lsr @x[11],@d[5],#32 + mov.32 @x[12],@d[6] + lsr @x[13],@d[6],#32 + mov.32 @x[14],@d[7] + lsr @x[15],@d[7],#32 + + mov $ctr,#10 + subs $len,$len,#64 +.Loop: + sub $ctr,$ctr,#1 +___ + foreach (&ROUND(0, 4, 8,12)) { eval; } + foreach (&ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + cbnz $ctr,.Loop + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add @x[1],@x[1],@d[0],lsr#32 + add.32 @x[2],@x[2],@d[1] + add @x[3],@x[3],@d[1],lsr#32 + add.32 @x[4],@x[4],@d[2] + add @x[5],@x[5],@d[2],lsr#32 + add.32 @x[6],@x[6],@d[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add @x[15],@x[15],@d[7],lsr#32 + + b.lo .Ltail + + add @x[0],@x[0],@x[1],lsl#32 // 
pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#1 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + + b.hi .Loop_outer + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 +.Labort: + ret + +.align 4 +.Ltail: + add $len,$len,#64 +.Less_than_64: + sub $out,$out,#1 + add $inp,$inp,$len + add $out,$out,$len + add $ctr,sp,$len + neg $len,$len + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + stp @x[0],@x[2],[sp,#0] + stp @x[4],@x[6],[sp,#16] + stp @x[8],@x[10],[sp,#32] + stp @x[12],@x[14],[sp,#48] + +.Loop_tail: + ldrb w10,[$inp,$len] + ldrb w11,[$ctr,$len] + add $len,$len,#1 + eor w10,w10,w11 + strb w10,[$out,$len] + cbnz 
$len,.Loop_tail + + stp xzr,xzr,[sp,#0] + stp xzr,xzr,[sp,#16] + stp xzr,xzr,[sp,#32] + stp xzr,xzr,[sp,#48] + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_ctr32,.-ChaCha20_ctr32 +___ + +{{{ +my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) = + map("v$_.4s",(0..7,16..23)); +my (@K)=map("v$_.4s",(24..30)); +my $ONE="v31.4s"; + +sub NEONROUND { +my $odd = pop; +my ($a,$b,$c,$d,$t)=@_; + + ( + "&add ('$a','$a','$b')", + "&eor ('$d','$d','$a')", + "&rev32_16 ('$d','$d')", # vrot ($d,16) + + "&add ('$c','$c','$d')", + "&eor ('$t','$b','$c')", + "&ushr ('$b','$t',20)", + "&sli ('$b','$t',12)", + + "&add ('$a','$a','$b')", + "&eor ('$t','$d','$a')", + "&ushr ('$d','$t',24)", + "&sli ('$d','$t',8)", + + "&add ('$c','$c','$d')", + "&eor ('$t','$b','$c')", + "&ushr ('$b','$t',25)", + "&sli ('$b','$t',7)", + + "&ext ('$a','$a','$a',$odd?4:12)", + "&ext ('$d','$d','$d',8)", + "&ext ('$c','$c','$c',$odd?12:4)" + ); +} + +$code.=<<___; +#if !defined(__KERNEL__) || defined(CONFIG_KERNEL_MODE_NEON) +#ifdef __KERNEL__ +.globl ChaCha20_neon +.type ChaCha20_neon,%function +#endif +.type ChaCha20_neon,%function +.align 5 +ChaCha20_neon: + stp x29,x30,[sp,#-96]! 
+ add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] +#ifdef __APPLE__ + cmp $len,#512 + b.hs .L512_or_more_neon +#endif + + sub sp,sp,#64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ld1 {@K[0]},[@x[0]],#16 + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ld1 {@K[1],@K[2]},[$key] + ldp @d[6],@d[7],[$ctr] // load counter + ld1 {@K[3]},[$ctr] + ld1 {$ONE},[@x[0]] +#ifdef __AARCH64EB__ + rev64 @K[0],@K[0] + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +#endif + add @K[3],@K[3],$ONE // += 1 + add @K[4],@K[3],$ONE + add @K[5],@K[4],$ONE + shl $ONE,$ONE,#2 // 1 -> 4 + +.Loop_outer_neon: + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + mov $A0,@K[0] + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + mov $A1,@K[0] + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + mov $A2,@K[0] + mov.32 @x[6],@d[3] + mov $B0,@K[1] + lsr @x[7],@d[3],#32 + mov $B1,@K[1] + mov.32 @x[8],@d[4] + mov $B2,@K[1] + lsr @x[9],@d[4],#32 + mov $D0,@K[3] + mov.32 @x[10],@d[5] + mov $D1,@K[4] + lsr @x[11],@d[5],#32 + mov $D2,@K[5] + mov.32 @x[12],@d[6] + mov $C0,@K[2] + lsr @x[13],@d[6],#32 + mov $C1,@K[2] + mov.32 @x[14],@d[7] + mov $C2,@K[2] + lsr @x[15],@d[7],#32 + + mov $ctr,#10 + subs $len,$len,#256 +.Loop_neon: + sub $ctr,$ctr,#1 +___ + my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + my @thread3=&ROUND(0,4,8,12); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } + + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&ROUND(0,5,10,15); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + 
eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } +$code.=<<___; + cbnz $ctr,.Loop_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add $A0,$A0,@K[0] + add @x[1],@x[1],@d[0],lsr#32 + add $A1,$A1,@K[0] + add.32 @x[2],@x[2],@d[1] + add $A2,$A2,@K[0] + add @x[3],@x[3],@d[1],lsr#32 + add $C0,$C0,@K[2] + add.32 @x[4],@x[4],@d[2] + add $C1,$C1,@K[2] + add @x[5],@x[5],@d[2],lsr#32 + add $C2,$C2,@K[2] + add.32 @x[6],@x[6],@d[3] + add $D0,$D0,@K[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add $D1,$D1,@K[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add $D2,$D2,@K[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add $B0,$B0,@K[1] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add $B1,$B1,@K[1] + add @x[15],@x[15],@d[7],lsr#32 + add $B2,$B2,@K[1] + + b.lo .Ltail_neon + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + ld1.8 {$T0-$T3},[$inp],#64 + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor $A0,$A0,$T0 + eor @x[10],@x[10],@x[11] + eor $B0,$B0,$T1 + eor @x[12],@x[12],@x[13] + eor $C0,$C0,$T2 + eor @x[14],@x[14],@x[15] + eor $D0,$D0,$T3 + ld1.8 {$T0-$T3},[$inp],#64 + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#4 // increment counter + stp @x[4],@x[6],[$out,#16] + add @K[3],@K[3],$ONE // += 4 + stp 
@x[8],@x[10],[$out,#32] + add @K[4],@K[4],$ONE + stp @x[12],@x[14],[$out,#48] + add @K[5],@K[5],$ONE + add $out,$out,#64 + + st1.8 {$A0-$D0},[$out],#64 + ld1.8 {$A0-$D0},[$inp],#64 + + eor $A1,$A1,$T0 + eor $B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + + eor $A2,$A2,$A0 + eor $B2,$B2,$B0 + eor $C2,$C2,$C0 + eor $D2,$D2,$D0 + st1.8 {$A2-$D2},[$out],#64 + + b.hi .Loop_outer_neon + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret + +.Ltail_neon: + add $len,$len,#256 + cmp $len,#64 + b.lo .Less_than_64 + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#4 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + b.eq .Ldone_neon + sub $len,$len,#64 + cmp $len,#64 + b.lo .Less_than_128 + + ld1.8 {$T0-$T3},[$inp],#64 + eor $A0,$A0,$T0 + eor $B0,$B0,$T1 + eor $C0,$C0,$T2 + eor $D0,$D0,$T3 + st1.8 {$A0-$D0},[$out],#64 + b.eq .Ldone_neon + sub $len,$len,#64 + cmp $len,#64 + b.lo .Less_than_192 + + ld1.8 {$T0-$T3},[$inp],#64 + eor $A1,$A1,$T0 + eor 
$B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + b.eq .Ldone_neon + sub $len,$len,#64 + + st1.8 {$A2-$D2},[sp] + b .Last_neon + +.Less_than_128: + st1.8 {$A0-$D0},[sp] + b .Last_neon +.Less_than_192: + st1.8 {$A1-$D1},[sp] + b .Last_neon + +.align 4 +.Last_neon: + sub $out,$out,#1 + add $inp,$inp,$len + add $out,$out,$len + add $ctr,sp,$len + neg $len,$len + +.Loop_tail_neon: + ldrb w10,[$inp,$len] + ldrb w11,[$ctr,$len] + add $len,$len,#1 + eor w10,w10,w11 + strb w10,[$out,$len] + cbnz $len,.Loop_tail_neon + + stp xzr,xzr,[sp,#0] + stp xzr,xzr,[sp,#16] + stp xzr,xzr,[sp,#32] + stp xzr,xzr,[sp,#48] + +.Ldone_neon: + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_neon,.-ChaCha20_neon +___ +{ +my ($T0,$T1,$T2,$T3,$T4,$T5)=@K; +my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2, + $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23)); + +$code.=<<___; +#ifdef __APPLE__ +.type ChaCha20_512_neon,%function +.align 5 +ChaCha20_512_neon: + stp x29,x30,[sp,#-96]! 
+ add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] + +.L512_or_more_neon: + sub sp,sp,#128+64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ld1 {@K[0]},[@x[0]],#16 + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ld1 {@K[1],@K[2]},[$key] + ldp @d[6],@d[7],[$ctr] // load counter + ld1 {@K[3]},[$ctr] + ld1 {$ONE},[@x[0]] +# ifdef __AARCH64EB__ + rev64 @K[0],@K[0] + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +# endif + add @K[3],@K[3],$ONE // += 1 + stp @K[0],@K[1],[sp,#0] // off-load key block, invariant part + add @K[3],@K[3],$ONE // not typo + str @K[2],[sp,#32] + add @K[4],@K[3],$ONE + add @K[5],@K[4],$ONE + add @K[6],@K[5],$ONE + shl $ONE,$ONE,#2 // 1 -> 4 + + stp d8,d9,[sp,#128+0] // meet ABI requirements + stp d10,d11,[sp,#128+16] + stp d12,d13,[sp,#128+32] + stp d14,d15,[sp,#128+48] + + sub $len,$len,#512 // not typo + +.Loop_outer_512_neon: + mov $A0,@K[0] + mov $A1,@K[0] + mov $A2,@K[0] + mov $A3,@K[0] + mov $A4,@K[0] + mov $A5,@K[0] + mov $B0,@K[1] + mov.32 @x[0],@d[0] // unpack key block + mov $B1,@K[1] + lsr @x[1],@d[0],#32 + mov $B2,@K[1] + mov.32 @x[2],@d[1] + mov $B3,@K[1] + lsr @x[3],@d[1],#32 + mov $B4,@K[1] + mov.32 @x[4],@d[2] + mov $B5,@K[1] + lsr @x[5],@d[2],#32 + mov $D0,@K[3] + mov.32 @x[6],@d[3] + mov $D1,@K[4] + lsr @x[7],@d[3],#32 + mov $D2,@K[5] + mov.32 @x[8],@d[4] + mov $D3,@K[6] + lsr @x[9],@d[4],#32 + mov $C0,@K[2] + mov.32 @x[10],@d[5] + mov $C1,@K[2] + lsr @x[11],@d[5],#32 + add $D4,$D0,$ONE // +4 + mov.32 @x[12],@d[6] + add $D5,$D1,$ONE // +4 + lsr @x[13],@d[6],#32 + mov $C2,@K[2] + mov.32 @x[14],@d[7] + mov $C3,@K[2] + lsr @x[15],@d[7],#32 + mov $C4,@K[2] + stp @K[3],@K[4],[sp,#48] // off-load key block, variable part + mov $C5,@K[2] + str @K[5],[sp,#80] + + mov $ctr,#5 + subs $len,$len,#512 +.Loop_upper_neon: + sub $ctr,$ctr,#1 
+___ + my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0); + my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0); + my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0); + my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + my $diff = ($#thread0+1)*6 - $#thread67 - 1; + my $i = 0; + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } + + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } +$code.=<<___; + cbnz $ctr,.Loop_upper_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add @x[1],@x[1],@d[0],lsr#32 + add.32 @x[2],@x[2],@d[1] + add @x[3],@x[3],@d[1],lsr#32 + add.32 @x[4],@x[4],@d[2] + add @x[5],@x[5],@d[2],lsr#32 + add.32 @x[6],@x[6],@d[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add @x[15],@x[15],@d[7],lsr#32 + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] 
// load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +# ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +# endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#1 // increment counter + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + stp @x[4],@x[6],[$out,#16] + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + stp @x[8],@x[10],[$out,#32] + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + mov.32 @x[6],@d[3] + lsr @x[7],@d[3],#32 + mov.32 @x[8],@d[4] + lsr @x[9],@d[4],#32 + mov.32 @x[10],@d[5] + lsr @x[11],@d[5],#32 + mov.32 @x[12],@d[6] + lsr @x[13],@d[6],#32 + mov.32 @x[14],@d[7] + lsr @x[15],@d[7],#32 + + mov $ctr,#5 +.Loop_lower_neon: + sub $ctr,$ctr,#1 +___ + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } + + 
@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } +$code.=<<___; + cbnz $ctr,.Loop_lower_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + ldp @K[0],@K[1],[sp,#0] + add @x[1],@x[1],@d[0],lsr#32 + ldp @K[2],@K[3],[sp,#32] + add.32 @x[2],@x[2],@d[1] + ldp @K[4],@K[5],[sp,#64] + add @x[3],@x[3],@d[1],lsr#32 + add $A0,$A0,@K[0] + add.32 @x[4],@x[4],@d[2] + add $A1,$A1,@K[0] + add @x[5],@x[5],@d[2],lsr#32 + add $A2,$A2,@K[0] + add.32 @x[6],@x[6],@d[3] + add $A3,$A3,@K[0] + add @x[7],@x[7],@d[3],lsr#32 + add $A4,$A4,@K[0] + add.32 @x[8],@x[8],@d[4] + add $A5,$A5,@K[0] + add @x[9],@x[9],@d[4],lsr#32 + add $C0,$C0,@K[2] + add.32 @x[10],@x[10],@d[5] + add $C1,$C1,@K[2] + add @x[11],@x[11],@d[5],lsr#32 + add $C2,$C2,@K[2] + add.32 @x[12],@x[12],@d[6] + add $C3,$C3,@K[2] + add @x[13],@x[13],@d[6],lsr#32 + add $C4,$C4,@K[2] + add.32 @x[14],@x[14],@d[7] + add $C5,$C5,@K[2] + add @x[15],@x[15],@d[7],lsr#32 + add $D4,$D4,$ONE // +4 + add @x[0],@x[0],@x[1],lsl#32 // pack + add $D5,$D5,$ONE // +4 + add @x[2],@x[2],@x[3],lsl#32 + add $D0,$D0,@K[3] + ldp @x[1],@x[3],[$inp,#0] // load input + add $D1,$D1,@K[4] + add @x[4],@x[4],@x[5],lsl#32 + add $D2,$D2,@K[5] + add @x[6],@x[6],@x[7],lsl#32 + add $D3,$D3,@K[6] + ldp @x[5],@x[7],[$inp,#16] + add $D4,$D4,@K[3] + add @x[8],@x[8],@x[9],lsl#32 + add $D5,$D5,@K[4] + add @x[10],@x[10],@x[11],lsl#32 + add $B0,$B0,@K[1] + ldp @x[9],@x[11],[$inp,#32] + add 
$B1,$B1,@K[1] + add @x[12],@x[12],@x[13],lsl#32 + add $B2,$B2,@K[1] + add @x[14],@x[14],@x[15],lsl#32 + add $B3,$B3,@K[1] + ldp @x[13],@x[15],[$inp,#48] + add $B4,$B4,@K[1] + add $inp,$inp,#64 + add $B5,$B5,@K[1] + +# ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +# endif + ld1.8 {$T0-$T3},[$inp],#64 + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor $A0,$A0,$T0 + eor @x[10],@x[10],@x[11] + eor $B0,$B0,$T1 + eor @x[12],@x[12],@x[13] + eor $C0,$C0,$T2 + eor @x[14],@x[14],@x[15] + eor $D0,$D0,$T3 + ld1.8 {$T0-$T3},[$inp],#64 + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#7 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + st1.8 {$A0-$D0},[$out],#64 + + ld1.8 {$A0-$D0},[$inp],#64 + eor $A1,$A1,$T0 + eor $B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + + ld1.8 {$A1-$D1},[$inp],#64 + eor $A2,$A2,$A0 + ldp @K[0],@K[1],[sp,#0] + eor $B2,$B2,$B0 + ldp @K[2],@K[3],[sp,#32] + eor $C2,$C2,$C0 + eor $D2,$D2,$D0 + st1.8 {$A2-$D2},[$out],#64 + + ld1.8 {$A2-$D2},[$inp],#64 + eor $A3,$A3,$A1 + eor $B3,$B3,$B1 + eor $C3,$C3,$C1 + eor $D3,$D3,$D1 + st1.8 {$A3-$D3},[$out],#64 + + ld1.8 {$A3-$D3},[$inp],#64 + eor $A4,$A4,$A2 + eor $B4,$B4,$B2 + eor $C4,$C4,$C2 + eor $D4,$D4,$D2 + st1.8 {$A4-$D4},[$out],#64 + + shl $A0,$ONE,#1 // 4 -> 8 + eor $A5,$A5,$A3 + eor $B5,$B5,$B3 + eor $C5,$C5,$C3 + eor $D5,$D5,$D3 + st1.8 {$A5-$D5},[$out],#64 + + add @K[3],@K[3],$A0 // += 8 + add @K[4],@K[4],$A0 + add @K[5],@K[5],$A0 + add @K[6],@K[6],$A0 + + b.hs .Loop_outer_512_neon + + adds $len,$len,#512 + ushr $A0,$ONE,#2 // 4 -> 1 + + ldp d8,d9,[sp,#128+0] // meet ABI requirements + ldp d10,d11,[sp,#128+16] + ldp d12,d13,[sp,#128+32] + ldp d14,d15,[sp,#128+48] + + stp 
@K[0],$ONE,[sp,#0] // wipe off-load area + stp @K[0],$ONE,[sp,#32] + stp @K[0],$ONE,[sp,#64] + + b.eq .Ldone_512_neon + + cmp $len,#192 + sub @K[3],@K[3],$A0 // -= 1 + sub @K[4],@K[4],$A0 + sub @K[5],@K[5],$A0 + add sp,sp,#128 + b.hs .Loop_outer_neon + + eor @K[1],@K[1],@K[1] + eor @K[2],@K[2],@K[2] + eor @K[3],@K[3],@K[3] + eor @K[4],@K[4],@K[4] + eor @K[5],@K[5],@K[5] + eor @K[6],@K[6],@K[6] + b .Loop_outer + +.Ldone_512_neon: + ldp x19,x20,[x29,#16] + add sp,sp,#128+64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_512_neon,.-ChaCha20_512_neon +#endif +#endif +___ +} +}}} + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + (s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1)) or + (m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1)) or + (s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1)) or + (m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1)) or + (s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1)); + + print $_,"\n"; +} +close STDOUT; # flush diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c new file mode 100644 index 000000000000..96ce01e2c133 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +asmlinkage void chacha20_mips(u32 state[16], u8 *out, const u8 *in, + const size_t len); +static bool *const chacha20_nobs[] __initconst = { }; +static void __init chacha20_fpu_init(void) +{ +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + chacha20_mips(ctx->state, dst, src, len); + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S b/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S new file mode 100644 index 000000000000..a81e02db95e7 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2016-2018 René van Dorst . All Rights Reserved. + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#define MASK_U32 0x3c +#define CHACHA20_BLOCK_SIZE 64 +#define STACK_SIZE 32 + +#define X0 $t0 +#define X1 $t1 +#define X2 $t2 +#define X3 $t3 +#define X4 $t4 +#define X5 $t5 +#define X6 $t6 +#define X7 $t7 +#define X8 $t8 +#define X9 $t9 +#define X10 $v1 +#define X11 $s6 +#define X12 $s5 +#define X13 $s4 +#define X14 $s3 +#define X15 $s2 +/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */ +#define T0 $s1 +#define T1 $s0 +#define T(n) T ## n +#define X(n) X ## n + +/* Input arguments */ +#define STATE $a0 +#define OUT $a1 +#define IN $a2 +#define BYTES $a3 + +/* Output argument */ +/* NONCE[0] is kept in a register and not in memory. + * We don't want to touch original value in memory. + * Must be incremented every loop iteration. + */ +#define NONCE_0 $v0 + +/* SAVED_X and SAVED_CA are set in the jump table. + * Use regs which are overwritten on exit else we don't leak clear data. 
+ * They are used to handling the last bytes which are not multiple of 4. + */ +#define SAVED_X X15 +#define SAVED_CA $s7 + +#define IS_UNALIGNED $s7 + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define MSB 0 +#define LSB 3 +#define ROTx rotl +#define ROTR(n) rotr n, 24 +#define CPU_TO_LE32(n) \ + wsbh n; \ + rotr n, 16; +#else +#define MSB 3 +#define LSB 0 +#define ROTx rotr +#define CPU_TO_LE32(n) +#define ROTR(n) +#endif + +#define FOR_EACH_WORD(x) \ + x( 0); \ + x( 1); \ + x( 2); \ + x( 3); \ + x( 4); \ + x( 5); \ + x( 6); \ + x( 7); \ + x( 8); \ + x( 9); \ + x(10); \ + x(11); \ + x(12); \ + x(13); \ + x(14); \ + x(15); + +#define FOR_EACH_WORD_REV(x) \ + x(15); \ + x(14); \ + x(13); \ + x(12); \ + x(11); \ + x(10); \ + x( 9); \ + x( 8); \ + x( 7); \ + x( 6); \ + x( 5); \ + x( 4); \ + x( 3); \ + x( 2); \ + x( 1); \ + x( 0); + +#define PLUS_ONE_0 1 +#define PLUS_ONE_1 2 +#define PLUS_ONE_2 3 +#define PLUS_ONE_3 4 +#define PLUS_ONE_4 5 +#define PLUS_ONE_5 6 +#define PLUS_ONE_6 7 +#define PLUS_ONE_7 8 +#define PLUS_ONE_8 9 +#define PLUS_ONE_9 10 +#define PLUS_ONE_10 11 +#define PLUS_ONE_11 12 +#define PLUS_ONE_12 13 +#define PLUS_ONE_13 14 +#define PLUS_ONE_14 15 +#define PLUS_ONE_15 16 +#define PLUS_ONE(x) PLUS_ONE_ ## x +#define _CONCAT3(a,b,c) a ## b ## c +#define CONCAT3(a,b,c) _CONCAT3(a,b,c) + +#define STORE_UNALIGNED(x) \ +CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ + .if (x != 12); \ + lw T0, (x*4)(STATE); \ + .endif; \ + lwl T1, (x*4)+MSB ## (IN); \ + lwr T1, (x*4)+LSB ## (IN); \ + .if (x == 12); \ + addu X ## x, NONCE_0; \ + .else; \ + addu X ## x, T0; \ + .endif; \ + CPU_TO_LE32(X ## x); \ + xor X ## x, T1; \ + swl X ## x, (x*4)+MSB ## (OUT); \ + swr X ## x, (x*4)+LSB ## (OUT); + +#define STORE_ALIGNED(x) \ +CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ + .if (x != 12); \ + lw T0, (x*4)(STATE); \ + .endif; \ + lw T1, (x*4) ## (IN); \ + .if (x == 12); \ + addu X ## x, NONCE_0; \ + .else; \ + addu X ## x, T0; \ + .endif; 
\ + CPU_TO_LE32(X ## x); \ + xor X ## x, T1; \ + sw X ## x, (x*4) ## (OUT); + +/* Jump table macro. + * Used for setup and handling the last bytes, which are not multiple of 4. + * X15 is free to store Xn + * Every jumptable entry must be equal in size. + */ +#define JMPTBL_ALIGNED(x) \ +.Lchacha20_mips_jmptbl_aligned_ ## x: ; \ + .set noreorder; \ + b .Lchacha20_mips_xor_aligned_ ## x ## _b; \ + .if (x == 12); \ + addu SAVED_X, X ## x, NONCE_0; \ + .else; \ + addu SAVED_X, X ## x, SAVED_CA; \ + .endif; \ + .set reorder + +#define JMPTBL_UNALIGNED(x) \ +.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \ + .set noreorder; \ + b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \ + .if (x == 12); \ + addu SAVED_X, X ## x, NONCE_0; \ + .else; \ + addu SAVED_X, X ## x, SAVED_CA; \ + .endif; \ + .set reorder + +#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \ + addu X(A), X(K); \ + addu X(B), X(L); \ + addu X(C), X(M); \ + addu X(D), X(N); \ + xor X(V), X(A); \ + xor X(W), X(B); \ + xor X(Y), X(C); \ + xor X(Z), X(D); \ + rotl X(V), S; \ + rotl X(W), S; \ + rotl X(Y), S; \ + rotl X(Z), S; + +.text +.set reorder +.set noat +.globl chacha20_mips +.ent chacha20_mips +chacha20_mips: + .frame $sp, STACK_SIZE, $ra + + addiu $sp, -STACK_SIZE + + /* Return bytes = 0. */ + beqz BYTES, .Lchacha20_mips_end + + lw NONCE_0, 48(STATE) + + /* Save s0-s7 */ + sw $s0, 0($sp) + sw $s1, 4($sp) + sw $s2, 8($sp) + sw $s3, 12($sp) + sw $s4, 16($sp) + sw $s5, 20($sp) + sw $s6, 24($sp) + sw $s7, 28($sp) + + /* Test IN or OUT is unaligned. 
+ * IS_UNALIGNED = ( IN | OUT ) & 0x00000003 + */ + or IS_UNALIGNED, IN, OUT + andi IS_UNALIGNED, 0x3 + + /* Set number of rounds */ + li $at, 20 + + b .Lchacha20_rounds_start + +.align 4 +.Loop_chacha20_rounds: + addiu IN, CHACHA20_BLOCK_SIZE + addiu OUT, CHACHA20_BLOCK_SIZE + addiu NONCE_0, 1 + +.Lchacha20_rounds_start: + lw X0, 0(STATE) + lw X1, 4(STATE) + lw X2, 8(STATE) + lw X3, 12(STATE) + + lw X4, 16(STATE) + lw X5, 20(STATE) + lw X6, 24(STATE) + lw X7, 28(STATE) + lw X8, 32(STATE) + lw X9, 36(STATE) + lw X10, 40(STATE) + lw X11, 44(STATE) + + move X12, NONCE_0 + lw X13, 52(STATE) + lw X14, 56(STATE) + lw X15, 60(STATE) + +.Loop_chacha20_xor_rounds: + addiu $at, -2 + AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); + AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); + AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); + AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); + AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); + AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); + AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); + AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); + bnez $at, .Loop_chacha20_xor_rounds + + addiu BYTES, -(CHACHA20_BLOCK_SIZE) + + /* Is data src/dst unaligned? Jump */ + bnez IS_UNALIGNED, .Loop_chacha20_unaligned + + /* Set number rounds here to fill delayslot. */ + li $at, 20 + + /* BYTES < 0, it has no full block. */ + bltz BYTES, .Lchacha20_mips_no_full_block_aligned + + FOR_EACH_WORD_REV(STORE_ALIGNED) + + /* BYTES > 0? Loop again. */ + bgtz BYTES, .Loop_chacha20_rounds + + /* Place this here to fill delay slot */ + addiu NONCE_0, 1 + + /* BYTES < 0? 
Handle last bytes */ + bltz BYTES, .Lchacha20_mips_xor_bytes + +.Lchacha20_mips_xor_done: + /* Restore used registers */ + lw $s0, 0($sp) + lw $s1, 4($sp) + lw $s2, 8($sp) + lw $s3, 12($sp) + lw $s4, 16($sp) + lw $s5, 20($sp) + lw $s6, 24($sp) + lw $s7, 28($sp) + + /* Write NONCE_0 back to right location in state */ + sw NONCE_0, 48(STATE) + +.Lchacha20_mips_end: + addiu $sp, STACK_SIZE + jr $ra + +.Lchacha20_mips_no_full_block_aligned: + /* Restore the offset on BYTES */ + addiu BYTES, CHACHA20_BLOCK_SIZE + + /* Get number of full WORDS */ + andi $at, BYTES, MASK_U32 + + /* Load upper half of jump table addr */ + lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0) + + /* Calculate lower half jump table offset */ + ins T0, $at, 1, 6 + + /* Add offset to STATE */ + addu T1, STATE, $at + + /* Add lower half jump table addr */ + addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0) + + /* Read value from STATE */ + lw SAVED_CA, 0(T1) + + /* Store remaining bytecounter as negative value */ + subu BYTES, $at, BYTES + + jr T0 + + /* Jump table */ + FOR_EACH_WORD(JMPTBL_ALIGNED) + + +.Loop_chacha20_unaligned: + /* Set number rounds here to fill delayslot. */ + li $at, 20 + + /* BYTES > 0, it has no full block. */ + bltz BYTES, .Lchacha20_mips_no_full_block_unaligned + + FOR_EACH_WORD_REV(STORE_UNALIGNED) + + /* BYTES > 0? Loop again. 
*/ + bgtz BYTES, .Loop_chacha20_rounds + + /* Write NONCE_0 back to right location in state */ + sw NONCE_0, 48(STATE) + + .set noreorder + /* Fall through to byte handling */ + bgez BYTES, .Lchacha20_mips_xor_done +.Lchacha20_mips_xor_unaligned_0_b: +.Lchacha20_mips_xor_aligned_0_b: + /* Place this here to fill delay slot */ + addiu NONCE_0, 1 + .set reorder + +.Lchacha20_mips_xor_bytes: + addu IN, $at + addu OUT, $at + /* First byte */ + lbu T1, 0(IN) + addiu $at, BYTES, 1 + CPU_TO_LE32(SAVED_X) + ROTR(SAVED_X) + xor T1, SAVED_X + sb T1, 0(OUT) + beqz $at, .Lchacha20_mips_xor_done + /* Second byte */ + lbu T1, 1(IN) + addiu $at, BYTES, 2 + ROTx SAVED_X, 8 + xor T1, SAVED_X + sb T1, 1(OUT) + beqz $at, .Lchacha20_mips_xor_done + /* Third byte */ + lbu T1, 2(IN) + ROTx SAVED_X, 8 + xor T1, SAVED_X + sb T1, 2(OUT) + b .Lchacha20_mips_xor_done + +.Lchacha20_mips_no_full_block_unaligned: + /* Restore the offset on BYTES */ + addiu BYTES, CHACHA20_BLOCK_SIZE + + /* Get number of full WORDS */ + andi $at, BYTES, MASK_U32 + + /* Load upper half of jump table addr */ + lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0) + + /* Calculate lower half jump table offset */ + ins T0, $at, 1, 6 + + /* Add offset to STATE */ + addu T1, STATE, $at + + /* Add lower half jump table addr */ + addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0) + + /* Read value from STATE */ + lw SAVED_CA, 0(T1) + + /* Store remaining bytecounter as negative value */ + subu BYTES, $at, BYTES + + jr T0 + + /* Jump table */ + FOR_EACH_WORD(JMPTBL_UNALIGNED) +.end chacha20_mips +.set at diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S b/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S new file mode 100644 index 000000000000..8fb4bc2e7b5b --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Google, Inc. 
+ */ + +#include +#include + +/* + * Design notes: + * + * 16 registers would be needed to hold the state matrix, but only 14 are + * available because 'sp' and 'pc' cannot be used. So we spill the elements + * (x8, x9) to the stack and swap them out with (x10, x11). This adds one + * 'ldrd' and one 'strd' instruction per round. + * + * All rotates are performed using the implicit rotate operand accepted by the + * 'add' and 'eor' instructions. This is faster than using explicit rotate + * instructions. To make this work, we allow the values in the second and last + * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the + * wrong rotation amount. The rotation amount is then fixed up just in time + * when the values are used. 'brot' is the number of bits the values in row 'b' + * need to be rotated right to arrive at the correct values, and 'drot' + * similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such + * that they end up as (25, 24) after every round. 
+ */ + + // ChaCha state registers + X0 .req r0 + X1 .req r1 + X2 .req r2 + X3 .req r3 + X4 .req r4 + X5 .req r5 + X6 .req r6 + X7 .req r7 + X8_X10 .req r8 // shared by x8 and x10 + X9_X11 .req r9 // shared by x9 and x11 + X12 .req r10 + X13 .req r11 + X14 .req r12 + X15 .req r14 + +.Lexpand_32byte_k: + // "expand 32-byte k" + .word 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 + +#ifdef __thumb2__ +# define adrl adr +#endif + +.macro __rev out, in, t0, t1, t2 +.if __LINUX_ARM_ARCH__ >= 6 + rev \out, \in +.else + lsl \t0, \in, #24 + and \t1, \in, #0xff00 + and \t2, \in, #0xff0000 + orr \out, \t0, \in, lsr #24 + orr \out, \out, \t1, lsl #8 + orr \out, \out, \t2, lsr #8 +.endif +.endm + +.macro _le32_bswap x, t0, t1, t2 +#ifdef __ARMEB__ + __rev \x, \x, \t0, \t1, \t2 +#endif +.endm + +.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 + _le32_bswap \a, \t0, \t1, \t2 + _le32_bswap \b, \t0, \t1, \t2 + _le32_bswap \c, \t0, \t1, \t2 + _le32_bswap \d, \t0, \t1, \t2 +.endm + +.macro __ldrd a, b, src, offset +#if __LINUX_ARM_ARCH__ >= 6 + ldrd \a, \b, [\src, #\offset] +#else + ldr \a, [\src, #\offset] + ldr \b, [\src, #\offset + 4] +#endif +.endm + +.macro __strd a, b, dst, offset +#if __LINUX_ARM_ARCH__ >= 6 + strd \a, \b, [\dst, #\offset] +#else + str \a, [\dst, #\offset] + str \b, [\dst, #\offset + 4] +#endif +.endm + +.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2 + + // a += b; d ^= a; d = rol(d, 16); + add \a1, \a1, \b1, ror #brot + add \a2, \a2, \b2, ror #brot + eor \d1, \a1, \d1, ror #drot + eor \d2, \a2, \d2, ror #drot + // drot == 32 - 16 == 16 + + // c += d; b ^= c; b = rol(b, 12); + add \c1, \c1, \d1, ror #16 + add \c2, \c2, \d2, ror #16 + eor \b1, \c1, \b1, ror #brot + eor \b2, \c2, \b2, ror #brot + // brot == 32 - 12 == 20 + + // a += b; d ^= a; d = rol(d, 8); + add \a1, \a1, \b1, ror #20 + add \a2, \a2, \b2, ror #20 + eor \d1, \a1, \d1, ror #16 + eor \d2, \a2, \d2, ror #16 + // drot == 32 - 8 == 24 + + // c += d; b ^= c; b = rol(b, 7); + add \c1, \c1, \d1, ror 
#24 + add \c2, \c2, \d2, ror #24 + eor \b1, \c1, \b1, ror #20 + eor \b2, \c2, \b2, ror #20 + // brot == 32 - 7 == 25 +.endm + +.macro _doubleround + + // column round + + // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13) + _halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13 + + // save (x8, x9); restore (x10, x11) + __strd X8_X10, X9_X11, sp, 0 + __ldrd X8_X10, X9_X11, sp, 8 + + // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15) + _halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15 + + .set brot, 25 + .set drot, 24 + + // diagonal round + + // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12) + _halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12 + + // save (x10, x11); restore (x8, x9) + __strd X8_X10, X9_X11, sp, 8 + __ldrd X8_X10, X9_X11, sp, 0 + + // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14) + _halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14 +.endm + +.macro _chacha_permute nrounds + .set brot, 0 + .set drot, 0 + .rept \nrounds / 2 + _doubleround + .endr +.endm + +.macro _chacha nrounds + +.Lnext_block\@: + // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN + // Registers contain x0-x9,x12-x15. + + // Do the core ChaCha permutation to update x0-x15. + _chacha_permute \nrounds + + add sp, #8 + // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers contain x0-x9,x12-x15. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. + + // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15). + push {X8_X10, X9_X11, X12, X13, X14, X15} + + // Load (OUT, IN, LEN). + ldr r14, [sp, #96] + ldr r12, [sp, #100] + ldr r11, [sp, #104] + + orr r10, r14, r12 + + // Use slow path if fewer than 64 bytes remain. + cmp r11, #64 + blt .Lxor_slowpath\@ + + // Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on + // ARMv6+, since ldmia and stmia (used below) still require alignment. + tst r10, #3 + bne .Lxor_slowpath\@ + + // Fast path: XOR 64 bytes of aligned data. 
+ + // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. + + // x0-x3 + __ldrd r8, r9, sp, 32 + __ldrd r10, r11, sp, 40 + add X0, X0, r8 + add X1, X1, r9 + add X2, X2, r10 + add X3, X3, r11 + _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + ldmia r12!, {r8-r11} + eor X0, X0, r8 + eor X1, X1, r9 + eor X2, X2, r10 + eor X3, X3, r11 + stmia r14!, {X0-X3} + + // x4-x7 + __ldrd r8, r9, sp, 48 + __ldrd r10, r11, sp, 56 + add X4, r8, X4, ror #brot + add X5, r9, X5, ror #brot + ldmia r12!, {X0-X3} + add X6, r10, X6, ror #brot + add X7, r11, X7, ror #brot + _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + eor X4, X4, X0 + eor X5, X5, X1 + eor X6, X6, X2 + eor X7, X7, X3 + stmia r14!, {X4-X7} + + // x8-x15 + pop {r0-r7} // (x8-x9,x12-x15,x10-x11) + __ldrd r8, r9, sp, 32 + __ldrd r10, r11, sp, 40 + add r0, r0, r8 // x8 + add r1, r1, r9 // x9 + add r6, r6, r10 // x10 + add r7, r7, r11 // x11 + _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + ldmia r12!, {r8-r11} + eor r0, r0, r8 // x8 + eor r1, r1, r9 // x9 + eor r6, r6, r10 // x10 + eor r7, r7, r11 // x11 + stmia r14!, {r0,r1,r6,r7} + ldmia r12!, {r0,r1,r6,r7} + __ldrd r8, r9, sp, 48 + __ldrd r10, r11, sp, 56 + add r2, r8, r2, ror #drot // x12 + add r3, r9, r3, ror #drot // x13 + add r4, r10, r4, ror #drot // x14 + add r5, r11, r5, ror #drot // x15 + _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + ldr r9, [sp, #72] // load LEN + eor r2, r2, r0 // x12 + eor r3, r3, r1 // x13 + eor r4, r4, r6 // x14 + eor r5, r5, r7 // x15 + subs r9, #64 // decrement and check LEN + stmia r14!, {r2-r5} + + beq .Ldone\@ + +.Lprepare_for_next_block\@: + + // Stack: x0-x15 OUT IN LEN + + // Increment block counter (x12) + add r8, #1 + + // Store updated (OUT, IN, LEN) + str r14, [sp, #64] + str r12, [sp, #68] + str r9, [sp, #72] + + mov r14, sp + + // Store updated block counter (x12) + str r8, [sp, #48] + + sub sp, 
#16 + + // Reload state and do next block + ldmia r14!, {r0-r11} // load x0-x11 + __strd r10, r11, sp, 8 // store x10-x11 before state + ldmia r14, {r10-r12,r14} // load x12-x15 + b .Lnext_block\@ + +.Lxor_slowpath\@: + // Slow path: < 64 bytes remaining, or unaligned input or output buffer. + // We handle it by storing the 64 bytes of keystream to the stack, then + // XOR-ing the needed portion with the data. + + // Allocate keystream buffer + sub sp, #64 + mov r14, sp + + // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. + + // Save keystream for x0-x3 + __ldrd r8, r9, sp, 96 + __ldrd r10, r11, sp, 104 + add X0, X0, r8 + add X1, X1, r9 + add X2, X2, r10 + add X3, X3, r11 + _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + stmia r14!, {X0-X3} + + // Save keystream for x4-x7 + __ldrd r8, r9, sp, 112 + __ldrd r10, r11, sp, 120 + add X4, r8, X4, ror #brot + add X5, r9, X5, ror #brot + add X6, r10, X6, ror #brot + add X7, r11, X7, ror #brot + _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + add r8, sp, #64 + stmia r14!, {X4-X7} + + // Save keystream for x8-x15 + ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11) + __ldrd r8, r9, sp, 128 + __ldrd r10, r11, sp, 136 + add r0, r0, r8 // x8 + add r1, r1, r9 // x9 + add r6, r6, r10 // x10 + add r7, r7, r11 // x11 + _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + stmia r14!, {r0,r1,r6,r7} + __ldrd r8, r9, sp, 144 + __ldrd r10, r11, sp, 152 + add r2, r8, r2, ror #drot // x12 + add r3, r9, r3, ror #drot // x13 + add r4, r10, r4, ror #drot // x14 + add r5, r11, r5, ror #drot // x15 + _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + stmia r14, {r2-r5} + + // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN + // Registers: r8 is block counter, r12 is IN. 
+ + ldr r9, [sp, #168] // LEN + ldr r14, [sp, #160] // OUT + cmp r9, #64 + mov r0, sp + movle r1, r9 + movgt r1, #64 + // r1 is number of bytes to XOR, in range [1, 64] + +.if __LINUX_ARM_ARCH__ < 6 + orr r2, r12, r14 + tst r2, #3 // IN or OUT misaligned? + bne .Lxor_next_byte\@ +.endif + + // XOR a word at a time +.rept 16 + subs r1, #4 + blt .Lxor_words_done\@ + ldr r2, [r12], #4 + ldr r3, [r0], #4 + eor r2, r2, r3 + str r2, [r14], #4 +.endr + b .Lxor_slowpath_done\@ +.Lxor_words_done\@: + ands r1, r1, #3 + beq .Lxor_slowpath_done\@ + + // XOR a byte at a time +.Lxor_next_byte\@: + ldrb r2, [r12], #1 + ldrb r3, [r0], #1 + eor r2, r2, r3 + strb r2, [r14], #1 + subs r1, #1 + bne .Lxor_next_byte\@ + +.Lxor_slowpath_done\@: + subs r9, #64 + add sp, #96 + bgt .Lprepare_for_next_block\@ + +.Ldone\@: +.endm // _chacha + +/* + * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], + * const u32 iv[4]); + */ +SYM_FUNC_START(chacha20_arm) + cmp r2, #0 // len == 0? + reteq lr + + push {r0-r2,r4-r11,lr} + + // Push state x0-x15 onto stack. + // Also store an extra copy of x10-x11 just before the state. 
+ + ldr r4, [sp, #48] // iv + mov r0, sp + sub sp, #80 + + // iv: x12-x15 + ldm r4, {X12,X13,X14,X15} + stmdb r0!, {X12,X13,X14,X15} + + // key: x4-x11 + __ldrd X8_X10, X9_X11, r3, 24 + __strd X8_X10, X9_X11, sp, 8 + stmdb r0!, {X8_X10, X9_X11} + ldm r3, {X4-X9_X11} + stmdb r0!, {X4-X9_X11} + + // constants: x0-x3 + adrl X3, .Lexpand_32byte_k + ldm X3, {X0-X3} + __strd X0, X1, sp, 16 + __strd X2, X3, sp, 24 + + _chacha 20 + + add sp, #76 + pop {r4-r11, pc} +SYM_FUNC_END(chacha20_arm) + +/* + * void hchacha20_arm(const u32 state[16], u32 out[8]); + */ +SYM_FUNC_START(hchacha20_arm) + push {r1,r4-r11,lr} + + mov r14, r0 + ldmia r14!, {r0-r11} // load x0-x11 + push {r10-r11} // store x10-x11 to stack + ldm r14, {r10-r12,r14} // load x12-x15 + sub sp, #8 + + _chacha_permute 20 + + // Skip over (unused0-unused1, x10-x11) + add sp, #16 + + // Fix up rotations of x12-x15 + ror X12, X12, #drot + ror X13, X13, #drot + pop {r4} // load 'out' + ror X14, X14, #drot + ror X15, X15, #drot + + // Store (x0-x3,x12-x15) to 'out' + stm r4, {X0,X1,X2,X3,X12,X13,X14,X15} + + pop {r4-r11,pc} +SYM_FUNC_END(hchacha20_arm) diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c new file mode 100644 index 000000000000..5ac5f686a641 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include +#include +#include + +asmlinkage void hchacha20_ssse3(u32 *derived_key, const u8 *nonce, + const u8 *key); +asmlinkage void chacha20_ssse3(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx2(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx512(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx512vl(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); + +static bool chacha20_use_ssse3 __ro_after_init; +static bool chacha20_use_avx2 __ro_after_init; +static bool chacha20_use_avx512 __ro_after_init; +static bool chacha20_use_avx512vl __ro_after_init; +static bool *const chacha20_nobs[] __initconst = { + &chacha20_use_ssse3, &chacha20_use_avx2, &chacha20_use_avx512, + &chacha20_use_avx512vl }; + +static void __init chacha20_fpu_init(void) +{ + chacha20_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3); + chacha20_use_avx2 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); +#ifndef COMPAT_CANNOT_USE_AVX512 + chacha20_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL) && + /* Skylake downclocks unacceptably much when using zmm. 
*/ + boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X; + chacha20_use_avx512vl = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + boot_cpu_has(X86_FEATURE_AVX512VL) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL); +#endif +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. */ + BUILD_BUG_ON(PAGE_SIZE < CHACHA20_BLOCK_SIZE || + PAGE_SIZE % CHACHA20_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_AS_SSSE3) || !chacha20_use_ssse3 || + len <= CHACHA20_BLOCK_SIZE || !simd_use(simd_context)) + return false; + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && chacha20_use_avx512 && + len >= CHACHA20_BLOCK_SIZE * 8) + chacha20_avx512(dst, src, bytes, ctx->key, ctx->counter); + else if (IS_ENABLED(CONFIG_AS_AVX512) && chacha20_use_avx512vl && + len >= CHACHA20_BLOCK_SIZE * 4) + chacha20_avx512vl(dst, src, bytes, ctx->key, ctx->counter); + else if (IS_ENABLED(CONFIG_AS_AVX2) && chacha20_use_avx2 && + len >= CHACHA20_BLOCK_SIZE * 4) + chacha20_avx2(dst, src, bytes, ctx->key, ctx->counter); + else + chacha20_ssse3(dst, src, bytes, ctx->key, ctx->counter); + ctx->counter[0] += (bytes + 63) / 64; + len -= bytes; + if (!len) + break; + dst += bytes; + src += bytes; + simd_relax(simd_context); + } + + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + if (IS_ENABLED(CONFIG_AS_SSSE3) && chacha20_use_ssse3 && + simd_use(simd_context)) { + hchacha20_ssse3(derived_key, nonce, key); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl 
b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl new file mode 100644 index 000000000000..29906a66b8b7 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl @@ -0,0 +1,4106 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Copyright (C) 2017-2019 Samuel Neves . All Rights Reserved. +# Copyright (C) 2017-2019 Jason A. Donenfeld . All Rights Reserved. +# Copyright (C) 2006-2017 CRYPTOGAMS by . All Rights Reserved. +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# November 2014 +# +# ChaCha20 for x86_64. +# +# December 2016 +# +# Add AVX512F code path. +# +# December 2017 +# +# Add AVX512VL code path. +# +# Performance in cycles per byte out of large buffer. +# +# IALU/gcc 4.8(i) 1x/2xSSSE3(ii) 4xSSSE3 NxAVX(v) +# +# P4 9.48/+99% - - +# Core2 7.83/+55% 7.90/5.76 4.35 +# Westmere 7.19/+50% 5.60/4.50 3.00 +# Sandy Bridge 8.31/+42% 5.45/4.00 2.72 +# Ivy Bridge 6.71/+46% 5.40/? 2.41 +# Haswell 5.92/+43% 5.20/3.45 2.42 1.23 +# Skylake[-X] 5.87/+39% 4.70/3.22 2.31 1.19[0.80(vi)] +# Silvermont 12.0/+33% 7.75/6.90 7.03(iii) +# Knights L 11.7/- ? 
9.60(iii) 0.80 +# Goldmont 10.6/+17% 5.10/3.52 3.28 +# Sledgehammer 7.28/+52% - - +# Bulldozer 9.66/+28% 9.85/5.35(iv) 3.06(iv) +# Ryzen 5.96/+50% 5.19/3.00 2.40 2.09 +# VIA Nano 10.5/+46% 6.72/6.88 6.05 +# +# (i) compared to older gcc 3.x one can observe >2x improvement on +# most platforms; +# (ii) 2xSSSE3 is code path optimized specifically for 128 bytes used +# by chacha20_poly1305_tls_cipher, results are EVP-free; +# (iii) this is not optimal result for Atom because of MSROM +# limitations, SSE2 can do better, but gain is considered too +# low to justify the [maintenance] effort; +# (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20 +# and 4.85 for 128-byte inputs; +# (v) 8xAVX2, 8xAVX512VL or 16xAVX512F, whichever best applicable; +# (vi) even though Skylake-X can execute AVX512F code and deliver 0.57 +# cpb in single thread, the corresponding capability is suppressed; + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); +$kernel=0; $kernel=1 if (!$flavour && !$output); + +if (!$kernel) { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or + die "can't locate x86_64-xlate.pl"; + + open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; + *STDOUT=*OUT; + + if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25); + } + + if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { + $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12); + $avx += 1 if ($1==2.11 && $2>=8); + } + + if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./) { + $avx = ($1>=10) + ($1>=11); + } + + if (!$avx && 
`$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { + $avx = ($2>=3.0) + ($2>3.0); + } +} else { + $avx = 4; # The kernel uses ifdefs for this. +} + +# input parameter block +($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8"); + +$code.=<<___ if $kernel; +#include +___ + +sub declare_variable() { + my ($name, $size, $type, $payload) = @_; + if($kernel) { + $code.=".section .rodata.cst$size.L$name, \"aM\", \@progbits, $size\n"; + $code.=".align $size\n"; + $code.=".L$name:\n"; + $code.=".$type $payload\n"; + } else { + $code.=".L$name:\n"; + $code.=".$type $payload\n"; + } +} + +sub declare_function() { + my ($name, $align, $nargs) = @_; + if($kernel) { + $code .= ".align $align\n"; + $code .= "SYM_FUNC_START($name)\n"; + $code .= ".L$name:\n"; + } else { + $code .= ".globl $name\n"; + $code .= ".type $name,\@function,$nargs\n"; + $code .= ".align $align\n"; + $code .= "$name:\n"; + } +} + +sub end_function() { + my ($name) = @_; + if($kernel) { + $code .= "SYM_FUNC_END($name)\n"; + } else { + $code .= ".size $name,.-$name\n"; + } +} + +if(!$kernel) { + $code .= ".text\n"; +} +&declare_variable('zero', 16, 'long', '0,0,0,0'); +&declare_variable('one', 16, 'long', '1,0,0,0'); +&declare_variable('inc', 16, 'long', '0,1,2,3'); +&declare_variable('four', 16, 'long', '4,4,4,4'); +&declare_variable('incy', 32, 'long', '0,2,4,6,1,3,5,7'); +&declare_variable('eight', 32, 'long', '8,8,8,8,8,8,8,8'); +&declare_variable('rot16', 16, 'byte', '0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd'); +&declare_variable('rot24', 16, 'byte', '0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe'); +&declare_variable('twoy', 32, 'long', '2,0,0,0, 2,0,0,0'); +&declare_variable('zeroz', 64, 'long', '0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0'); +&declare_variable('fourz', 64, 'long', '4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0'); +&declare_variable('incz', 64, 'long', '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15'); 
+&declare_variable('sixteen', 64, 'long', '16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16'); +&declare_variable('sigma', 16, 'ascii', '"expand 32-byte k"'); + +$code.=<<___ if !$kernel; +.asciz "ChaCha20 for x86_64, CRYPTOGAMS by " +___ +$code.=".text\n"; + +sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; + my $arg = pop; + $arg = "\$$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; +} + +@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)), + "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15))); +@t=("%esi","%edi"); + +sub ROUND { # critical path is 24 cycles per round +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_)=map("\"$_\"",@t); +my @x=map("\"$_\"",@x); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. + + # Normally instructions would be interleaved to favour in-order + # execution. Generally out-of-order cores manage it gracefully, + # but not this time for some reason. As in-order execution + # cores are dying breed, old Atom is the only one around, + # instructions are left uninterleaved. Besides, Atom is better + # off executing 1xSSSE3 code anyway... 
+ + ( + "&add (@x[$a0],@x[$b0])", # Q1 + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],16)", + "&add (@x[$a1],@x[$b1])", # Q2 + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],16)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],12)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],12)", + + "&add (@x[$a0],@x[$b0])", + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],8)", + "&add (@x[$a1],@x[$b1])", + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],8)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],7)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],7)", + + "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's + "&mov (\"4*$c1(%rsp)\",$xc_)", + "&mov ($xc,\"4*$c2(%rsp)\")", + "&mov ($xc_,\"4*$c3(%rsp)\")", + + "&add (@x[$a2],@x[$b2])", # Q3 + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],16)", + "&add (@x[$a3],@x[$b3])", # Q4 + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],16)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],12)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],12)", + + "&add (@x[$a2],@x[$b2])", + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],8)", + "&add (@x[$a3],@x[$b3])", + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],8)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],7)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],7)" + ); +} + +######################################################################## +# Generic code path that handles all lengths on pre-SSSE3 processors. 
+if(!$kernel) { +&declare_function("chacha20_ctr32", 64, 5); +$code.=<<___; +.cfi_startproc + cmp \$0,$len + je .Lno_data + mov OPENSSL_ia32cap_P+4(%rip),%r9 +___ +$code.=<<___ if ($avx>2); + bt \$48,%r9 # check for AVX512F + jc .Lchacha20_avx512 + test %r9,%r9 # check for AVX512VL + js .Lchacha20_avx512vl +___ +$code.=<<___; + test \$`1<<(41-32)`,%r9d + jnz .Lchacha20_ssse3 +___ +$code.=<<___; + push %rbx +.cfi_push %rbx + push %rbp +.cfi_push %rbp + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 + sub \$64+24,%rsp +.cfi_adjust_cfa_offset 64+24 +.Lctr32_body: + + #movdqa .Lsigma(%rip),%xmm0 + movdqu ($key),%xmm1 + movdqu 16($key),%xmm2 + movdqu ($counter),%xmm3 + movdqa .Lone(%rip),%xmm4 + + #movdqa %xmm0,4*0(%rsp) # key[0] + movdqa %xmm1,4*4(%rsp) # key[1] + movdqa %xmm2,4*8(%rsp) # key[2] + movdqa %xmm3,4*12(%rsp) # key[3] + mov $len,%rbp # reassign $len + jmp .Loop_outer + +.align 32 +.Loop_outer: + mov \$0x61707865,@x[0] # 'expa' + mov \$0x3320646e,@x[1] # 'nd 3' + mov \$0x79622d32,@x[2] # '2-by' + mov \$0x6b206574,@x[3] # 'te k' + mov 4*4(%rsp),@x[4] + mov 4*5(%rsp),@x[5] + mov 4*6(%rsp),@x[6] + mov 4*7(%rsp),@x[7] + movd %xmm3,@x[12] + mov 4*13(%rsp),@x[13] + mov 4*14(%rsp),@x[14] + mov 4*15(%rsp),@x[15] + + mov %rbp,64+0(%rsp) # save len + mov \$10,%ebp + mov $inp,64+8(%rsp) # save inp + movq %xmm2,%rsi # "@x[8]" + mov $out,64+16(%rsp) # save out + mov %rsi,%rdi + shr \$32,%rdi # "@x[9]" + jmp .Loop + +.align 32 +.Loop: +___ + foreach (&ROUND (0, 4, 8,12)) { eval; } + foreach (&ROUND (0, 5,10,15)) { eval; } + &dec ("%ebp"); + &jnz (".Loop"); + +$code.=<<___; + mov @t[1],4*9(%rsp) # modulo-scheduled + mov @t[0],4*8(%rsp) + mov 64(%rsp),%rbp # load len + movdqa %xmm2,%xmm1 + mov 64+8(%rsp),$inp # load inp + paddd %xmm4,%xmm3 # increment counter + mov 64+16(%rsp),$out # load out + + add \$0x61707865,@x[0] # 'expa' + add \$0x3320646e,@x[1] # 'nd 3' + add \$0x79622d32,@x[2] # '2-by' + add 
\$0x6b206574,@x[3] # 'te k' + add 4*4(%rsp),@x[4] + add 4*5(%rsp),@x[5] + add 4*6(%rsp),@x[6] + add 4*7(%rsp),@x[7] + add 4*12(%rsp),@x[12] + add 4*13(%rsp),@x[13] + add 4*14(%rsp),@x[14] + add 4*15(%rsp),@x[15] + paddd 4*8(%rsp),%xmm1 + + cmp \$64,%rbp + jb .Ltail + + xor 4*0($inp),@x[0] # xor with input + xor 4*1($inp),@x[1] + xor 4*2($inp),@x[2] + xor 4*3($inp),@x[3] + xor 4*4($inp),@x[4] + xor 4*5($inp),@x[5] + xor 4*6($inp),@x[6] + xor 4*7($inp),@x[7] + movdqu 4*8($inp),%xmm0 + xor 4*12($inp),@x[12] + xor 4*13($inp),@x[13] + xor 4*14($inp),@x[14] + xor 4*15($inp),@x[15] + lea 4*16($inp),$inp # inp+=64 + pxor %xmm1,%xmm0 + + movdqa %xmm2,4*8(%rsp) + movd %xmm3,4*12(%rsp) + + mov @x[0],4*0($out) # write output + mov @x[1],4*1($out) + mov @x[2],4*2($out) + mov @x[3],4*3($out) + mov @x[4],4*4($out) + mov @x[5],4*5($out) + mov @x[6],4*6($out) + mov @x[7],4*7($out) + movdqu %xmm0,4*8($out) + mov @x[12],4*12($out) + mov @x[13],4*13($out) + mov @x[14],4*14($out) + mov @x[15],4*15($out) + lea 4*16($out),$out # out+=64 + + sub \$64,%rbp + jnz .Loop_outer + + jmp .Ldone + +.align 16 +.Ltail: + mov @x[0],4*0(%rsp) + mov @x[1],4*1(%rsp) + xor %rbx,%rbx + mov @x[2],4*2(%rsp) + mov @x[3],4*3(%rsp) + mov @x[4],4*4(%rsp) + mov @x[5],4*5(%rsp) + mov @x[6],4*6(%rsp) + mov @x[7],4*7(%rsp) + movdqa %xmm1,4*8(%rsp) + mov @x[12],4*12(%rsp) + mov @x[13],4*13(%rsp) + mov @x[14],4*14(%rsp) + mov @x[15],4*15(%rsp) + +.Loop_tail: + movzb ($inp,%rbx),%eax + movzb (%rsp,%rbx),%edx + lea 1(%rbx),%rbx + xor %edx,%eax + mov %al,-1($out,%rbx) + dec %rbp + jnz .Loop_tail + +.Ldone: + add \$64+24,%rsp +.cfi_adjust_cfa_offset -64-24 + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbp +.cfi_restore %rbp + pop %rbx +.cfi_restore %rbx +.Lno_data: + ret +.cfi_endproc +___ +&end_function("chacha20_ctr32"); +} + +######################################################################## +# SSSE3 code path that handles shorter 
lengths +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7)); + +sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot16); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,20); + &pslld ($t,12); + &por ($b,$t); + + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot24); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,25); + &pslld ($t,7); + &por ($b,$t); +} + +my $xframe = $win64 ? 32+8 : 8; + +if($kernel) { + $code .= "#ifdef CONFIG_AS_SSSE3\n"; +} + +if($kernel) { +&declare_function("hchacha20_ssse3", 32, 5); +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($len),$b + movdqu 16($len),$c + movdqu ($inp),$d + # This code is only used when targeting kernel. + # If targeting win64, xmm{6,7} preserving needs to be added. + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + mov \$10,$counter # reuse $counter + jmp 1f +.align 32 +1: +___ + &SSSE3ROUND(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &nop (); + + &SSSE3ROUND(); + &pshufd ($a,$a,0b00111001); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + + &dec ($counter); + &jnz ("1b"); + +$code.=<<___; + movdqu $a, ($out) + movdqu $d, 16($out) + ret +___ +&end_function("hchacha20_ssse3"); +} + +&declare_function("chacha20_ssse3", 32, 5); +$code.=<<___; +.cfi_startproc + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 +___ +$code.=<<___ if ($avx && !$kernel); + test \$`1<<(43-32)`,%r10d + jnz .Lchacha20_4xop # XOP is fastest even if we use 1/4 +___ +$code.=<<___; + cmp \$128,$len # we might throw away some data, + je .Lchacha20_128 + ja .Lchacha20_4x # but overall it won't be slower + +.Ldo_ssse3_after_all: + sub \$64+$xframe,%rsp + and \$-16,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lssse3_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($key),$b + movdqu 
16($key),$c + movdqu ($counter),$d + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + mov \$10,$counter # reuse $counter + jmp .Loop_ssse3 + +.align 32 +.Loop_outer_ssse3: + movdqa .Lone(%rip),$d + movdqa 0x00(%rsp),$a + movdqa 0x10(%rsp),$b + movdqa 0x20(%rsp),$c + paddd 0x30(%rsp),$d + mov \$10,$counter + movdqa $d,0x30(%rsp) + jmp .Loop_ssse3 + +.align 32 +.Loop_ssse3: +___ + &SSSE3ROUND(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &nop (); + + &SSSE3ROUND(); + &pshufd ($a,$a,0b00111001); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + + &dec ($counter); + &jnz (".Loop_ssse3"); + +$code.=<<___; + paddd 0x00(%rsp),$a + paddd 0x10(%rsp),$b + paddd 0x20(%rsp),$c + paddd 0x30(%rsp),$d + + cmp \$64,$len + jb .Ltail_ssse3 + + movdqu 0x00($inp),$t + movdqu 0x10($inp),$t1 + pxor $t,$a # xor with input + movdqu 0x20($inp),$t + pxor $t1,$b + movdqu 0x30($inp),$t1 + lea 0x40($inp),$inp # inp+=64 + pxor $t,$c + pxor $t1,$d + + movdqu $a,0x00($out) # write output + movdqu $b,0x10($out) + movdqu $c,0x20($out) + movdqu $d,0x30($out) + lea 0x40($out),$out # out+=64 + + sub \$64,$len + jnz .Loop_outer_ssse3 + + jmp .Ldone_ssse3 + +.align 16 +.Ltail_ssse3: + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + xor $counter,$counter + +.Loop_tail_ssse3: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_ssse3 + +.Ldone_ssse3: +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lssse3_epilogue: + ret +.cfi_endproc +___ +} +&end_function("chacha20_ssse3"); + +######################################################################## +# SSSE3 code path that handles 128-byte 
inputs +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7)); +my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1)); + +sub SSSE3ROUND_2x { + &paddd ($a,$b); + &pxor ($d,$a); + &paddd ($a1,$b1); + &pxor ($d1,$a1); + &pshufb ($d,$rot16); + &pshufb($d1,$rot16); + + &paddd ($c,$d); + &paddd ($c1,$d1); + &pxor ($b,$c); + &pxor ($b1,$c1); + &movdqa ($t,$b); + &psrld ($b,20); + &movdqa($t1,$b1); + &pslld ($t,12); + &psrld ($b1,20); + &por ($b,$t); + &pslld ($t1,12); + &por ($b1,$t1); + + &paddd ($a,$b); + &pxor ($d,$a); + &paddd ($a1,$b1); + &pxor ($d1,$a1); + &pshufb ($d,$rot24); + &pshufb($d1,$rot24); + + &paddd ($c,$d); + &paddd ($c1,$d1); + &pxor ($b,$c); + &pxor ($b1,$c1); + &movdqa ($t,$b); + &psrld ($b,25); + &movdqa($t1,$b1); + &pslld ($t,7); + &psrld ($b1,25); + &por ($b,$t); + &pslld ($t1,7); + &por ($b1,$t1); +} + +my $xframe = $win64 ? 0x68 : 8; + +$code.=<<___; +.type chacha20_128,\@function,5 +.align 32 +chacha20_128: +.cfi_startproc +.Lchacha20_128: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + sub \$64+$xframe,%rsp + and \$-16,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x70(%r10) + movaps %xmm7,-0x60(%r10) + movaps %xmm8,-0x50(%r10) + movaps %xmm9,-0x40(%r10) + movaps %xmm10,-0x30(%r10) + movaps %xmm11,-0x20(%r10) +.L128_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($key),$b + movdqu 16($key),$c + movdqu ($counter),$d + movdqa .Lone(%rip),$d1 + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + + movdqa $a,$a1 + movdqa $a,0x00(%rsp) + movdqa $b,$b1 + movdqa $b,0x10(%rsp) + movdqa $c,$c1 + movdqa $c,0x20(%rsp) + paddd $d,$d1 + movdqa $d,0x30(%rsp) + mov \$10,$counter # reuse $counter + jmp .Loop_128 + +.align 32 +.Loop_128: +___ + &SSSE3ROUND_2x(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &pshufd ($a1,$a1,0b10010011); + &pshufd ($d1,$d1,0b01001110); + &pshufd ($c1,$c1,0b00111001); + + &SSSE3ROUND_2x(); + &pshufd ($a,$a,0b00111001); + &pshufd 
($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + &pshufd ($a1,$a1,0b00111001); + &pshufd ($d1,$d1,0b01001110); + &pshufd ($c1,$c1,0b10010011); + + &dec ($counter); + &jnz (".Loop_128"); + +$code.=<<___; + paddd 0x00(%rsp),$a + paddd 0x10(%rsp),$b + paddd 0x20(%rsp),$c + paddd 0x30(%rsp),$d + paddd .Lone(%rip),$d1 + paddd 0x00(%rsp),$a1 + paddd 0x10(%rsp),$b1 + paddd 0x20(%rsp),$c1 + paddd 0x30(%rsp),$d1 + + movdqu 0x00($inp),$t + movdqu 0x10($inp),$t1 + pxor $t,$a # xor with input + movdqu 0x20($inp),$t + pxor $t1,$b + movdqu 0x30($inp),$t1 + pxor $t,$c + movdqu 0x40($inp),$t + pxor $t1,$d + movdqu 0x50($inp),$t1 + pxor $t,$a1 + movdqu 0x60($inp),$t + pxor $t1,$b1 + movdqu 0x70($inp),$t1 + pxor $t,$c1 + pxor $t1,$d1 + + movdqu $a,0x00($out) # write output + movdqu $b,0x10($out) + movdqu $c,0x20($out) + movdqu $d,0x30($out) + movdqu $a1,0x40($out) + movdqu $b1,0x50($out) + movdqu $c1,0x60($out) + movdqu $d1,0x70($out) +___ +$code.=<<___ if ($win64); + movaps -0x70(%r10),%xmm6 + movaps -0x60(%r10),%xmm7 + movaps -0x50(%r10),%xmm8 + movaps -0x40(%r10),%xmm9 + movaps -0x30(%r10),%xmm10 + movaps -0x20(%r10),%xmm11 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L128_epilogue: + ret +.cfi_endproc +.size chacha20_128,.-chacha20_128 +___ +} + +######################################################################## +# SSSE3 code path that handles longer messages. 
+{ +# assign variables to favor Atom front-end +my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3, + $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub SSSE3_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. 
+ + ( + "&paddd (@x[$a0],@x[$b0])", # Q1 + "&paddd (@x[$a1],@x[$b1])", # Q2 + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t1)", + "&pshufb (@x[$d1],$t1)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t0,@x[$b0])", + "&pslld (@x[$b0],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b1])", + "&pslld (@x[$b1],12)", + "&por (@x[$b0],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b1],$t1)", + + "&paddd (@x[$a0],@x[$b0])", + "&paddd (@x[$a1],@x[$b1])", + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t0)", + "&pshufb (@x[$d1],$t0)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t1,@x[$b0])", + "&pslld (@x[$b0],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b1])", + "&pslld (@x[$b1],7)", + "&por (@x[$b0],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r9)')", # .Lrot16(%rip) + "&por (@x[$b1],$t0)", + + "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)", + "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")", + "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")", + + "&paddd (@x[$a2],@x[$b2])", # Q3 + "&paddd (@x[$a3],@x[$b3])", # Q4 + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t1)", + "&pshufb (@x[$d3],$t1)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + "&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t0,@x[$b2])", + "&pslld (@x[$b2],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b3])", + "&pslld (@x[$b3],12)", + "&por (@x[$b2],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b3],$t1)", + + "&paddd (@x[$a2],@x[$b2])", + "&paddd (@x[$a3],@x[$b3])", + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t0)", + "&pshufb (@x[$d3],$t0)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + 
"&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t1,@x[$b2])", + "&pslld (@x[$b2],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b3])", + "&pslld (@x[$b3],7)", + "&por (@x[$b2],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r9)')", # .Lrot16(%rip) + "&por (@x[$b3],$t0)" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +$code.=<<___; +.type chacha20_4x,\@function,5 +.align 32 +chacha20_4x: +.cfi_startproc +.Lchacha20_4x: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 +___ +$code.=<<___ if (!$kernel); + mov %r9,%r11 +___ +$code.=<<___ if ($avx>1 && !$kernel); + shr \$32,%r9 # OPENSSL_ia32cap_P+8 + test \$`1<<5`,%r9 # test AVX2 + jnz .Lchacha20_8x +___ +$code.=<<___; + cmp \$192,$len + ja .Lproceed4x +___ +$code.=<<___ if (!$kernel); + and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE + cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE + je .Ldo_ssse3_after_all # to detect Atom +___ +$code.=<<___; +.Lproceed4x: + sub \$0x140+$xframe,%rsp + and \$-16,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... + # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L4x_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$xa3 # key[0] + movdqu ($key),$xb3 # key[1] + movdqu 16($key),$xt3 # key[2] + movdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea .Lrot16(%rip),%r9 + lea .Lrot24(%rip),%r11 + + pshufd \$0x00,$xa3,$xa0 # smash key by lanes... + pshufd \$0x55,$xa3,$xa1 + movdqa $xa0,0x40(%rsp) # ... 
and offload + pshufd \$0xaa,$xa3,$xa2 + movdqa $xa1,0x50(%rsp) + pshufd \$0xff,$xa3,$xa3 + movdqa $xa2,0x60(%rsp) + movdqa $xa3,0x70(%rsp) + + pshufd \$0x00,$xb3,$xb0 + pshufd \$0x55,$xb3,$xb1 + movdqa $xb0,0x80-0x100(%rcx) + pshufd \$0xaa,$xb3,$xb2 + movdqa $xb1,0x90-0x100(%rcx) + pshufd \$0xff,$xb3,$xb3 + movdqa $xb2,0xa0-0x100(%rcx) + movdqa $xb3,0xb0-0x100(%rcx) + + pshufd \$0x00,$xt3,$xt0 # "$xc0" + pshufd \$0x55,$xt3,$xt1 # "$xc1" + movdqa $xt0,0xc0-0x100(%rcx) + pshufd \$0xaa,$xt3,$xt2 # "$xc2" + movdqa $xt1,0xd0-0x100(%rcx) + pshufd \$0xff,$xt3,$xt3 # "$xc3" + movdqa $xt2,0xe0-0x100(%rcx) + movdqa $xt3,0xf0-0x100(%rcx) + + pshufd \$0x00,$xd3,$xd0 + pshufd \$0x55,$xd3,$xd1 + paddd .Linc(%rip),$xd0 # don't save counters yet + pshufd \$0xaa,$xd3,$xd2 + movdqa $xd1,0x110-0x100(%rcx) + pshufd \$0xff,$xd3,$xd3 + movdqa $xd2,0x120-0x100(%rcx) + movdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4x + +.align 32 +.Loop_outer4x: + movdqa 0x40(%rsp),$xa0 # re-load smashed key + movdqa 0x50(%rsp),$xa1 + movdqa 0x60(%rsp),$xa2 + movdqa 0x70(%rsp),$xa3 + movdqa 0x80-0x100(%rcx),$xb0 + movdqa 0x90-0x100(%rcx),$xb1 + movdqa 0xa0-0x100(%rcx),$xb2 + movdqa 0xb0-0x100(%rcx),$xb3 + movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + movdqa 0x100-0x100(%rcx),$xd0 + movdqa 0x110-0x100(%rcx),$xd1 + movdqa 0x120-0x100(%rcx),$xd2 + movdqa 0x130-0x100(%rcx),$xd3 + paddd .Lfour(%rip),$xd0 # next SIMD counters + +.Loop_enter4x: + movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]" + movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]" + movdqa (%r9),$xt3 # .Lrot16(%rip) + mov \$10,%eax + movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4x + +.align 32 +.Loop4x: +___ + foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4x + + paddd 0x40(%rsp),$xa0 # accumulate key material + paddd 
0x50(%rsp),$xa1 + paddd 0x60(%rsp),$xa2 + paddd 0x70(%rsp),$xa3 + + movdqa $xa0,$xt2 # "de-interlace" data + punpckldq $xa1,$xa0 + movdqa $xa2,$xt3 + punpckldq $xa3,$xa2 + punpckhdq $xa1,$xt2 + punpckhdq $xa3,$xt3 + movdqa $xa0,$xa1 + punpcklqdq $xa2,$xa0 # "a0" + movdqa $xt2,$xa3 + punpcklqdq $xt3,$xt2 # "a2" + punpckhqdq $xa2,$xa1 # "a1" + punpckhqdq $xt3,$xa3 # "a3" +___ + ($xa2,$xt2)=($xt2,$xa2); +$code.=<<___; + paddd 0x80-0x100(%rcx),$xb0 + paddd 0x90-0x100(%rcx),$xb1 + paddd 0xa0-0x100(%rcx),$xb2 + paddd 0xb0-0x100(%rcx),$xb3 + + movdqa $xa0,0x00(%rsp) # offload $xaN + movdqa $xa1,0x10(%rsp) + movdqa 0x20(%rsp),$xa0 # "xc2" + movdqa 0x30(%rsp),$xa1 # "xc3" + + movdqa $xb0,$xt2 + punpckldq $xb1,$xb0 + movdqa $xb2,$xt3 + punpckldq $xb3,$xb2 + punpckhdq $xb1,$xt2 + punpckhdq $xb3,$xt3 + movdqa $xb0,$xb1 + punpcklqdq $xb2,$xb0 # "b0" + movdqa $xt2,$xb3 + punpcklqdq $xt3,$xt2 # "b2" + punpckhqdq $xb2,$xb1 # "b1" + punpckhqdq $xt3,$xb3 # "b3" +___ + ($xb2,$xt2)=($xt2,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + paddd 0xc0-0x100(%rcx),$xc0 + paddd 0xd0-0x100(%rcx),$xc1 + paddd 0xe0-0x100(%rcx),$xc2 + paddd 0xf0-0x100(%rcx),$xc3 + + movdqa $xa2,0x20(%rsp) # keep offloading $xaN + movdqa $xa3,0x30(%rsp) + + movdqa $xc0,$xt2 + punpckldq $xc1,$xc0 + movdqa $xc2,$xt3 + punpckldq $xc3,$xc2 + punpckhdq $xc1,$xt2 + punpckhdq $xc3,$xt3 + movdqa $xc0,$xc1 + punpcklqdq $xc2,$xc0 # "c0" + movdqa $xt2,$xc3 + punpcklqdq $xt3,$xt2 # "c2" + punpckhqdq $xc2,$xc1 # "c1" + punpckhqdq $xt3,$xc3 # "c3" +___ + ($xc2,$xt2)=($xt2,$xc2); + ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary +$code.=<<___; + paddd 0x100-0x100(%rcx),$xd0 + paddd 0x110-0x100(%rcx),$xd1 + paddd 0x120-0x100(%rcx),$xd2 + paddd 0x130-0x100(%rcx),$xd3 + + movdqa $xd0,$xt2 + punpckldq $xd1,$xd0 + movdqa $xd2,$xt3 + punpckldq $xd3,$xd2 + punpckhdq $xd1,$xt2 + punpckhdq $xd3,$xt3 + movdqa $xd0,$xd1 + punpcklqdq $xd2,$xd0 # "d0" + movdqa $xt2,$xd3 + punpcklqdq $xt3,$xt2 # "d2" + punpckhqdq 
$xd2,$xd1 # "d1" + punpckhqdq $xt3,$xd3 # "d3" +___ + ($xd2,$xt2)=($xt2,$xd2); +$code.=<<___; + cmp \$64*4,$len + jb .Ltail4x + + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # inp+=64*4 + pxor 0x30(%rsp),$xt0 + pxor $xb3,$xt1 + pxor $xc3,$xt2 + pxor $xd3,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4x + + jmp .Ldone4x + +.Ltail4x: + cmp \$192,$len + jae .L192_or_more4x + cmp \$128,$len + jae .L128_or_more4x + cmp \$64,$len + jae .L64_or_more4x + + #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember? 
+ xor %r9,%r9 + #movdqa $xt0,0x00(%rsp) + movdqa $xb0,0x10(%rsp) + movdqa $xc0,0x20(%rsp) + movdqa $xd0,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L64_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaxN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x40($inp),$inp # inp+=64*1 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + movdqa $xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + movdqa $xd1,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L128_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + je .Ldone4x + + movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x80($inp),$inp # inp+=64*2 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + movdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + movdqa $xd2,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L192_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? 
+ pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x40($inp),$inp # inp+=64*3 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb3,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + movdqa $xc3,0x20(%rsp) + sub \$192,$len # len-=64*3 + movdqa $xd3,0x30(%rsp) + +.Loop_tail4x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail4x + +.Ldone4x: +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L4x_epilogue: + ret +.cfi_endproc +.size chacha20_4x,.-chacha20_4x +___ +} +if($kernel) { + $code .= "#endif\n"; +} + +######################################################################## +# XOP code path that handles all lengths. +if ($avx && !$kernel) { +# There is some "anomaly" observed depending on instructions' size or +# alignment. 
If you look closely at below code you'll notice that +# sometimes argument order varies. The order affects instruction +# encoding by making it larger, and such fiddling gives 5% performance +# improvement. This is on FX-4100... + +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3); + +sub XOP_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my @x=map("\"$_\"",@xx); + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],16)", + "&vprotd (@x[$d1],@x[$d1],16)", + "&vprotd (@x[$d2],@x[$d2],16)", + "&vprotd (@x[$d3],@x[$d3],16)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],12)", + "&vprotd (@x[$b1],@x[$b1],12)", + "&vprotd (@x[$b2],@x[$b2],12)", + "&vprotd (@x[$b3],@x[$b3],12)", + + "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip + "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],8)", 
+ "&vprotd (@x[$d1],@x[$d1],8)", + "&vprotd (@x[$d2],@x[$d2],8)", + "&vprotd (@x[$d3],@x[$d3],8)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],7)", + "&vprotd (@x[$b1],@x[$b1],7)", + "&vprotd (@x[$b2],@x[$b2],7)", + "&vprotd (@x[$b3],@x[$b3],7)" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +&declare_function("chacha20_xop", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_4xop: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + sub \$0x140+$xframe,%rsp + and \$-16,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... + # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L4xop_body: +___ +$code.=<<___; + vzeroupper + + vmovdqa .Lsigma(%rip),$xa3 # key[0] + vmovdqu ($key),$xb3 # key[1] + vmovdqu 16($key),$xt3 # key[2] + vmovdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x40(%rsp) # ... 
and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0x50(%rsp) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0x60(%rsp) + vmovdqa $xa3,0x70(%rsp) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x80-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x90-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0xa0-0x100(%rcx) + vmovdqa $xb3,0xb0-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "$xc0" + vpshufd \$0x55,$xt3,$xt1 # "$xc1" + vmovdqa $xt0,0xc0-0x100(%rcx) + vpshufd \$0xaa,$xt3,$xt2 # "$xc2" + vmovdqa $xt1,0xd0-0x100(%rcx) + vpshufd \$0xff,$xt3,$xt3 # "$xc3" + vmovdqa $xt2,0xe0-0x100(%rcx) + vmovdqa $xt3,0xf0-0x100(%rcx) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x110-0x100(%rcx) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x120-0x100(%rcx) + vmovdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4xop + +.align 32 +.Loop_outer4xop: + vmovdqa 0x40(%rsp),$xa0 # re-load smashed key + vmovdqa 0x50(%rsp),$xa1 + vmovdqa 0x60(%rsp),$xa2 + vmovdqa 0x70(%rsp),$xa3 + vmovdqa 0x80-0x100(%rcx),$xb0 + vmovdqa 0x90-0x100(%rcx),$xb1 + vmovdqa 0xa0-0x100(%rcx),$xb2 + vmovdqa 0xb0-0x100(%rcx),$xb3 + vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + vmovdqa 0x100-0x100(%rcx),$xd0 + vmovdqa 0x110-0x100(%rcx),$xd1 + vmovdqa 0x120-0x100(%rcx),$xd2 + vmovdqa 0x130-0x100(%rcx),$xd3 + vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter4xop: + mov \$10,%eax + vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4xop + +.align 32 +.Loop4xop: +___ + foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4xop + + vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material + vpaddd 0x50(%rsp),$xa1,$xa1 + vpaddd 0x60(%rsp),$xa2,$xa2 + vpaddd 
0x70(%rsp),$xa3,$xa3 + + vmovdqa $xt2,0x20(%rsp) # offload $xc2,3 + vmovdqa $xt3,0x30(%rsp) + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x80-0x100(%rcx),$xb0,$xb0 + vpaddd 0x90-0x100(%rcx),$xb1,$xb1 + vpaddd 0xa0-0x100(%rcx),$xb2,$xb2 + vpaddd 0xb0-0x100(%rcx),$xb3,$xb3 + + vmovdqa $xa0,0x00(%rsp) # offload $xa0,1 + vmovdqa $xa1,0x10(%rsp) + vmovdqa 0x20(%rsp),$xa0 # "xc2" + vmovdqa 0x30(%rsp),$xa1 # "xc3" + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vpaddd 0xc0-0x100(%rcx),$xc0,$xc0 + vpaddd 0xd0-0x100(%rcx),$xc1,$xc1 + vpaddd 0xe0-0x100(%rcx),$xc2,$xc2 + vpaddd 0xf0-0x100(%rcx),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xd0,$xd0 + vpaddd 0x110-0x100(%rcx),$xd1,$xd1 + vpaddd 0x120-0x100(%rcx),$xd2,$xd2 + vpaddd 0x130-0x100(%rcx),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # 
"d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1 + vmovdqa 0x10(%rsp),$xa1 + + cmp \$64*4,$len + jb .Ltail4xop + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + vpxor 0x40($inp),$xa3,$xa3 + vpxor 0x50($inp),$xb3,$xb3 + vpxor 0x60($inp),$xc3,$xc3 + vpxor 0x70($inp),$xd3,$xd3 + lea 0x80($inp),$inp # inp+=64*4 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + vmovdqu $xa3,0x40($out) + vmovdqu $xb3,0x50($out) + vmovdqu $xc3,0x60($out) + vmovdqu $xd3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4xop + + jmp .Ldone4xop + +.align 32 +.Ltail4xop: + cmp \$192,$len + jae .L192_or_more4xop + cmp \$128,$len + jae .L128_or_more4xop + cmp \$64,$len + jae .L64_or_more4xop + + xor %r9,%r9 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x10(%rsp) + vmovdqa $xc0,0x20(%rsp) + vmovdqa $xd0,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L64_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*1 + vmovdqa $xa1,0x00(%rsp) + xor %r9,%r9 + vmovdqa 
$xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + vmovdqa $xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + vmovdqa $xd1,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L128_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + je .Ldone4xop + + lea 0x80($inp),$inp # inp+=64*2 + vmovdqa $xa2,0x00(%rsp) + xor %r9,%r9 + vmovdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + vmovdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + vmovdqa $xd2,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L192_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*3 + vmovdqa $xa3,0x00(%rsp) + xor %r9,%r9 + vmovdqa $xb3,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + vmovdqa $xc3,0x20(%rsp) + sub \$192,$len # len-=64*3 + vmovdqa $xd3,0x30(%rsp) + +.Loop_tail4xop: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 
1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail4xop + +.Ldone4xop: + vzeroupper +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L4xop_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_xop"); +} + +######################################################################## +# AVX2 code path +if ($avx>1) { + +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX2\n"; +} + +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub AVX2_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. 
+ + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t1)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t1)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t0,@x[$b0],12)", + "&vpsrld (@x[$b0],@x[$b0],20)", + "&vpor (@x[$b0],$t0,@x[$b0])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t1,@x[$b1],12)", + "&vpsrld (@x[$b1],@x[$b1],20)", + "&vpor (@x[$b1],$t1,@x[$b1])", + + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t0)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t0)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t1,@x[$b0],7)", + "&vpsrld (@x[$b0],@x[$b0],25)", + "&vpor (@x[$b0],$t1,@x[$b0])", + "&vbroadcasti128($t1,'(%r9)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t0,@x[$b1],7)", + "&vpsrld (@x[$b1],@x[$b1],25)", + "&vpor (@x[$b1],$t0,@x[$b1])", + + "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)", + "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")", + "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t1)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t1)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t0,@x[$b2],12)", + "&vpsrld (@x[$b2],@x[$b2],20)", + "&vpor (@x[$b2],$t0,@x[$b2])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t1,@x[$b3],12)", + "&vpsrld 
(@x[$b3],@x[$b3],20)", + "&vpor (@x[$b3],$t1,@x[$b3])", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t0)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t0)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t1,@x[$b2],7)", + "&vpsrld (@x[$b2],@x[$b2],25)", + "&vpor (@x[$b2],$t1,@x[$b2])", + "&vbroadcasti128($t1,'(%r9)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t0,@x[$b3],7)", + "&vpsrld (@x[$b3],@x[$b3],25)", + "&vpor (@x[$b3],$t0,@x[$b3])" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +&declare_function("chacha20_avx2", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_8x: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + sub \$0x280+$xframe,%rsp + and \$-32,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L8x_body: +___ +$code.=<<___; + vzeroupper + + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x80 constant copy of key[0-2] smashed by lanes + # ... + # +0x200 SIMD counters (with nonce smashed by lanes) + # ... + # +0x280 + + vbroadcasti128 .Lsigma(%rip),$xa3 # key[0] + vbroadcasti128 ($key),$xb3 # key[1] + vbroadcasti128 16($key),$xt3 # key[2] + vbroadcasti128 ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea 0x200(%rsp),%rax # size optimization + lea .Lrot16(%rip),%r9 + lea .Lrot24(%rip),%r11 + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x80-0x100(%rcx) # ... 
and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0xa0-0x100(%rcx) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0xc0-0x100(%rcx) + vmovdqa $xa3,0xe0-0x100(%rcx) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x100-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x120-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0x140-0x100(%rcx) + vmovdqa $xb3,0x160-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "xc0" + vpshufd \$0x55,$xt3,$xt1 # "xc1" + vmovdqa $xt0,0x180-0x200(%rax) + vpshufd \$0xaa,$xt3,$xt2 # "xc2" + vmovdqa $xt1,0x1a0-0x200(%rax) + vpshufd \$0xff,$xt3,$xt3 # "xc3" + vmovdqa $xt2,0x1c0-0x200(%rax) + vmovdqa $xt3,0x1e0-0x200(%rax) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x220-0x200(%rax) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x240-0x200(%rax) + vmovdqa $xd3,0x260-0x200(%rax) + + jmp .Loop_enter8x + +.align 32 +.Loop_outer8x: + vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key + vmovdqa 0xa0-0x100(%rcx),$xa1 + vmovdqa 0xc0-0x100(%rcx),$xa2 + vmovdqa 0xe0-0x100(%rcx),$xa3 + vmovdqa 0x100-0x100(%rcx),$xb0 + vmovdqa 0x120-0x100(%rcx),$xb1 + vmovdqa 0x140-0x100(%rcx),$xb2 + vmovdqa 0x160-0x100(%rcx),$xb3 + vmovdqa 0x180-0x200(%rax),$xt0 # "xc0" + vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1" + vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2" + vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3" + vmovdqa 0x200-0x200(%rax),$xd0 + vmovdqa 0x220-0x200(%rax),$xd1 + vmovdqa 0x240-0x200(%rax),$xd2 + vmovdqa 0x260-0x200(%rax),$xd3 + vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter8x: + vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]" + vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]" + vbroadcasti128 (%r9),$xt3 + vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters + mov \$10,%eax + jmp .Loop8x + +.align 32 +.Loop8x: +___ + foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; } 
+$code.=<<___; + dec %eax + jnz .Loop8x + + lea 0x200(%rsp),%rax # size optimization + vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key + vpaddd 0xa0-0x100(%rcx),$xa1,$xa1 + vpaddd 0xc0-0x100(%rcx),$xa2,$xa2 + vpaddd 0xe0-0x100(%rcx),$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xb0,$xb0 + vpaddd 0x120-0x100(%rcx),$xb1,$xb1 + vpaddd 0x140-0x100(%rcx),$xb2,$xb2 + vpaddd 0x160-0x100(%rcx),$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xb0,$xa0,$xb0 + vperm2i128 \$0x20,$xb1,$xa1,$xa0 + vperm2i128 \$0x31,$xb1,$xa1,$xb1 + vperm2i128 \$0x20,$xb2,$xa2,$xa1 + vperm2i128 \$0x31,$xb2,$xa2,$xb2 + vperm2i128 \$0x20,$xb3,$xa3,$xa2 + vperm2i128 \$0x31,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vmovdqa $xa0,0x00(%rsp) # offload $xaN + vmovdqa $xa1,0x20(%rsp) + vmovdqa 0x40(%rsp),$xc2 # $xa0 + vmovdqa 0x60(%rsp),$xc3 # $xa1 + + vpaddd 0x180-0x200(%rax),$xc0,$xc0 + vpaddd 0x1a0-0x200(%rax),$xc1,$xc1 + vpaddd 0x1c0-0x200(%rax),$xc2,$xc2 + vpaddd 0x1e0-0x200(%rax),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + 
vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x200-0x200(%rax),$xd0,$xd0 + vpaddd 0x220-0x200(%rax),$xd1,$xd1 + vpaddd 0x240-0x200(%rax),$xd2,$xd2 + vpaddd 0x260-0x200(%rax),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xd0,$xc0,$xd0 + vperm2i128 \$0x20,$xd1,$xc1,$xc0 + vperm2i128 \$0x31,$xd1,$xc1,$xd1 + vperm2i128 \$0x20,$xd2,$xc2,$xc1 + vperm2i128 \$0x31,$xd2,$xc2,$xd2 + vperm2i128 \$0x20,$xd3,$xc3,$xc2 + vperm2i128 \$0x31,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); + ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)= + ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember? 
+ vmovdqa 0x20(%rsp),$xa1 + + cmp \$64*8,$len + jb .Ltail8x + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vpxor 0x40($inp),$xc1,$xc1 + vpxor 0x60($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa1,0x00($out) + vmovdqu $xb1,0x20($out) + vmovdqu $xc1,0x40($out) + vmovdqu $xd1,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vpxor 0x40($inp),$xc2,$xc2 + vpxor 0x60($inp),$xd2,$xd2 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x20($out) + vmovdqu $xc2,0x40($out) + vmovdqu $xd2,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vpxor 0x40($inp),$xc3,$xc3 + vpxor 0x60($inp),$xd3,$xd3 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa3,0x00($out) + vmovdqu $xb3,0x20($out) + vmovdqu $xc3,0x40($out) + vmovdqu $xd3,0x60($out) + lea 0x80($out),$out # size optimization + + sub \$64*8,$len + jnz .Loop_outer8x + + jmp .Ldone8x + +.Ltail8x: + cmp \$448,$len + jae .L448_or_more8x + cmp \$384,$len + jae .L384_or_more8x + cmp \$320,$len + jae .L320_or_more8x + cmp \$256,$len + jae .L256_or_more8x + cmp \$192,$len + jae .L192_or_more8x + cmp \$128,$len + jae .L128_or_more8x + cmp \$64,$len + jae .L64_or_more8x + + xor %r9,%r9 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L64_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + je .Ldone8x + + lea 0x40($inp),$inp # inp+=64*1 + xor %r9,%r9 + vmovdqa $xc0,0x00(%rsp) + lea 0x40($out),$out # 
out+=64*1 + sub \$64,$len # len-=64*1 + vmovdqa $xd0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L128_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + je .Ldone8x + + lea 0x80($inp),$inp # inp+=64*2 + xor %r9,%r9 + vmovdqa $xa1,0x00(%rsp) + lea 0x80($out),$out # out+=64*2 + sub \$128,$len # len-=64*2 + vmovdqa $xb1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L192_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + je .Ldone8x + + lea 0xc0($inp),$inp # inp+=64*3 + xor %r9,%r9 + vmovdqa $xc1,0x00(%rsp) + lea 0xc0($out),$out # out+=64*3 + sub \$192,$len # len-=64*3 + vmovdqa $xd1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L256_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + je .Ldone8x + + lea 0x100($inp),$inp # inp+=64*4 + xor %r9,%r9 + vmovdqa $xa2,0x00(%rsp) + lea 0x100($out),$out # out+=64*4 + sub \$256,$len # len-=64*4 + vmovdqa $xb2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L320_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 
0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + je .Ldone8x + + lea 0x140($inp),$inp # inp+=64*5 + xor %r9,%r9 + vmovdqa $xc2,0x00(%rsp) + lea 0x140($out),$out # out+=64*5 + sub \$320,$len # len-=64*5 + vmovdqa $xd2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L384_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + je .Ldone8x + + lea 0x180($inp),$inp # inp+=64*6 + xor %r9,%r9 + vmovdqa $xa3,0x00(%rsp) + lea 0x180($out),$out # out+=64*6 + sub \$384,$len # len-=64*6 + vmovdqa $xb3,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L448_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vpxor 0x180($inp),$xa3,$xa3 + vpxor 
0x1a0($inp),$xb3,$xb3 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + vmovdqu $xa3,0x180($out) + vmovdqu $xb3,0x1a0($out) + je .Ldone8x + + lea 0x1c0($inp),$inp # inp+=64*7 + xor %r9,%r9 + vmovdqa $xc3,0x00(%rsp) + lea 0x1c0($out),$out # out+=64*7 + sub \$448,$len # len-=64*7 + vmovdqa $xd3,0x20(%rsp) + +.Loop_tail8x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail8x + +.Ldone8x: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L8x_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx2"); +if($kernel) { + $code .= "#endif\n"; +} +} + +######################################################################## +# AVX512 code paths +if ($avx>2) { +# This one handles shorter inputs... 
+if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX512\n"; +} + +my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20)); +my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7)); + +sub vpxord() # size optimization +{ my $opcode = "vpxor"; # adhere to vpxor when possible + + foreach (@_) { + if (/%([zy])mm([0-9]+)/ && ($1 eq "z" || $2>=16)) { + $opcode = "vpxord"; + last; + } + } + + $code .= "\t$opcode\t".join(',',reverse @_)."\n"; +} + +sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round + &vpaddd ($a,$a,$b); + &vpxord ($d,$d,$a); + &vprold ($d,$d,16); + + &vpaddd ($c,$c,$d); + &vpxord ($b,$b,$c); + &vprold ($b,$b,12); + + &vpaddd ($a,$a,$b); + &vpxord ($d,$d,$a); + &vprold ($d,$d,8); + + &vpaddd ($c,$c,$d); + &vpxord ($b,$b,$c); + &vprold ($b,$b,7); +} + +my $xframe = $win64 ? 32+8 : 8; + +&declare_function("chacha20_avx512", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_avx512: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + cmp \$512,$len + ja .Lchacha20_16x + + sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lavx512_body: +___ +$code.=<<___; + vbroadcasti32x4 .Lsigma(%rip),$a + vbroadcasti32x4 ($key),$b + vbroadcasti32x4 16($key),$c + vbroadcasti32x4 ($counter),$d + + vmovdqa32 $a,$a_ + vmovdqa32 $b,$b_ + vmovdqa32 $c,$c_ + vpaddd .Lzeroz(%rip),$d,$d + vmovdqa32 .Lfourz(%rip),$fourz + mov \$10,$counter # reuse $counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512 + +.align 16 +.Loop_outer_avx512: + vmovdqa32 $a_,$a + vmovdqa32 $b_,$b + vmovdqa32 $c_,$c + vpaddd $fourz,$d_,$d + mov \$10,$counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512 + +.align 32 +.Loop_avx512: +___ + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b00111001); + &vpshufd ($d,$d,0b10010011); + + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b10010011); + &vpshufd ($d,$d,0b00111001); + + &dec ($counter); + &jnz (".Loop_avx512"); + +$code.=<<___; + vpaddd 
$a_,$a,$a + vpaddd $b_,$b,$b + vpaddd $c_,$c,$c + vpaddd $d_,$d,$d + + sub \$64,$len + jb .Ltail64_avx512 + + vpxor 0x00($inp),%x#$a,$t0 # xor with input + vpxor 0x10($inp),%x#$b,$t1 + vpxor 0x20($inp),%x#$c,$t2 + vpxor 0x30($inp),%x#$d,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$1,$a,$t0 + vextracti32x4 \$1,$b,$t1 + vextracti32x4 \$1,$c,$t2 + vextracti32x4 \$1,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$2,$a,$t0 + vextracti32x4 \$2,$b,$t1 + vextracti32x4 \$2,$c,$t2 + vextracti32x4 \$2,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$3,$a,$t0 + vextracti32x4 \$3,$b,$t1 + vextracti32x4 \$3,$c,$t2 + vextracti32x4 \$3,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jnz .Loop_outer_avx512 + + jmp .Ldone_avx512 + +.align 16 +.Ltail64_avx512: + vmovdqa %x#$a,0x00(%rsp) + vmovdqa %x#$b,0x10(%rsp) + vmovdqa 
%x#$c,0x20(%rsp) + vmovdqa %x#$d,0x30(%rsp) + add \$64,$len + jmp .Loop_tail_avx512 + +.align 16 +.Ltail_avx512: + vmovdqa $t0,0x00(%rsp) + vmovdqa $t1,0x10(%rsp) + vmovdqa $t2,0x20(%rsp) + vmovdqa $t3,0x30(%rsp) + add \$64,$len + +.Loop_tail_avx512: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_avx512 + + vmovdqu32 $a_,0x00(%rsp) + +.Ldone_avx512: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lavx512_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx512"); + +map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz); + +&declare_function("chacha20_avx512vl", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_avx512vl: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + cmp \$128,$len + ja .Lchacha20_8xvl + + sub \$64+$xframe,%rsp + and \$-32,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lavx512vl_body: +___ +$code.=<<___; + vbroadcasti128 .Lsigma(%rip),$a + vbroadcasti128 ($key),$b + vbroadcasti128 16($key),$c + vbroadcasti128 ($counter),$d + + vmovdqa32 $a,$a_ + vmovdqa32 $b,$b_ + vmovdqa32 $c,$c_ + vpaddd .Lzeroz(%rip),$d,$d + vmovdqa32 .Ltwoy(%rip),$fourz + mov \$10,$counter # reuse $counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512vl + +.align 16 +.Loop_outer_avx512vl: + vmovdqa32 $c_,$c + vpaddd $fourz,$d_,$d + mov \$10,$counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512vl + +.align 32 +.Loop_avx512vl: +___ + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b00111001); + &vpshufd ($d,$d,0b10010011); + + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b10010011); + &vpshufd ($d,$d,0b00111001); + + &dec ($counter); + &jnz (".Loop_avx512vl"); + +$code.=<<___; + vpaddd $a_,$a,$a + vpaddd $b_,$b,$b + vpaddd $c_,$c,$c + vpaddd $d_,$d,$d + + sub 
\$64,$len + jb .Ltail64_avx512vl + + vpxor 0x00($inp),%x#$a,$t0 # xor with input + vpxor 0x10($inp),%x#$b,$t1 + vpxor 0x20($inp),%x#$c,$t2 + vpxor 0x30($inp),%x#$d,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512vl + + vextracti128 \$1,$a,$t0 + vextracti128 \$1,$b,$t1 + vextracti128 \$1,$c,$t2 + vextracti128 \$1,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512vl + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + vmovdqa32 $a_,$a + vmovdqa32 $b_,$b + jnz .Loop_outer_avx512vl + + jmp .Ldone_avx512vl + +.align 16 +.Ltail64_avx512vl: + vmovdqa %x#$a,0x00(%rsp) + vmovdqa %x#$b,0x10(%rsp) + vmovdqa %x#$c,0x20(%rsp) + vmovdqa %x#$d,0x30(%rsp) + add \$64,$len + jmp .Loop_tail_avx512vl + +.align 16 +.Ltail_avx512vl: + vmovdqa $t0,0x00(%rsp) + vmovdqa $t1,0x10(%rsp) + vmovdqa $t2,0x20(%rsp) + vmovdqa $t3,0x30(%rsp) + add \$64,$len + +.Loop_tail_avx512vl: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_avx512vl + + vmovdqu32 $a_,0x00(%rsp) + vmovdqu32 $a_,0x20(%rsp) + +.Ldone_avx512vl: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lavx512vl_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx512vl"); + +# This one handles longer inputs... 
+ +my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +my @key=map("%zmm$_",(16..31)); +my ($xt0,$xt1,$xt2,$xt3)=@key[0..3]; + +sub AVX512_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my @x=map("\"$_\"",@xx); + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxord (@x[$d0],@x[$d0],@x[$a0])", + "&vpxord (@x[$d1],@x[$d1],@x[$a1])", + "&vpxord (@x[$d2],@x[$d2],@x[$a2])", + "&vpxord (@x[$d3],@x[$d3],@x[$a3])", + "&vprold (@x[$d0],@x[$d0],16)", + "&vprold (@x[$d1],@x[$d1],16)", + "&vprold (@x[$d2],@x[$d2],16)", + "&vprold (@x[$d3],@x[$d3],16)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxord (@x[$b0],@x[$b0],@x[$c0])", + "&vpxord (@x[$b1],@x[$b1],@x[$c1])", + "&vpxord (@x[$b2],@x[$b2],@x[$c2])", + "&vpxord (@x[$b3],@x[$b3],@x[$c3])", + "&vprold (@x[$b0],@x[$b0],12)", + "&vprold (@x[$b1],@x[$b1],12)", + "&vprold (@x[$b2],@x[$b2],12)", + "&vprold (@x[$b3],@x[$b3],12)", + + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxord (@x[$d0],@x[$d0],@x[$a0])", + "&vpxord (@x[$d1],@x[$d1],@x[$a1])", + "&vpxord (@x[$d2],@x[$d2],@x[$a2])", + "&vpxord (@x[$d3],@x[$d3],@x[$a3])", + "&vprold (@x[$d0],@x[$d0],8)", + "&vprold (@x[$d1],@x[$d1],8)", + "&vprold (@x[$d2],@x[$d2],8)", + "&vprold (@x[$d3],@x[$d3],8)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd 
(@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxord (@x[$b0],@x[$b0],@x[$c0])", + "&vpxord (@x[$b1],@x[$b1],@x[$c1])", + "&vpxord (@x[$b2],@x[$b2],@x[$c2])", + "&vpxord (@x[$b3],@x[$b3],@x[$c3])", + "&vprold (@x[$b0],@x[$b0],7)", + "&vprold (@x[$b1],@x[$b1],7)", + "&vprold (@x[$b2],@x[$b2],7)", + "&vprold (@x[$b3],@x[$b3],7)" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +$code.=<<___; +.type chacha20_16x,\@function,5 +.align 32 +chacha20_16x: +.cfi_startproc +.Lchacha20_16x: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L16x_body: +___ +$code.=<<___; + vzeroupper + + lea .Lsigma(%rip),%r9 + vbroadcasti32x4 (%r9),$xa3 # key[0] + vbroadcasti32x4 ($key),$xb3 # key[1] + vbroadcasti32x4 16($key),$xc3 # key[2] + vbroadcasti32x4 ($counter),$xd3 # key[3] + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... 
+ vpshufd \$0x55,$xa3,$xa1 + vpshufd \$0xaa,$xa3,$xa2 + vpshufd \$0xff,$xa3,$xa3 + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vpshufd \$0xaa,$xb3,$xb2 + vpshufd \$0xff,$xb3,$xb3 + vmovdqa64 $xb0,@key[4] + vmovdqa64 $xb1,@key[5] + vmovdqa64 $xb2,@key[6] + vmovdqa64 $xb3,@key[7] + + vpshufd \$0x00,$xc3,$xc0 + vpshufd \$0x55,$xc3,$xc1 + vpshufd \$0xaa,$xc3,$xc2 + vpshufd \$0xff,$xc3,$xc3 + vmovdqa64 $xc0,@key[8] + vmovdqa64 $xc1,@key[9] + vmovdqa64 $xc2,@key[10] + vmovdqa64 $xc3,@key[11] + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpshufd \$0xaa,$xd3,$xd2 + vpshufd \$0xff,$xd3,$xd3 + vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet + vmovdqa64 $xd0,@key[12] + vmovdqa64 $xd1,@key[13] + vmovdqa64 $xd2,@key[14] + vmovdqa64 $xd3,@key[15] + + mov \$10,%eax + jmp .Loop16x + +.align 32 +.Loop_outer16x: + vpbroadcastd 0(%r9),$xa0 # reload key + vpbroadcastd 4(%r9),$xa1 + vpbroadcastd 8(%r9),$xa2 + vpbroadcastd 12(%r9),$xa3 + vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters + vmovdqa64 @key[4],$xb0 + vmovdqa64 @key[5],$xb1 + vmovdqa64 @key[6],$xb2 + vmovdqa64 @key[7],$xb3 + vmovdqa64 @key[8],$xc0 + vmovdqa64 @key[9],$xc1 + vmovdqa64 @key[10],$xc2 + vmovdqa64 @key[11],$xc3 + vmovdqa64 @key[12],$xd0 + vmovdqa64 @key[13],$xd1 + vmovdqa64 @key[14],$xd2 + vmovdqa64 @key[15],$xd3 + + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + mov \$10,%eax + jmp .Loop16x + +.align 32 +.Loop16x: +___ + foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop16x + + vpaddd @key[0],$xa0,$xa0 # accumulate key + vpaddd @key[1],$xa1,$xa1 + vpaddd @key[2],$xa2,$xa2 + vpaddd @key[3],$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq 
$xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd @key[4],$xb0,$xb0 + vpaddd @key[5],$xb1,$xb1 + vpaddd @key[6],$xb2,$xb2 + vpaddd @key[7],$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further + vshufi32x4 \$0xee,$xb0,$xa0,$xb0 + vshufi32x4 \$0x44,$xb1,$xa1,$xa0 + vshufi32x4 \$0xee,$xb1,$xa1,$xb1 + vshufi32x4 \$0x44,$xb2,$xa2,$xa1 + vshufi32x4 \$0xee,$xb2,$xa2,$xb2 + vshufi32x4 \$0x44,$xb3,$xa3,$xa2 + vshufi32x4 \$0xee,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); +$code.=<<___; + vpaddd @key[8],$xc0,$xc0 + vpaddd @key[9],$xc1,$xc1 + vpaddd @key[10],$xc2,$xc2 + vpaddd @key[11],$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd @key[12],$xd0,$xd0 + vpaddd @key[13],$xd1,$xd1 + vpaddd @key[14],$xd2,$xd2 + vpaddd @key[15],$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # 
"de-interlace" further + vshufi32x4 \$0xee,$xd0,$xc0,$xd0 + vshufi32x4 \$0x44,$xd1,$xc1,$xc0 + vshufi32x4 \$0xee,$xd1,$xc1,$xd1 + vshufi32x4 \$0x44,$xd2,$xc2,$xc1 + vshufi32x4 \$0xee,$xd2,$xc2,$xd2 + vshufi32x4 \$0x44,$xd3,$xc3,$xc2 + vshufi32x4 \$0xee,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); +$code.=<<___; + vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further + vshufi32x4 \$0xdd,$xc0,$xa0,$xa0 + vshufi32x4 \$0x88,$xd0,$xb0,$xc0 + vshufi32x4 \$0xdd,$xd0,$xb0,$xd0 + vshufi32x4 \$0x88,$xc1,$xa1,$xt1 + vshufi32x4 \$0xdd,$xc1,$xa1,$xa1 + vshufi32x4 \$0x88,$xd1,$xb1,$xc1 + vshufi32x4 \$0xdd,$xd1,$xb1,$xd1 + vshufi32x4 \$0x88,$xc2,$xa2,$xt2 + vshufi32x4 \$0xdd,$xc2,$xa2,$xa2 + vshufi32x4 \$0x88,$xd2,$xb2,$xc2 + vshufi32x4 \$0xdd,$xd2,$xb2,$xd2 + vshufi32x4 \$0x88,$xc3,$xa3,$xt3 + vshufi32x4 \$0xdd,$xc3,$xa3,$xa3 + vshufi32x4 \$0x88,$xd3,$xb3,$xc3 + vshufi32x4 \$0xdd,$xd3,$xb3,$xd3 +___ + ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)= + ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3); + + ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1, + $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) = + ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +$code.=<<___; + cmp \$64*16,$len + jb .Ltail16x + + vpxord 0x00($inp),$xa0,$xa0 # xor with input + vpxord 0x40($inp),$xb0,$xb0 + vpxord 0x80($inp),$xc0,$xc0 + vpxord 0xc0($inp),$xd0,$xd0 + vmovdqu32 $xa0,0x00($out) + vmovdqu32 $xb0,0x40($out) + vmovdqu32 $xc0,0x80($out) + vmovdqu32 $xd0,0xc0($out) + + vpxord 0x100($inp),$xa1,$xa1 + vpxord 0x140($inp),$xb1,$xb1 + vpxord 0x180($inp),$xc1,$xc1 + vpxord 0x1c0($inp),$xd1,$xd1 + vmovdqu32 $xa1,0x100($out) + vmovdqu32 $xb1,0x140($out) + vmovdqu32 $xc1,0x180($out) + vmovdqu32 $xd1,0x1c0($out) + + vpxord 0x200($inp),$xa2,$xa2 + vpxord 0x240($inp),$xb2,$xb2 + vpxord 0x280($inp),$xc2,$xc2 + vpxord 0x2c0($inp),$xd2,$xd2 + vmovdqu32 $xa2,0x200($out) + vmovdqu32 $xb2,0x240($out) + vmovdqu32 $xc2,0x280($out) + vmovdqu32 $xd2,0x2c0($out) + + 
vpxord 0x300($inp),$xa3,$xa3 + vpxord 0x340($inp),$xb3,$xb3 + vpxord 0x380($inp),$xc3,$xc3 + vpxord 0x3c0($inp),$xd3,$xd3 + lea 0x400($inp),$inp + vmovdqu32 $xa3,0x300($out) + vmovdqu32 $xb3,0x340($out) + vmovdqu32 $xc3,0x380($out) + vmovdqu32 $xd3,0x3c0($out) + lea 0x400($out),$out + + sub \$64*16,$len + jnz .Loop_outer16x + + jmp .Ldone16x + +.align 32 +.Ltail16x: + xor %r9,%r9 + sub $inp,$out + cmp \$64*1,$len + jb .Less_than_64_16x + vpxord ($inp),$xa0,$xa0 # xor with input + vmovdqu32 $xa0,($out,$inp) + je .Ldone16x + vmovdqa32 $xb0,$xa0 + lea 64($inp),$inp + + cmp \$64*2,$len + jb .Less_than_64_16x + vpxord ($inp),$xb0,$xb0 + vmovdqu32 $xb0,($out,$inp) + je .Ldone16x + vmovdqa32 $xc0,$xa0 + lea 64($inp),$inp + + cmp \$64*3,$len + jb .Less_than_64_16x + vpxord ($inp),$xc0,$xc0 + vmovdqu32 $xc0,($out,$inp) + je .Ldone16x + vmovdqa32 $xd0,$xa0 + lea 64($inp),$inp + + cmp \$64*4,$len + jb .Less_than_64_16x + vpxord ($inp),$xd0,$xd0 + vmovdqu32 $xd0,($out,$inp) + je .Ldone16x + vmovdqa32 $xa1,$xa0 + lea 64($inp),$inp + + cmp \$64*5,$len + jb .Less_than_64_16x + vpxord ($inp),$xa1,$xa1 + vmovdqu32 $xa1,($out,$inp) + je .Ldone16x + vmovdqa32 $xb1,$xa0 + lea 64($inp),$inp + + cmp \$64*6,$len + jb .Less_than_64_16x + vpxord ($inp),$xb1,$xb1 + vmovdqu32 $xb1,($out,$inp) + je .Ldone16x + vmovdqa32 $xc1,$xa0 + lea 64($inp),$inp + + cmp \$64*7,$len + jb .Less_than_64_16x + vpxord ($inp),$xc1,$xc1 + vmovdqu32 $xc1,($out,$inp) + je .Ldone16x + vmovdqa32 $xd1,$xa0 + lea 64($inp),$inp + + cmp \$64*8,$len + jb .Less_than_64_16x + vpxord ($inp),$xd1,$xd1 + vmovdqu32 $xd1,($out,$inp) + je .Ldone16x + vmovdqa32 $xa2,$xa0 + lea 64($inp),$inp + + cmp \$64*9,$len + jb .Less_than_64_16x + vpxord ($inp),$xa2,$xa2 + vmovdqu32 $xa2,($out,$inp) + je .Ldone16x + vmovdqa32 $xb2,$xa0 + lea 64($inp),$inp + + cmp \$64*10,$len + jb .Less_than_64_16x + vpxord ($inp),$xb2,$xb2 + vmovdqu32 $xb2,($out,$inp) + je .Ldone16x + vmovdqa32 $xc2,$xa0 + lea 64($inp),$inp + + cmp \$64*11,$len + jb 
.Less_than_64_16x + vpxord ($inp),$xc2,$xc2 + vmovdqu32 $xc2,($out,$inp) + je .Ldone16x + vmovdqa32 $xd2,$xa0 + lea 64($inp),$inp + + cmp \$64*12,$len + jb .Less_than_64_16x + vpxord ($inp),$xd2,$xd2 + vmovdqu32 $xd2,($out,$inp) + je .Ldone16x + vmovdqa32 $xa3,$xa0 + lea 64($inp),$inp + + cmp \$64*13,$len + jb .Less_than_64_16x + vpxord ($inp),$xa3,$xa3 + vmovdqu32 $xa3,($out,$inp) + je .Ldone16x + vmovdqa32 $xb3,$xa0 + lea 64($inp),$inp + + cmp \$64*14,$len + jb .Less_than_64_16x + vpxord ($inp),$xb3,$xb3 + vmovdqu32 $xb3,($out,$inp) + je .Ldone16x + vmovdqa32 $xc3,$xa0 + lea 64($inp),$inp + + cmp \$64*15,$len + jb .Less_than_64_16x + vpxord ($inp),$xc3,$xc3 + vmovdqu32 $xc3,($out,$inp) + je .Ldone16x + vmovdqa32 $xd3,$xa0 + lea 64($inp),$inp + +.Less_than_64_16x: + vmovdqa32 $xa0,0x00(%rsp) + lea ($out,$inp),$out + and \$63,$len + +.Loop_tail16x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail16x + + vpxord $xa0,$xa0,$xa0 + vmovdqa32 $xa0,0(%rsp) + +.Ldone16x: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L16x_epilogue: + ret +.cfi_endproc +.size chacha20_16x,.-chacha20_16x +___ + +# switch to %ymm domain +($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15)); +@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +@key=map("%ymm$_",(16..31)); +($xt0,$xt1,$xt2,$xt3)=@key[0..3]; + +$code.=<<___; +.type chacha20_8xvl,\@function,5 +.align 32 +chacha20_8xvl: +.cfi_startproc +.Lchacha20_8xvl: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + 
sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L8xvl_body: +___ +$code.=<<___; + vzeroupper + + lea .Lsigma(%rip),%r9 + vbroadcasti128 (%r9),$xa3 # key[0] + vbroadcasti128 ($key),$xb3 # key[1] + vbroadcasti128 16($key),$xc3 # key[2] + vbroadcasti128 ($counter),$xd3 # key[3] + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vpshufd \$0xaa,$xa3,$xa2 + vpshufd \$0xff,$xa3,$xa3 + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vpshufd \$0xaa,$xb3,$xb2 + vpshufd \$0xff,$xb3,$xb3 + vmovdqa64 $xb0,@key[4] + vmovdqa64 $xb1,@key[5] + vmovdqa64 $xb2,@key[6] + vmovdqa64 $xb3,@key[7] + + vpshufd \$0x00,$xc3,$xc0 + vpshufd \$0x55,$xc3,$xc1 + vpshufd \$0xaa,$xc3,$xc2 + vpshufd \$0xff,$xc3,$xc3 + vmovdqa64 $xc0,@key[8] + vmovdqa64 $xc1,@key[9] + vmovdqa64 $xc2,@key[10] + vmovdqa64 $xc3,@key[11] + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpshufd \$0xaa,$xd3,$xd2 + vpshufd \$0xff,$xd3,$xd3 + vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet + vmovdqa64 $xd0,@key[12] + vmovdqa64 $xd1,@key[13] + vmovdqa64 $xd2,@key[14] + vmovdqa64 $xd3,@key[15] + + mov \$10,%eax + jmp .Loop8xvl + +.align 32 +.Loop_outer8xvl: + #vpbroadcastd 0(%r9),$xa0 # reload key + #vpbroadcastd 4(%r9),$xa1 + vpbroadcastd 8(%r9),$xa2 + vpbroadcastd 12(%r9),$xa3 + vpaddd .Leight(%rip),@key[12],@key[12] # next SIMD counters + vmovdqa64 @key[4],$xb0 + vmovdqa64 @key[5],$xb1 + vmovdqa64 @key[6],$xb2 + vmovdqa64 @key[7],$xb3 + vmovdqa64 @key[8],$xc0 + vmovdqa64 @key[9],$xc1 + vmovdqa64 @key[10],$xc2 + vmovdqa64 @key[11],$xc3 + vmovdqa64 @key[12],$xd0 + vmovdqa64 
@key[13],$xd1 + vmovdqa64 @key[14],$xd2 + vmovdqa64 @key[15],$xd3 + + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + mov \$10,%eax + jmp .Loop8xvl + +.align 32 +.Loop8xvl: +___ + foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop8xvl + + vpaddd @key[0],$xa0,$xa0 # accumulate key + vpaddd @key[1],$xa1,$xa1 + vpaddd @key[2],$xa2,$xa2 + vpaddd @key[3],$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd @key[4],$xb0,$xb0 + vpaddd @key[5],$xb1,$xb1 + vpaddd @key[6],$xb2,$xb2 + vpaddd @key[7],$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vshufi32x4 \$0,$xb0,$xa0,$xt3 # "de-interlace" further + vshufi32x4 \$3,$xb0,$xa0,$xb0 + vshufi32x4 \$0,$xb1,$xa1,$xa0 + vshufi32x4 \$3,$xb1,$xa1,$xb1 + vshufi32x4 \$0,$xb2,$xa2,$xa1 + vshufi32x4 \$3,$xb2,$xa2,$xb2 + vshufi32x4 \$0,$xb3,$xa3,$xa2 + vshufi32x4 \$3,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); +$code.=<<___; + vpaddd @key[8],$xc0,$xc0 + vpaddd @key[9],$xc1,$xc1 + vpaddd @key[10],$xc2,$xc2 + vpaddd @key[11],$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + 
vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd @key[12],$xd0,$xd0 + vpaddd @key[13],$xd1,$xd1 + vpaddd @key[14],$xd2,$xd2 + vpaddd @key[15],$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xd0,$xc0,$xd0 + vperm2i128 \$0x20,$xd1,$xc1,$xc0 + vperm2i128 \$0x31,$xd1,$xc1,$xd1 + vperm2i128 \$0x20,$xd2,$xc2,$xc1 + vperm2i128 \$0x31,$xd2,$xc2,$xd2 + vperm2i128 \$0x20,$xd3,$xc3,$xc2 + vperm2i128 \$0x31,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); + ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)= + ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3); +$code.=<<___; + cmp \$64*8,$len + jb .Ltail8xvl + + mov \$0x80,%eax # size optimization + vpxord 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + lea ($inp,%rax),$inp # size optimization + vmovdqu32 $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vpxor 0x40($inp),$xc1,$xc1 + vpxor 0x60($inp),$xd1,$xd1 + lea ($inp,%rax),$inp # size optimization + vmovdqu $xa1,0x00($out) + vmovdqu $xb1,0x20($out) + vmovdqu $xc1,0x40($out) + vmovdqu $xd1,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxord 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vpxor 0x40($inp),$xc2,$xc2 + vpxor 0x60($inp),$xd2,$xd2 + lea ($inp,%rax),$inp # size optimization + vmovdqu32 $xa2,0x00($out) + vmovdqu $xb2,0x20($out) + vmovdqu $xc2,0x40($out) + vmovdqu 
$xd2,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vpxor 0x40($inp),$xc3,$xc3 + vpxor 0x60($inp),$xd3,$xd3 + lea ($inp,%rax),$inp # size optimization + vmovdqu $xa3,0x00($out) + vmovdqu $xb3,0x20($out) + vmovdqu $xc3,0x40($out) + vmovdqu $xd3,0x60($out) + lea ($out,%rax),$out # size optimization + + vpbroadcastd 0(%r9),%ymm0 # reload key + vpbroadcastd 4(%r9),%ymm1 + + sub \$64*8,$len + jnz .Loop_outer8xvl + + jmp .Ldone8xvl + +.align 32 +.Ltail8xvl: + vmovdqa64 $xa0,%ymm8 # size optimization +___ +$xa0 = "%ymm8"; +$code.=<<___; + xor %r9,%r9 + sub $inp,$out + cmp \$64*1,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vmovdqu $xa0,0x00($out,$inp) + vmovdqu $xb0,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc0,$xa0 + vmovdqa $xd0,$xb0 + lea 64($inp),$inp + + cmp \$64*2,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc0,$xc0 + vpxor 0x20($inp),$xd0,$xd0 + vmovdqu $xc0,0x00($out,$inp) + vmovdqu $xd0,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xa1,$xa0 + vmovdqa $xb1,$xb0 + lea 64($inp),$inp + + cmp \$64*3,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vmovdqu $xa1,0x00($out,$inp) + vmovdqu $xb1,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc1,$xa0 + vmovdqa $xd1,$xb0 + lea 64($inp),$inp + + cmp \$64*4,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc1,$xc1 + vpxor 0x20($inp),$xd1,$xd1 + vmovdqu $xc1,0x00($out,$inp) + vmovdqu $xd1,0x20($out,$inp) + je .Ldone8xvl + vmovdqa32 $xa2,$xa0 + vmovdqa $xb2,$xb0 + lea 64($inp),$inp + + cmp \$64*5,$len + jb .Less_than_64_8xvl + vpxord 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vmovdqu32 $xa2,0x00($out,$inp) + vmovdqu $xb2,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc2,$xa0 + vmovdqa $xd2,$xb0 + lea 64($inp),$inp + + cmp \$64*6,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc2,$xc2 + vpxor 0x20($inp),$xd2,$xd2 + vmovdqu $xc2,0x00($out,$inp) + vmovdqu 
$xd2,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xa3,$xa0 + vmovdqa $xb3,$xb0 + lea 64($inp),$inp + + cmp \$64*7,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vmovdqu $xa3,0x00($out,$inp) + vmovdqu $xb3,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc3,$xa0 + vmovdqa $xd3,$xb0 + lea 64($inp),$inp + +.Less_than_64_8xvl: + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x20(%rsp) + lea ($out,$inp),$out + and \$63,$len + +.Loop_tail8xvl: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail8xvl + + vpxor $xa0,$xa0,$xa0 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xa0,0x20(%rsp) + +.Ldone8xvl: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L8xvl_epilogue: + ret +.cfi_endproc +.size chacha20_8xvl,.-chacha20_8xvl +___ +if($kernel) { + $code .= "#endif\n"; +} +} + +# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, +# CONTEXT *context,DISPATCHER_CONTEXT *disp) +if ($win64) { +$rec="%rcx"; +$frame="%rdx"; +$context="%r8"; +$disp="%r9"; + +$code.=<<___; +.extern __imp_RtlVirtualUnwind +.type se_handler,\@abi-omnipotent +.align 16 +se_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + lea .Lctr32_body(%rip),%r10 + cmp %r10,%rbx # context->Rip<.Lprologue + jb .Lcommon_seh_tail + + mov 152($context),%rax # pull context->Rsp + + lea .Lno_data(%rip),%r10 # epilogue label + 
cmp %r10,%rbx # context->Rip>=.Lepilogue + jae .Lcommon_seh_tail + + lea 64+24+48(%rax),%rax + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov -32(%rax),%r13 + mov -40(%rax),%r14 + mov -48(%rax),%r15 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore context->Rbp + mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + mov %r14,232($context) # restore context->R14 + mov %r15,240($context) # restore context->R14 + +.Lcommon_seh_tail: + mov 8(%rax),%rdi + mov 16(%rax),%rsi + mov %rax,152($context) # restore context->Rsp + mov %rsi,168($context) # restore context->Rsi + mov %rdi,176($context) # restore context->Rdi + + mov 40($disp),%rdi # disp->ContextRecord + mov $context,%rsi # context + mov \$154,%ecx # sizeof(CONTEXT) + .long 0xa548f3fc # cld; rep movsq + + mov $disp,%rsi + xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + mov 8(%rsi),%rdx # arg2, disp->ImageBase + mov 0(%rsi),%r8 # arg3, disp->ControlPc + mov 16(%rsi),%r9 # arg4, disp->FunctionEntry + mov 40(%rsi),%r10 # disp->ContextRecord + lea 56(%rsi),%r11 # &disp->HandlerData + lea 24(%rsi),%r12 # &disp->EstablisherFrame + mov %r10,32(%rsp) # arg5 + mov %r11,40(%rsp) # arg6 + mov %r12,48(%rsp) # arg7 + mov %rcx,56(%rsp) # arg8, (NULL) + call *__imp_RtlVirtualUnwind(%rip) + + mov \$1,%eax # ExceptionContinueSearch + add \$64,%rsp + popfq + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %rdi + pop %rsi + ret +.size se_handler,.-se_handler + +.type simd_handler,\@abi-omnipotent +.align 16 +simd_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->RipR9 + 
+ mov 4(%r11),%r10d # HandlerData[1] + mov 8(%r11),%ecx # HandlerData[2] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=epilogue label + jae .Lcommon_seh_tail + + neg %rcx + lea -8(%rax,%rcx),%rsi + lea 512($context),%rdi # &context.Xmm6 + neg %ecx + shr \$3,%ecx + .long 0xa548f3fc # cld; rep movsq + + jmp .Lcommon_seh_tail +.size simd_handler,.-simd_handler + +.section .pdata +.align 4 + .rva .LSEH_begin_chacha20_ctr32 + .rva .LSEH_end_chacha20_ctr32 + .rva .LSEH_info_chacha20_ctr32 + + .rva .LSEH_begin_chacha20_ssse3 + .rva .LSEH_end_chacha20_ssse3 + .rva .LSEH_info_chacha20_ssse3 + + .rva .LSEH_begin_chacha20_128 + .rva .LSEH_end_chacha20_128 + .rva .LSEH_info_chacha20_128 + + .rva .LSEH_begin_chacha20_4x + .rva .LSEH_end_chacha20_4x + .rva .LSEH_info_chacha20_4x +___ +$code.=<<___ if ($avx); + .rva .LSEH_begin_chacha20_xop + .rva .LSEH_end_chacha20_xop + .rva .LSEH_info_chacha20_xop +___ +$code.=<<___ if ($avx>1); + .rva .LSEH_begin_chacha20_avx2 + .rva .LSEH_end_chacha20_avx2 + .rva .LSEH_info_chacha20_avx2 +___ +$code.=<<___ if ($avx>2); + .rva .LSEH_begin_chacha20_avx512 + .rva .LSEH_end_chacha20_avx512 + .rva .LSEH_info_chacha20_avx512 + + .rva .LSEH_begin_chacha20_avx512vl + .rva .LSEH_end_chacha20_avx512vl + .rva .LSEH_info_chacha20_avx512vl + + .rva .LSEH_begin_chacha20_16x + .rva .LSEH_end_chacha20_16x + .rva .LSEH_info_chacha20_16x + + .rva .LSEH_begin_chacha20_8xvl + .rva .LSEH_end_chacha20_8xvl + .rva .LSEH_info_chacha20_8xvl +___ +$code.=<<___; +.section .xdata +.align 8 +.LSEH_info_chacha20_ctr32: + .byte 9,0,0,0 + .rva se_handler + +.LSEH_info_chacha20_ssse3: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lssse3_body,.Lssse3_epilogue + .long 0x20,0 + +.LSEH_info_chacha20_128: + .byte 9,0,0,0 + .rva simd_handler + .rva .L128_body,.L128_epilogue + .long 0x60,0 + +.LSEH_info_chacha20_4x: + .byte 9,0,0,0 + .rva simd_handler + .rva .L4x_body,.L4x_epilogue + .long 0xa0,0 +___ +$code.=<<___ if ($avx); +.LSEH_info_chacha20_xop: + 
.byte 9,0,0,0 + .rva simd_handler + .rva .L4xop_body,.L4xop_epilogue # HandlerData[] + .long 0xa0,0 +___ +$code.=<<___ if ($avx>1); +.LSEH_info_chacha20_avx2: + .byte 9,0,0,0 + .rva simd_handler + .rva .L8x_body,.L8x_epilogue # HandlerData[] + .long 0xa0,0 +___ +$code.=<<___ if ($avx>2); +.LSEH_info_chacha20_avx512: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lavx512_body,.Lavx512_epilogue # HandlerData[] + .long 0x20,0 + +.LSEH_info_chacha20_avx512vl: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lavx512vl_body,.Lavx512vl_epilogue # HandlerData[] + .long 0x20,0 + +.LSEH_info_chacha20_16x: + .byte 9,0,0,0 + .rva simd_handler + .rva .L16x_body,.L16x_epilogue # HandlerData[] + .long 0xa0,0 + +.LSEH_info_chacha20_8xvl: + .byte 9,0,0,0 + .rva simd_handler + .rva .L8xvl_body,.L8xvl_epilogue # HandlerData[] + .long 0xa0,0 +___ +} + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/ge; + + s/%x#%[yz]/%x/g; # "down-shift" + + if ($kernel) { + s/(^\.type.*),[0-9]+$/\1/; + next if /^\.cfi.*/; + } + + print $_,"\n"; +} + +close STDOUT; diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20.c b/net/wireguard/crypto/zinc/chacha20/chacha20.c new file mode 100644 index 000000000000..f4ca8b09d01e --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * Implementation of the ChaCha20 stream cipher. + * + * Information: https://cr.yp.to/chacha.html + */ + +#include +#include "../selftest/run.h" + +#include +#include +#include +#include +#include // For crypto_xor_cpy. 
+ +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "chacha20-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64) +#include "chacha20-arm-glue.c" +#elif defined(CONFIG_ZINC_ARCH_MIPS) +#include "chacha20-mips-glue.c" +#else +static bool *const chacha20_nobs[] __initconst = { }; +static void __init chacha20_fpu_init(void) +{ +} +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + return false; +} +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + return false; +} +#endif + +#define QUARTER_ROUND(x, a, b, c, d) ( \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 16), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 12), \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 8), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 7) \ +) + +#define C(i, j) (i * 4 + j) + +#define DOUBLE_ROUND(x) ( \ + /* Column Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 0), C(2, 0), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 1), C(2, 1), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 2), C(2, 2), C(3, 2)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 3), C(2, 3), C(3, 3)), \ + /* Diagonal Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 1), C(2, 2), C(3, 3)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 2), C(2, 3), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 3), C(2, 0), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 0), C(2, 1), C(3, 2)) \ +) + +#define TWENTY_ROUNDS(x) ( \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x) \ +) + +static void chacha20_block_generic(struct chacha20_ctx *ctx, __le32 *stream) +{ + u32 x[CHACHA20_BLOCK_WORDS]; + int i; + + for (i = 0; i < ARRAY_SIZE(x); ++i) + x[i] = ctx->state[i]; + + 
TWENTY_ROUNDS(x); + + for (i = 0; i < ARRAY_SIZE(x); ++i) + stream[i] = cpu_to_le32(x[i] + ctx->state[i]); + + ctx->counter[0] += 1; +} + +static void chacha20_generic(struct chacha20_ctx *ctx, u8 *out, const u8 *in, + u32 len) +{ + __le32 buf[CHACHA20_BLOCK_WORDS]; + + while (len >= CHACHA20_BLOCK_SIZE) { + chacha20_block_generic(ctx, buf); + crypto_xor_cpy(out, in, (u8 *)buf, CHACHA20_BLOCK_SIZE); + len -= CHACHA20_BLOCK_SIZE; + out += CHACHA20_BLOCK_SIZE; + in += CHACHA20_BLOCK_SIZE; + } + if (len) { + chacha20_block_generic(ctx, buf); + crypto_xor_cpy(out, in, (u8 *)buf, len); + } +} + +void chacha20(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 len, + simd_context_t *simd_context) +{ + if (!chacha20_arch(ctx, dst, src, len, simd_context)) + chacha20_generic(ctx, dst, src, len); +} + +static void hchacha20_generic(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE]) +{ + u32 x[] = { CHACHA20_CONSTANT_EXPA, + CHACHA20_CONSTANT_ND_3, + CHACHA20_CONSTANT_2_BY, + CHACHA20_CONSTANT_TE_K, + get_unaligned_le32(key + 0), + get_unaligned_le32(key + 4), + get_unaligned_le32(key + 8), + get_unaligned_le32(key + 12), + get_unaligned_le32(key + 16), + get_unaligned_le32(key + 20), + get_unaligned_le32(key + 24), + get_unaligned_le32(key + 28), + get_unaligned_le32(nonce + 0), + get_unaligned_le32(nonce + 4), + get_unaligned_le32(nonce + 8), + get_unaligned_le32(nonce + 12) + }; + + TWENTY_ROUNDS(x); + + memcpy(derived_key + 0, x + 0, sizeof(u32) * 4); + memcpy(derived_key + 4, x + 12, sizeof(u32) * 4); +} + +/* Derived key should be 32-bit aligned */ +void hchacha20(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], simd_context_t *simd_context) +{ + if (!hchacha20_arch(derived_key, nonce, key, simd_context)) + hchacha20_generic(derived_key, nonce, key); +} + +#include "../selftest/chacha20.c" + +static bool nosimd __initdata = false; + +#ifndef 
COMPAT_ZINC_IS_A_MODULE +int __init chacha20_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + chacha20_fpu_init(); + if (!selftest_run("chacha20", chacha20_selftest, chacha20_nobs, + ARRAY_SIZE(chacha20_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ChaCha20 stream cipher"); +MODULE_AUTHOR("Jason A. Donenfeld "); +#endif diff --git a/net/wireguard/crypto/zinc/chacha20poly1305.c b/net/wireguard/crypto/zinc/chacha20poly1305.c new file mode 100644 index 000000000000..cee29db01bc0 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20poly1305.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is an implementation of the ChaCha20Poly1305 AEAD construction. + * + * Information: https://tools.ietf.org/html/rfc8439 + */ + +#include +#include +#include +#include "selftest/run.h" + +#include +#include +#include +#include +#include // For blkcipher_walk. 
+ +static const u8 pad0[CHACHA20_BLOCK_SIZE] = { 0 }; + +static inline void +__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + union { + u8 block0[POLY1305_KEY_SIZE]; + __le64 lens[2]; + } b = { { 0 } }; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + chacha20(&chacha20_state, dst, src, src_len, simd_context); + + poly1305_update(&poly1305_state, dst, src_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + poly1305_final(&poly1305_state, dst + src_len, simd_context); + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); +} + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + + simd_get(&simd_context); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, nonce, key, + &simd_context); + simd_put(&simd_context); +} + +bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, + const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + struct sg_mapping_iter miter; + size_t 
partial = 0; + ssize_t sl; + union { + u8 chacha20_stream[CHACHA20_BLOCK_SIZE]; + u8 block0[POLY1305_KEY_SIZE]; + u8 mac[POLY1305_MAC_SIZE]; + __le64 lens[2]; + } b __aligned(16) = { { 0 } }; + + if (WARN_ON(src_len > INT_MAX)) + return false; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + sg_miter_start(&miter, src, sg_nents(src), SG_MITER_TO_SG | SG_MITER_ATOMIC); + for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) { + u8 *addr = miter.addr; + size_t length = min_t(size_t, sl, miter.length); + + if (unlikely(partial)) { + size_t l = min(length, CHACHA20_BLOCK_SIZE - partial); + + crypto_xor(addr, b.chacha20_stream + partial, l); + partial = (partial + l) & (CHACHA20_BLOCK_SIZE - 1); + + addr += l; + length -= l; + } + + if (likely(length >= CHACHA20_BLOCK_SIZE || length == sl)) { + size_t l = length; + + if (unlikely(length < sl)) + l &= ~(CHACHA20_BLOCK_SIZE - 1); + chacha20(&chacha20_state, addr, addr, l, simd_context); + addr += l; + length -= l; + } + + if (unlikely(length > 0)) { + chacha20(&chacha20_state, b.chacha20_stream, pad0, + CHACHA20_BLOCK_SIZE, simd_context); + crypto_xor(addr, b.chacha20_stream, length); + partial = length; + } + + poly1305_update(&poly1305_state, miter.addr, + min_t(size_t, sl, miter.length), simd_context); + + simd_relax(simd_context); + } + + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + if (likely(sl <= -POLY1305_MAC_SIZE)) + poly1305_final(&poly1305_state, miter.addr + miter.length + sl, + simd_context); + + sg_miter_stop(&miter); + + if 
(unlikely(sl > -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.mac, simd_context); + scatterwalk_map_and_copy(b.mac, src, src_len, sizeof(b.mac), 1); + } + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + return true; +} + +static inline bool +__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + int ret; + size_t dst_len; + union { + u8 block0[POLY1305_KEY_SIZE]; + u8 mac[POLY1305_MAC_SIZE]; + __le64 lens[2]; + } b = { { 0 } }; + + if (unlikely(src_len < POLY1305_MAC_SIZE)) + return false; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + dst_len = src_len - POLY1305_MAC_SIZE; + poly1305_update(&poly1305_state, src, dst_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - dst_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(dst_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + poly1305_final(&poly1305_state, b.mac, simd_context); + + ret = crypto_memneq(b.mac, src + dst_len, POLY1305_MAC_SIZE); + if (likely(!ret)) + chacha20(&chacha20_state, dst, src, dst_len, simd_context); + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + + return !ret; +} + +bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context, ret; + + 
simd_get(&simd_context); + ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, nonce, + key, &simd_context); + simd_put(&simd_context); + return ret; +} + +bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, + size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + struct sg_mapping_iter miter; + size_t partial = 0; + ssize_t sl; + union { + u8 chacha20_stream[CHACHA20_BLOCK_SIZE]; + u8 block0[POLY1305_KEY_SIZE]; + struct { + u8 read_mac[POLY1305_MAC_SIZE]; + u8 computed_mac[POLY1305_MAC_SIZE]; + }; + __le64 lens[2]; + } b __aligned(16) = { { 0 } }; + bool ret = false; + + if (unlikely(src_len < POLY1305_MAC_SIZE || WARN_ON(src_len > INT_MAX))) + return ret; + src_len -= POLY1305_MAC_SIZE; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + sg_miter_start(&miter, src, sg_nents(src), SG_MITER_TO_SG | SG_MITER_ATOMIC); + for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) { + u8 *addr = miter.addr; + size_t length = min_t(size_t, sl, miter.length); + + poly1305_update(&poly1305_state, addr, length, simd_context); + + if (unlikely(partial)) { + size_t l = min(length, CHACHA20_BLOCK_SIZE - partial); + + crypto_xor(addr, b.chacha20_stream + partial, l); + partial = (partial + l) & (CHACHA20_BLOCK_SIZE - 1); + + addr += l; + length -= l; + } + + if (likely(length >= CHACHA20_BLOCK_SIZE || length == sl)) { + size_t l = length; + + if (unlikely(length < sl)) + l &= ~(CHACHA20_BLOCK_SIZE - 1); + chacha20(&chacha20_state, addr, addr, l, simd_context); + addr += l; + length -= l; + } + + if 
(unlikely(length > 0)) { + chacha20(&chacha20_state, b.chacha20_stream, pad0, + CHACHA20_BLOCK_SIZE, simd_context); + crypto_xor(addr, b.chacha20_stream, length); + partial = length; + } + + simd_relax(simd_context); + } + + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + if (likely(sl <= -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.computed_mac, simd_context); + ret = !crypto_memneq(b.computed_mac, + miter.addr + miter.length + sl, + POLY1305_MAC_SIZE); + } + + sg_miter_stop(&miter); + + if (unlikely(sl > -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.computed_mac, simd_context); + scatterwalk_map_and_copy(b.read_mac, src, src_len, + sizeof(b.read_mac), 0); + ret = !crypto_memneq(b.read_mac, b.computed_mac, + POLY1305_MAC_SIZE); + + } + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + return ret; +} + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + u32 derived_key[CHACHA20_KEY_WORDS] __aligned(16); + + simd_get(&simd_context); + hchacha20(derived_key, nonce, key, &simd_context); + cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key)); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce + 16), + (u8 *)derived_key, &simd_context); + memzero_explicit(derived_key, CHACHA20POLY1305_KEY_SIZE); + simd_put(&simd_context); +} + +bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + bool ret; + simd_context_t simd_context; + u32 
derived_key[CHACHA20_KEY_WORDS] __aligned(16); + + simd_get(&simd_context); + hchacha20(derived_key, nonce, key, &simd_context); + cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key)); + ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce + 16), + (u8 *)derived_key, &simd_context); + memzero_explicit(derived_key, CHACHA20POLY1305_KEY_SIZE); + simd_put(&simd_context); + return ret; +} + +#include "selftest/chacha20poly1305.c" + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init chacha20poly1305_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!selftest_run("chacha20poly1305", chacha20poly1305_selftest, + NULL, 0)) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction"); +MODULE_AUTHOR("Jason A. Donenfeld "); +#endif diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c b/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c new file mode 100644 index 000000000000..e0c5a5d297c0 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include +#include + +asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]); + +static bool curve25519_use_neon __ro_after_init; +static bool *const curve25519_nobs[] __initconst = { &curve25519_use_neon }; +static void __init curve25519_fpu_init(void) +{ + curve25519_use_neon = elf_hwcap & HWCAP_NEON; +} + +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + simd_context_t simd_context; + bool used_arch = false; + + simd_get(&simd_context); + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + !IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && curve25519_use_neon && + simd_use(&simd_context)) { + curve25519_neon(mypublic, secret, basepoint); + used_arch = true; + } + simd_put(&simd_context); + return used_arch; +} + +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + return false; +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S b/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S new file mode 100644 index 000000000000..8eca8a11ef28 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S @@ -0,0 +1,2064 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This + * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been + * manually reworked for use in kernel space. 
+ */ + +#if defined(CONFIG_KERNEL_MODE_NEON) && !defined(__ARMEB__) +#include + +.text +.fpu neon +.arch armv7-a +.align 4 + +SYM_FUNC_START(curve25519_neon) + push {r4-r11, lr} + mov ip, sp + sub r3, sp, #704 + and r3, r3, #0xfffffff0 + mov sp, r3 + movw r4, #0 + movw r5, #254 + vmov.i32 q0, #1 + vshr.u64 q1, q0, #7 + vshr.u64 q0, q0, #8 + vmov.i32 d4, #19 + vmov.i32 d5, #38 + add r6, sp, #480 + vst1.8 {d2-d3}, [r6, : 128]! + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d4-d5}, [r6, : 128] + add r6, r3, #0 + vmov.i32 q2, #0 + vst1.8 {d4-d5}, [r6, : 128]! + vst1.8 {d4-d5}, [r6, : 128]! + vst1.8 d4, [r6, : 64] + add r6, r3, #0 + movw r7, #960 + sub r7, r7, #2 + neg r7, r7 + sub r7, r7, r7, LSL #7 + str r7, [r6] + add r6, sp, #672 + vld1.8 {d4-d5}, [r1]! + vld1.8 {d6-d7}, [r1] + vst1.8 {d4-d5}, [r6, : 128]! + vst1.8 {d6-d7}, [r6, : 128] + sub r1, r6, #16 + ldrb r6, [r1] + and r6, r6, #248 + strb r6, [r1] + ldrb r6, [r1, #31] + and r6, r6, #127 + orr r6, r6, #64 + strb r6, [r1, #31] + vmov.i64 q2, #0xffffffff + vshr.u64 q3, q2, #7 + vshr.u64 q2, q2, #6 + vld1.8 {d8}, [r2] + vld1.8 {d10}, [r2] + add r2, r2, #6 + vld1.8 {d12}, [r2] + vld1.8 {d14}, [r2] + add r2, r2, #6 + vld1.8 {d16}, [r2] + add r2, r2, #4 + vld1.8 {d18}, [r2] + vld1.8 {d20}, [r2] + add r2, r2, #6 + vld1.8 {d22}, [r2] + add r2, r2, #2 + vld1.8 {d24}, [r2] + vld1.8 {d26}, [r2] + vshr.u64 q5, q5, #26 + vshr.u64 q6, q6, #3 + vshr.u64 q7, q7, #29 + vshr.u64 q8, q8, #6 + vshr.u64 q10, q10, #25 + vshr.u64 q11, q11, #3 + vshr.u64 q12, q12, #12 + vshr.u64 q13, q13, #38 + vand q4, q4, q2 + vand q6, q6, q2 + vand q8, q8, q2 + vand q10, q10, q2 + vand q2, q12, q2 + vand q5, q5, q3 + vand q7, q7, q3 + vand q9, q9, q3 + vand q11, q11, q3 + vand q3, q13, q3 + add r2, r3, #48 + vadd.i64 q12, q4, q1 + vadd.i64 q13, q10, q1 + vshr.s64 q12, q12, #26 + vshr.s64 q13, q13, #26 + vadd.i64 q5, q5, q12 + vshl.i64 q12, q12, #26 + vadd.i64 q14, q5, q0 + vadd.i64 q11, q11, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q11, q0 + vsub.i64 
q4, q4, q12 + vshr.s64 q12, q14, #25 + vsub.i64 q10, q10, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q12 + vshl.i64 q12, q12, #25 + vadd.i64 q14, q6, q1 + vadd.i64 q2, q2, q13 + vsub.i64 q5, q5, q12 + vshr.s64 q12, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q1 + vadd.i64 q7, q7, q12 + vshl.i64 q12, q12, #26 + vadd.i64 q15, q7, q0 + vsub.i64 q11, q11, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q12 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q3, q0 + vadd.i64 q8, q8, q12 + vshl.i64 q12, q12, #25 + vadd.i64 q15, q8, q1 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q7, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q9, q9, q12 + vtrn.32 d12, d14 + vshl.i64 q12, q12, #26 + vtrn.32 d13, d15 + vadd.i64 q0, q9, q0 + vadd.i64 q4, q4, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q6, q13, #4 + vsub.i64 q7, q8, q12 + vshr.s64 q0, q0, #25 + vadd.i64 q4, q4, q6 + vadd.i64 q6, q10, q0 + vshl.i64 q0, q0, #25 + vadd.i64 q8, q6, q1 + vadd.i64 q4, q4, q13 + vshl.i64 q10, q13, #25 + vadd.i64 q1, q4, q1 + vsub.i64 q0, q9, q0 + vshr.s64 q8, q8, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d14, d0 + vshr.s64 q1, q1, #26 + vtrn.32 d15, d1 + vadd.i64 q0, q11, q8 + vst1.8 d14, [r2, : 64] + vshl.i64 q7, q8, #26 + vadd.i64 q5, q5, q1 + vtrn.32 d4, d6 + vshl.i64 q1, q1, #26 + vtrn.32 d5, d7 + vsub.i64 q3, q6, q7 + add r2, r2, #16 + vsub.i64 q1, q4, q1 + vst1.8 d4, [r2, : 64] + vtrn.32 d6, d0 + vtrn.32 d7, d1 + sub r2, r2, #8 + vtrn.32 d2, d10 + vtrn.32 d3, d11 + vst1.8 d6, [r2, : 64] + sub r2, r2, #24 + vst1.8 d2, [r2, : 64] + add r2, r3, #96 + vmov.i32 q0, #0 + vmov.i64 d2, #0xff + vmov.i64 d3, #0 + vshr.u32 q1, q1, #7 + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 d0, [r2, : 64] + add r2, r3, #144 + vmov.i32 q0, #0 + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! 
+ vst1.8 d0, [r2, : 64] + add r2, r3, #240 + vmov.i32 q0, #0 + vmov.i64 d2, #0xff + vmov.i64 d3, #0 + vshr.u32 q1, q1, #7 + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 d0, [r2, : 64] + add r2, r3, #48 + add r6, r3, #192 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d2-d3}, [r6, : 128]! + vst1.8 d4, [r6, : 64] +.Lmainloop: + mov r2, r5, LSR #3 + and r6, r5, #7 + ldrb r2, [r1, r2] + mov r2, r2, LSR r6 + and r2, r2, #1 + str r5, [sp, #456] + eor r4, r4, r2 + str r2, [sp, #460] + neg r2, r4 + add r4, r3, #96 + add r5, r3, #192 + add r6, r3, #144 + vld1.8 {d8-d9}, [r4, : 128]! + add r7, r3, #240 + vld1.8 {d10-d11}, [r5, : 128]! + veor q6, q4, q5 + vld1.8 {d14-d15}, [r6, : 128]! + vdup.i32 q8, r2 + vld1.8 {d18-d19}, [r7, : 128]! + veor q10, q7, q9 + vld1.8 {d22-d23}, [r4, : 128]! + vand q6, q6, q8 + vld1.8 {d24-d25}, [r5, : 128]! + vand q10, q10, q8 + vld1.8 {d26-d27}, [r6, : 128]! + veor q4, q4, q6 + vld1.8 {d28-d29}, [r7, : 128]! + veor q5, q5, q6 + vld1.8 {d0}, [r4, : 64] + veor q6, q7, q10 + vld1.8 {d2}, [r5, : 64] + veor q7, q9, q10 + vld1.8 {d4}, [r6, : 64] + veor q9, q11, q12 + vld1.8 {d6}, [r7, : 64] + veor q10, q0, q1 + sub r2, r4, #32 + vand q9, q9, q8 + sub r4, r5, #32 + vand q10, q10, q8 + sub r5, r6, #32 + veor q11, q11, q9 + sub r6, r7, #32 + veor q0, q0, q10 + veor q9, q12, q9 + veor q1, q1, q10 + veor q10, q13, q14 + veor q12, q2, q3 + vand q10, q10, q8 + vand q8, q12, q8 + veor q12, q13, q10 + veor q2, q2, q8 + veor q10, q14, q10 + veor q3, q3, q8 + vadd.i32 q8, q4, q6 + vsub.i32 q4, q4, q6 + vst1.8 {d16-d17}, [r2, : 128]! + vadd.i32 q6, q11, q12 + vst1.8 {d8-d9}, [r5, : 128]! + vsub.i32 q4, q11, q12 + vst1.8 {d12-d13}, [r2, : 128]! + vadd.i32 q6, q0, q2 + vst1.8 {d8-d9}, [r5, : 128]! + vsub.i32 q0, q0, q2 + vst1.8 d12, [r2, : 64] + vadd.i32 q2, q5, q7 + vst1.8 d0, [r5, : 64] + vsub.i32 q0, q5, q7 + vst1.8 {d4-d5}, [r4, : 128]! 
+ vadd.i32 q2, q9, q10 + vst1.8 {d0-d1}, [r6, : 128]! + vsub.i32 q0, q9, q10 + vst1.8 {d4-d5}, [r4, : 128]! + vadd.i32 q2, q1, q3 + vst1.8 {d0-d1}, [r6, : 128]! + vsub.i32 q0, q1, q3 + vst1.8 d4, [r4, : 64] + vst1.8 d0, [r6, : 64] + add r2, sp, #512 + add r4, r3, #96 + add r5, r3, #144 + vld1.8 {d0-d1}, [r2, : 128] + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4-d5}, [r5, : 128]! + vzip.i32 q1, q2 + vld1.8 {d6-d7}, [r4, : 128]! + vld1.8 {d8-d9}, [r5, : 128]! + vshl.i32 q5, q1, #1 + vzip.i32 q3, q4 + vshl.i32 q6, q2, #1 + vld1.8 {d14}, [r4, : 64] + vshl.i32 q8, q3, #1 + vld1.8 {d15}, [r5, : 64] + vshl.i32 q9, q4, #1 + vmul.i32 d21, d7, d1 + vtrn.32 d14, d15 + vmul.i32 q11, q4, q0 + vmul.i32 q0, q7, q0 + vmull.s32 q12, d2, d2 + vmlal.s32 q12, d11, d1 + vmlal.s32 q12, d12, d0 + vmlal.s32 q12, d13, d23 + vmlal.s32 q12, d16, d22 + vmlal.s32 q12, d7, d21 + vmull.s32 q10, d2, d11 + vmlal.s32 q10, d4, d1 + vmlal.s32 q10, d13, d0 + vmlal.s32 q10, d6, d23 + vmlal.s32 q10, d17, d22 + vmull.s32 q13, d10, d4 + vmlal.s32 q13, d11, d3 + vmlal.s32 q13, d13, d1 + vmlal.s32 q13, d16, d0 + vmlal.s32 q13, d17, d23 + vmlal.s32 q13, d8, d22 + vmull.s32 q1, d10, d5 + vmlal.s32 q1, d11, d4 + vmlal.s32 q1, d6, d1 + vmlal.s32 q1, d17, d0 + vmlal.s32 q1, d8, d23 + vmull.s32 q14, d10, d6 + vmlal.s32 q14, d11, d13 + vmlal.s32 q14, d4, d4 + vmlal.s32 q14, d17, d1 + vmlal.s32 q14, d18, d0 + vmlal.s32 q14, d9, d23 + vmull.s32 q11, d10, d7 + vmlal.s32 q11, d11, d6 + vmlal.s32 q11, d12, d5 + vmlal.s32 q11, d8, d1 + vmlal.s32 q11, d19, d0 + vmull.s32 q15, d10, d8 + vmlal.s32 q15, d11, d17 + vmlal.s32 q15, d12, d6 + vmlal.s32 q15, d13, d5 + vmlal.s32 q15, d19, d1 + vmlal.s32 q15, d14, d0 + vmull.s32 q2, d10, d9 + vmlal.s32 q2, d11, d8 + vmlal.s32 q2, d12, d7 + vmlal.s32 q2, d13, d6 + vmlal.s32 q2, d14, d1 + vmull.s32 q0, d15, d1 + vmlal.s32 q0, d10, d14 + vmlal.s32 q0, d11, d19 + vmlal.s32 q0, d12, d8 + vmlal.s32 q0, d13, d17 + vmlal.s32 q0, d6, d6 + add r2, sp, #480 + vld1.8 {d18-d19}, [r2, : 128]! 
+ vmull.s32 q3, d16, d7 + vmlal.s32 q3, d10, d15 + vmlal.s32 q3, d11, d14 + vmlal.s32 q3, d12, d9 + vmlal.s32 q3, d13, d8 + vld1.8 {d8-d9}, [r2, : 128] + vadd.i64 q5, q12, q9 + vadd.i64 q6, q15, q9 + vshr.s64 q5, q5, #26 + vshr.s64 q6, q6, #26 + vadd.i64 q7, q10, q5 + vshl.i64 q5, q5, #26 + vadd.i64 q8, q7, q4 + vadd.i64 q2, q2, q6 + vshl.i64 q6, q6, #26 + vadd.i64 q10, q2, q4 + vsub.i64 q5, q12, q5 + vshr.s64 q8, q8, #25 + vsub.i64 q6, q15, q6 + vshr.s64 q10, q10, #25 + vadd.i64 q12, q13, q8 + vshl.i64 q8, q8, #25 + vadd.i64 q13, q12, q9 + vadd.i64 q0, q0, q10 + vsub.i64 q7, q7, q8 + vshr.s64 q8, q13, #26 + vshl.i64 q10, q10, #25 + vadd.i64 q13, q0, q9 + vadd.i64 q1, q1, q8 + vshl.i64 q8, q8, #26 + vadd.i64 q15, q1, q4 + vsub.i64 q2, q2, q10 + vshr.s64 q10, q13, #26 + vsub.i64 q8, q12, q8 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q10 + vshl.i64 q10, q10, #26 + vadd.i64 q13, q3, q4 + vadd.i64 q14, q14, q12 + add r2, r3, #288 + vshl.i64 q12, q12, #25 + add r4, r3, #336 + vadd.i64 q15, q14, q9 + add r2, r2, #8 + vsub.i64 q0, q0, q10 + add r4, r4, #8 + vshr.s64 q10, q13, #25 + vsub.i64 q1, q1, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q13, q10, q10 + vadd.i64 q11, q11, q12 + vtrn.32 d16, d2 + vshl.i64 q12, q12, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q11, q4 + vadd.i64 q4, q5, q13 + vst1.8 d16, [r2, : 64]! + vshl.i64 q5, q10, #4 + vst1.8 d17, [r4, : 64]! 
+ vsub.i64 q8, q14, q12 + vshr.s64 q1, q1, #25 + vadd.i64 q4, q4, q5 + vadd.i64 q5, q6, q1 + vshl.i64 q1, q1, #25 + vadd.i64 q6, q5, q9 + vadd.i64 q4, q4, q10 + vshl.i64 q10, q10, #25 + vadd.i64 q9, q4, q9 + vsub.i64 q1, q11, q1 + vshr.s64 q6, q6, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d16, d2 + vshr.s64 q9, q9, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q2, q6 + vst1.8 d16, [r2, : 64] + vshl.i64 q2, q6, #26 + vst1.8 d17, [r4, : 64] + vadd.i64 q6, q7, q9 + vtrn.32 d0, d6 + vshl.i64 q7, q9, #26 + vtrn.32 d1, d7 + vsub.i64 q2, q5, q2 + add r2, r2, #16 + vsub.i64 q3, q4, q7 + vst1.8 d0, [r2, : 64] + add r4, r4, #16 + vst1.8 d1, [r4, : 64] + vtrn.32 d4, d2 + vtrn.32 d5, d3 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d6, d12 + vtrn.32 d7, d13 + vst1.8 d4, [r2, : 64] + vst1.8 d5, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d6, [r2, : 64] + vst1.8 d7, [r4, : 64] + add r2, r3, #240 + add r4, r3, #96 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #144 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #192 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! + vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! 
+ vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! + vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! + vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128] + vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, 
d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 + vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 + vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, 
#26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #144 + vshl.i64 q7, q7, #25 + add r4, r3, #96 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q12, q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! + vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + add r2, r3, #288 + add r4, r3, #336 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vsub.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4-d5}, [r4, : 128]! + vsub.i32 q1, q1, q2 + add r5, r3, #240 + vld1.8 {d4}, [r2, : 64] + vld1.8 {d6}, [r4, : 64] + vsub.i32 q2, q2, q3 + vst1.8 {d0-d1}, [r5, : 128]! + vst1.8 {d2-d3}, [r5, : 128]! + vst1.8 d4, [r5, : 64] + add r2, r3, #144 + add r4, r3, #96 + add r5, r3, #144 + add r6, r3, #192 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! 
+ vsub.i32 q2, q0, q1 + vadd.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d6-d7}, [r4, : 128]! + vsub.i32 q4, q1, q3 + vadd.i32 q1, q1, q3 + vld1.8 {d6}, [r2, : 64] + vld1.8 {d10}, [r4, : 64] + vsub.i32 q6, q3, q5 + vadd.i32 q3, q3, q5 + vst1.8 {d4-d5}, [r5, : 128]! + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d8-d9}, [r5, : 128]! + vst1.8 {d2-d3}, [r6, : 128]! + vst1.8 d12, [r5, : 64] + vst1.8 d6, [r6, : 64] + add r2, r3, #0 + add r4, r3, #240 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #336 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #288 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! + vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! 
+ vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! + vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 
+ vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 + vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #288 + vshl.i64 q7, q7, #25 + add r4, r3, #96 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q12, 
q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! + vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + add r2, sp, #512 + add r4, r3, #144 + add r5, r3, #192 + vld1.8 {d0-d1}, [r2, : 128] + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4-d5}, [r5, : 128]! + vzip.i32 q1, q2 + vld1.8 {d6-d7}, [r4, : 128]! + vld1.8 {d8-d9}, [r5, : 128]! 
+ vshl.i32 q5, q1, #1 + vzip.i32 q3, q4 + vshl.i32 q6, q2, #1 + vld1.8 {d14}, [r4, : 64] + vshl.i32 q8, q3, #1 + vld1.8 {d15}, [r5, : 64] + vshl.i32 q9, q4, #1 + vmul.i32 d21, d7, d1 + vtrn.32 d14, d15 + vmul.i32 q11, q4, q0 + vmul.i32 q0, q7, q0 + vmull.s32 q12, d2, d2 + vmlal.s32 q12, d11, d1 + vmlal.s32 q12, d12, d0 + vmlal.s32 q12, d13, d23 + vmlal.s32 q12, d16, d22 + vmlal.s32 q12, d7, d21 + vmull.s32 q10, d2, d11 + vmlal.s32 q10, d4, d1 + vmlal.s32 q10, d13, d0 + vmlal.s32 q10, d6, d23 + vmlal.s32 q10, d17, d22 + vmull.s32 q13, d10, d4 + vmlal.s32 q13, d11, d3 + vmlal.s32 q13, d13, d1 + vmlal.s32 q13, d16, d0 + vmlal.s32 q13, d17, d23 + vmlal.s32 q13, d8, d22 + vmull.s32 q1, d10, d5 + vmlal.s32 q1, d11, d4 + vmlal.s32 q1, d6, d1 + vmlal.s32 q1, d17, d0 + vmlal.s32 q1, d8, d23 + vmull.s32 q14, d10, d6 + vmlal.s32 q14, d11, d13 + vmlal.s32 q14, d4, d4 + vmlal.s32 q14, d17, d1 + vmlal.s32 q14, d18, d0 + vmlal.s32 q14, d9, d23 + vmull.s32 q11, d10, d7 + vmlal.s32 q11, d11, d6 + vmlal.s32 q11, d12, d5 + vmlal.s32 q11, d8, d1 + vmlal.s32 q11, d19, d0 + vmull.s32 q15, d10, d8 + vmlal.s32 q15, d11, d17 + vmlal.s32 q15, d12, d6 + vmlal.s32 q15, d13, d5 + vmlal.s32 q15, d19, d1 + vmlal.s32 q15, d14, d0 + vmull.s32 q2, d10, d9 + vmlal.s32 q2, d11, d8 + vmlal.s32 q2, d12, d7 + vmlal.s32 q2, d13, d6 + vmlal.s32 q2, d14, d1 + vmull.s32 q0, d15, d1 + vmlal.s32 q0, d10, d14 + vmlal.s32 q0, d11, d19 + vmlal.s32 q0, d12, d8 + vmlal.s32 q0, d13, d17 + vmlal.s32 q0, d6, d6 + add r2, sp, #480 + vld1.8 {d18-d19}, [r2, : 128]! 
+ vmull.s32 q3, d16, d7 + vmlal.s32 q3, d10, d15 + vmlal.s32 q3, d11, d14 + vmlal.s32 q3, d12, d9 + vmlal.s32 q3, d13, d8 + vld1.8 {d8-d9}, [r2, : 128] + vadd.i64 q5, q12, q9 + vadd.i64 q6, q15, q9 + vshr.s64 q5, q5, #26 + vshr.s64 q6, q6, #26 + vadd.i64 q7, q10, q5 + vshl.i64 q5, q5, #26 + vadd.i64 q8, q7, q4 + vadd.i64 q2, q2, q6 + vshl.i64 q6, q6, #26 + vadd.i64 q10, q2, q4 + vsub.i64 q5, q12, q5 + vshr.s64 q8, q8, #25 + vsub.i64 q6, q15, q6 + vshr.s64 q10, q10, #25 + vadd.i64 q12, q13, q8 + vshl.i64 q8, q8, #25 + vadd.i64 q13, q12, q9 + vadd.i64 q0, q0, q10 + vsub.i64 q7, q7, q8 + vshr.s64 q8, q13, #26 + vshl.i64 q10, q10, #25 + vadd.i64 q13, q0, q9 + vadd.i64 q1, q1, q8 + vshl.i64 q8, q8, #26 + vadd.i64 q15, q1, q4 + vsub.i64 q2, q2, q10 + vshr.s64 q10, q13, #26 + vsub.i64 q8, q12, q8 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q10 + vshl.i64 q10, q10, #26 + vadd.i64 q13, q3, q4 + vadd.i64 q14, q14, q12 + add r2, r3, #144 + vshl.i64 q12, q12, #25 + add r4, r3, #192 + vadd.i64 q15, q14, q9 + add r2, r2, #8 + vsub.i64 q0, q0, q10 + add r4, r4, #8 + vshr.s64 q10, q13, #25 + vsub.i64 q1, q1, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q13, q10, q10 + vadd.i64 q11, q11, q12 + vtrn.32 d16, d2 + vshl.i64 q12, q12, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q11, q4 + vadd.i64 q4, q5, q13 + vst1.8 d16, [r2, : 64]! + vshl.i64 q5, q10, #4 + vst1.8 d17, [r4, : 64]! 
+ vsub.i64 q8, q14, q12 + vshr.s64 q1, q1, #25 + vadd.i64 q4, q4, q5 + vadd.i64 q5, q6, q1 + vshl.i64 q1, q1, #25 + vadd.i64 q6, q5, q9 + vadd.i64 q4, q4, q10 + vshl.i64 q10, q10, #25 + vadd.i64 q9, q4, q9 + vsub.i64 q1, q11, q1 + vshr.s64 q6, q6, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d16, d2 + vshr.s64 q9, q9, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q2, q6 + vst1.8 d16, [r2, : 64] + vshl.i64 q2, q6, #26 + vst1.8 d17, [r4, : 64] + vadd.i64 q6, q7, q9 + vtrn.32 d0, d6 + vshl.i64 q7, q9, #26 + vtrn.32 d1, d7 + vsub.i64 q2, q5, q2 + add r2, r2, #16 + vsub.i64 q3, q4, q7 + vst1.8 d0, [r2, : 64] + add r4, r4, #16 + vst1.8 d1, [r4, : 64] + vtrn.32 d4, d2 + vtrn.32 d5, d3 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d6, d12 + vtrn.32 d7, d13 + vst1.8 d4, [r2, : 64] + vst1.8 d5, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d6, [r2, : 64] + vst1.8 d7, [r4, : 64] + add r2, r3, #336 + add r4, r3, #288 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vadd.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4-d5}, [r4, : 128]! + vadd.i32 q1, q1, q2 + add r5, r3, #288 + vld1.8 {d4}, [r2, : 64] + vld1.8 {d6}, [r4, : 64] + vadd.i32 q2, q2, q3 + vst1.8 {d0-d1}, [r5, : 128]! + vst1.8 {d2-d3}, [r5, : 128]! + vst1.8 d4, [r5, : 64] + add r2, r3, #48 + add r4, r3, #144 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #288 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #240 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! 
+ vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! + vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! + vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128]! 
+ vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 + vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + 
vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 + vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #240 + vshl.i64 q7, q7, #25 + add r4, r3, #144 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q12, q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! 
+ vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + ldr r2, [sp, #456] + ldr r4, [sp, #460] + subs r5, r2, #1 + bge .Lmainloop + add r1, r3, #144 + add r2, r3, #336 + vld1.8 {d0-d1}, [r1, : 128]! + vld1.8 {d2-d3}, [r1, : 128]! + vld1.8 {d4}, [r1, : 64] + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 d4, [r2, : 64] + movw r1, #0 +.Linvertloop: + add r2, r3, #144 + movw r4, #0 + movw r5, #2 + cmp r1, #1 + moveq r5, #1 + addeq r2, r3, #336 + addeq r4, r3, #48 + cmp r1, #2 + moveq r5, #1 + addeq r2, r3, #48 + cmp r1, #3 + moveq r5, #5 + addeq r4, r3, #336 + cmp r1, #4 + moveq r5, #10 + cmp r1, #5 + moveq r5, #20 + cmp r1, #6 + moveq r5, #10 + addeq r2, r3, #336 + addeq r4, r3, #336 + cmp r1, #7 + moveq r5, #50 + cmp r1, #8 + moveq r5, #100 + cmp r1, #9 + moveq r5, #50 + addeq r2, r3, #336 + cmp r1, #10 + moveq r5, #5 + addeq r2, r3, #48 + cmp r1, #11 + moveq r5, #0 + addeq r2, r3, #96 + add r6, r3, #144 + add r7, r3, #288 + vld1.8 {d0-d1}, [r6, : 128]! + vld1.8 {d2-d3}, [r6, : 128]! + vld1.8 {d4}, [r6, : 64] + vst1.8 {d0-d1}, [r7, : 128]! + vst1.8 {d2-d3}, [r7, : 128]! 
+ vst1.8 d4, [r7, : 64] + cmp r5, #0 + beq .Lskipsquaringloop +.Lsquaringloop: + add r6, r3, #288 + add r7, r3, #288 + add r8, r3, #288 + vmov.i32 q0, #19 + vmov.i32 q1, #0 + vmov.i32 q2, #1 + vzip.i32 q1, q2 + vld1.8 {d4-d5}, [r7, : 128]! + vld1.8 {d6-d7}, [r7, : 128]! + vld1.8 {d9}, [r7, : 64] + vld1.8 {d10-d11}, [r6, : 128]! + add r7, sp, #384 + vld1.8 {d12-d13}, [r6, : 128]! + vmul.i32 q7, q2, q0 + vld1.8 {d8}, [r6, : 64] + vext.32 d17, d11, d10, #1 + vmul.i32 q9, q3, q0 + vext.32 d16, d10, d8, #1 + vshl.u32 q10, q5, q1 + vext.32 d22, d14, d4, #1 + vext.32 d24, d18, d6, #1 + vshl.u32 q13, q6, q1 + vshl.u32 d28, d8, d2 + vrev64.i32 d22, d22 + vmul.i32 d1, d9, d1 + vrev64.i32 d24, d24 + vext.32 d29, d8, d13, #1 + vext.32 d0, d1, d9, #1 + vrev64.i32 d0, d0 + vext.32 d2, d9, d1, #1 + vext.32 d23, d15, d5, #1 + vmull.s32 q4, d20, d4 + vrev64.i32 d23, d23 + vmlal.s32 q4, d21, d1 + vrev64.i32 d2, d2 + vmlal.s32 q4, d26, d19 + vext.32 d3, d5, d15, #1 + vmlal.s32 q4, d27, d18 + vrev64.i32 d3, d3 + vmlal.s32 q4, d28, d15 + vext.32 d14, d12, d11, #1 + vmull.s32 q5, d16, d23 + vext.32 d15, d13, d12, #1 + vmlal.s32 q5, d17, d4 + vst1.8 d8, [r7, : 64]! + vmlal.s32 q5, d14, d1 + vext.32 d12, d9, d8, #0 + vmlal.s32 q5, d15, d19 + vmov.i64 d13, #0 + vmlal.s32 q5, d29, d18 + vext.32 d25, d19, d7, #1 + vmlal.s32 q6, d20, d5 + vrev64.i32 d25, d25 + vmlal.s32 q6, d21, d4 + vst1.8 d11, [r7, : 64]! + vmlal.s32 q6, d26, d1 + vext.32 d9, d10, d10, #0 + vmlal.s32 q6, d27, d19 + vmov.i64 d8, #0 + vmlal.s32 q6, d28, d18 + vmlal.s32 q4, d16, d24 + vmlal.s32 q4, d17, d5 + vmlal.s32 q4, d14, d4 + vst1.8 d12, [r7, : 64]! + vmlal.s32 q4, d15, d1 + vext.32 d10, d13, d12, #0 + vmlal.s32 q4, d29, d19 + vmov.i64 d11, #0 + vmlal.s32 q5, d20, d6 + vmlal.s32 q5, d21, d5 + vmlal.s32 q5, d26, d4 + vext.32 d13, d8, d8, #0 + vmlal.s32 q5, d27, d1 + vmov.i64 d12, #0 + vmlal.s32 q5, d28, d19 + vst1.8 d9, [r7, : 64]! 
+ vmlal.s32 q6, d16, d25 + vmlal.s32 q6, d17, d6 + vst1.8 d10, [r7, : 64] + vmlal.s32 q6, d14, d5 + vext.32 d8, d11, d10, #0 + vmlal.s32 q6, d15, d4 + vmov.i64 d9, #0 + vmlal.s32 q6, d29, d1 + vmlal.s32 q4, d20, d7 + vmlal.s32 q4, d21, d6 + vmlal.s32 q4, d26, d5 + vext.32 d11, d12, d12, #0 + vmlal.s32 q4, d27, d4 + vmov.i64 d10, #0 + vmlal.s32 q4, d28, d1 + vmlal.s32 q5, d16, d0 + sub r6, r7, #32 + vmlal.s32 q5, d17, d7 + vmlal.s32 q5, d14, d6 + vext.32 d30, d9, d8, #0 + vmlal.s32 q5, d15, d5 + vld1.8 {d31}, [r6, : 64]! + vmlal.s32 q5, d29, d4 + vmlal.s32 q15, d20, d0 + vext.32 d0, d6, d18, #1 + vmlal.s32 q15, d21, d25 + vrev64.i32 d0, d0 + vmlal.s32 q15, d26, d24 + vext.32 d1, d7, d19, #1 + vext.32 d7, d10, d10, #0 + vmlal.s32 q15, d27, d23 + vrev64.i32 d1, d1 + vld1.8 {d6}, [r6, : 64] + vmlal.s32 q15, d28, d22 + vmlal.s32 q3, d16, d4 + add r6, r6, #24 + vmlal.s32 q3, d17, d2 + vext.32 d4, d31, d30, #0 + vmov d17, d11 + vmlal.s32 q3, d14, d1 + vext.32 d11, d13, d13, #0 + vext.32 d13, d30, d30, #0 + vmlal.s32 q3, d15, d0 + vext.32 d1, d8, d8, #0 + vmlal.s32 q3, d29, d3 + vld1.8 {d5}, [r6, : 64] + sub r6, r6, #16 + vext.32 d10, d6, d6, #0 + vmov.i32 q1, #0xffffffff + vshl.i64 q4, q1, #25 + add r7, sp, #480 + vld1.8 {d14-d15}, [r7, : 128] + vadd.i64 q9, q2, q7 + vshl.i64 q1, q1, #26 + vshr.s64 q10, q9, #26 + vld1.8 {d0}, [r6, : 64]! + vadd.i64 q5, q5, q10 + vand q9, q9, q1 + vld1.8 {d16}, [r6, : 64]! 
+ add r6, sp, #496 + vld1.8 {d20-d21}, [r6, : 128] + vadd.i64 q11, q5, q10 + vsub.i64 q2, q2, q9 + vshr.s64 q9, q11, #25 + vext.32 d12, d5, d4, #0 + vand q11, q11, q4 + vadd.i64 q0, q0, q9 + vmov d19, d7 + vadd.i64 q3, q0, q7 + vsub.i64 q5, q5, q11 + vshr.s64 q11, q3, #26 + vext.32 d18, d11, d10, #0 + vand q3, q3, q1 + vadd.i64 q8, q8, q11 + vadd.i64 q11, q8, q10 + vsub.i64 q0, q0, q3 + vshr.s64 q3, q11, #25 + vand q11, q11, q4 + vadd.i64 q3, q6, q3 + vadd.i64 q6, q3, q7 + vsub.i64 q8, q8, q11 + vshr.s64 q11, q6, #26 + vand q6, q6, q1 + vadd.i64 q9, q9, q11 + vadd.i64 d25, d19, d21 + vsub.i64 q3, q3, q6 + vshr.s64 d23, d25, #25 + vand q4, q12, q4 + vadd.i64 d21, d23, d23 + vshl.i64 d25, d23, #4 + vadd.i64 d21, d21, d23 + vadd.i64 d25, d25, d21 + vadd.i64 d4, d4, d25 + vzip.i32 q0, q8 + vadd.i64 d12, d4, d14 + add r6, r8, #8 + vst1.8 d0, [r6, : 64] + vsub.i64 d19, d19, d9 + add r6, r6, #16 + vst1.8 d16, [r6, : 64] + vshr.s64 d22, d12, #26 + vand q0, q6, q1 + vadd.i64 d10, d10, d22 + vzip.i32 q3, q9 + vsub.i64 d4, d4, d0 + sub r6, r6, #8 + vst1.8 d6, [r6, : 64] + add r6, r6, #16 + vst1.8 d18, [r6, : 64] + vzip.i32 q2, q5 + sub r6, r6, #32 + vst1.8 d4, [r6, : 64] + subs r5, r5, #1 + bhi .Lsquaringloop +.Lskipsquaringloop: + mov r2, r2 + add r5, r3, #288 + add r6, r3, #144 + vmov.i32 q0, #19 + vmov.i32 q1, #0 + vmov.i32 q2, #1 + vzip.i32 q1, q2 + vld1.8 {d4-d5}, [r5, : 128]! + vld1.8 {d6-d7}, [r5, : 128]! + vld1.8 {d9}, [r5, : 64] + vld1.8 {d10-d11}, [r2, : 128]! + add r5, sp, #384 + vld1.8 {d12-d13}, [r2, : 128]! 
+ vmul.i32 q7, q2, q0 + vld1.8 {d8}, [r2, : 64] + vext.32 d17, d11, d10, #1 + vmul.i32 q9, q3, q0 + vext.32 d16, d10, d8, #1 + vshl.u32 q10, q5, q1 + vext.32 d22, d14, d4, #1 + vext.32 d24, d18, d6, #1 + vshl.u32 q13, q6, q1 + vshl.u32 d28, d8, d2 + vrev64.i32 d22, d22 + vmul.i32 d1, d9, d1 + vrev64.i32 d24, d24 + vext.32 d29, d8, d13, #1 + vext.32 d0, d1, d9, #1 + vrev64.i32 d0, d0 + vext.32 d2, d9, d1, #1 + vext.32 d23, d15, d5, #1 + vmull.s32 q4, d20, d4 + vrev64.i32 d23, d23 + vmlal.s32 q4, d21, d1 + vrev64.i32 d2, d2 + vmlal.s32 q4, d26, d19 + vext.32 d3, d5, d15, #1 + vmlal.s32 q4, d27, d18 + vrev64.i32 d3, d3 + vmlal.s32 q4, d28, d15 + vext.32 d14, d12, d11, #1 + vmull.s32 q5, d16, d23 + vext.32 d15, d13, d12, #1 + vmlal.s32 q5, d17, d4 + vst1.8 d8, [r5, : 64]! + vmlal.s32 q5, d14, d1 + vext.32 d12, d9, d8, #0 + vmlal.s32 q5, d15, d19 + vmov.i64 d13, #0 + vmlal.s32 q5, d29, d18 + vext.32 d25, d19, d7, #1 + vmlal.s32 q6, d20, d5 + vrev64.i32 d25, d25 + vmlal.s32 q6, d21, d4 + vst1.8 d11, [r5, : 64]! + vmlal.s32 q6, d26, d1 + vext.32 d9, d10, d10, #0 + vmlal.s32 q6, d27, d19 + vmov.i64 d8, #0 + vmlal.s32 q6, d28, d18 + vmlal.s32 q4, d16, d24 + vmlal.s32 q4, d17, d5 + vmlal.s32 q4, d14, d4 + vst1.8 d12, [r5, : 64]! + vmlal.s32 q4, d15, d1 + vext.32 d10, d13, d12, #0 + vmlal.s32 q4, d29, d19 + vmov.i64 d11, #0 + vmlal.s32 q5, d20, d6 + vmlal.s32 q5, d21, d5 + vmlal.s32 q5, d26, d4 + vext.32 d13, d8, d8, #0 + vmlal.s32 q5, d27, d1 + vmov.i64 d12, #0 + vmlal.s32 q5, d28, d19 + vst1.8 d9, [r5, : 64]! 
+ vmlal.s32 q6, d16, d25 + vmlal.s32 q6, d17, d6 + vst1.8 d10, [r5, : 64] + vmlal.s32 q6, d14, d5 + vext.32 d8, d11, d10, #0 + vmlal.s32 q6, d15, d4 + vmov.i64 d9, #0 + vmlal.s32 q6, d29, d1 + vmlal.s32 q4, d20, d7 + vmlal.s32 q4, d21, d6 + vmlal.s32 q4, d26, d5 + vext.32 d11, d12, d12, #0 + vmlal.s32 q4, d27, d4 + vmov.i64 d10, #0 + vmlal.s32 q4, d28, d1 + vmlal.s32 q5, d16, d0 + sub r2, r5, #32 + vmlal.s32 q5, d17, d7 + vmlal.s32 q5, d14, d6 + vext.32 d30, d9, d8, #0 + vmlal.s32 q5, d15, d5 + vld1.8 {d31}, [r2, : 64]! + vmlal.s32 q5, d29, d4 + vmlal.s32 q15, d20, d0 + vext.32 d0, d6, d18, #1 + vmlal.s32 q15, d21, d25 + vrev64.i32 d0, d0 + vmlal.s32 q15, d26, d24 + vext.32 d1, d7, d19, #1 + vext.32 d7, d10, d10, #0 + vmlal.s32 q15, d27, d23 + vrev64.i32 d1, d1 + vld1.8 {d6}, [r2, : 64] + vmlal.s32 q15, d28, d22 + vmlal.s32 q3, d16, d4 + add r2, r2, #24 + vmlal.s32 q3, d17, d2 + vext.32 d4, d31, d30, #0 + vmov d17, d11 + vmlal.s32 q3, d14, d1 + vext.32 d11, d13, d13, #0 + vext.32 d13, d30, d30, #0 + vmlal.s32 q3, d15, d0 + vext.32 d1, d8, d8, #0 + vmlal.s32 q3, d29, d3 + vld1.8 {d5}, [r2, : 64] + sub r2, r2, #16 + vext.32 d10, d6, d6, #0 + vmov.i32 q1, #0xffffffff + vshl.i64 q4, q1, #25 + add r5, sp, #480 + vld1.8 {d14-d15}, [r5, : 128] + vadd.i64 q9, q2, q7 + vshl.i64 q1, q1, #26 + vshr.s64 q10, q9, #26 + vld1.8 {d0}, [r2, : 64]! + vadd.i64 q5, q5, q10 + vand q9, q9, q1 + vld1.8 {d16}, [r2, : 64]! 
+ add r2, sp, #496 + vld1.8 {d20-d21}, [r2, : 128] + vadd.i64 q11, q5, q10 + vsub.i64 q2, q2, q9 + vshr.s64 q9, q11, #25 + vext.32 d12, d5, d4, #0 + vand q11, q11, q4 + vadd.i64 q0, q0, q9 + vmov d19, d7 + vadd.i64 q3, q0, q7 + vsub.i64 q5, q5, q11 + vshr.s64 q11, q3, #26 + vext.32 d18, d11, d10, #0 + vand q3, q3, q1 + vadd.i64 q8, q8, q11 + vadd.i64 q11, q8, q10 + vsub.i64 q0, q0, q3 + vshr.s64 q3, q11, #25 + vand q11, q11, q4 + vadd.i64 q3, q6, q3 + vadd.i64 q6, q3, q7 + vsub.i64 q8, q8, q11 + vshr.s64 q11, q6, #26 + vand q6, q6, q1 + vadd.i64 q9, q9, q11 + vadd.i64 d25, d19, d21 + vsub.i64 q3, q3, q6 + vshr.s64 d23, d25, #25 + vand q4, q12, q4 + vadd.i64 d21, d23, d23 + vshl.i64 d25, d23, #4 + vadd.i64 d21, d21, d23 + vadd.i64 d25, d25, d21 + vadd.i64 d4, d4, d25 + vzip.i32 q0, q8 + vadd.i64 d12, d4, d14 + add r2, r6, #8 + vst1.8 d0, [r2, : 64] + vsub.i64 d19, d19, d9 + add r2, r2, #16 + vst1.8 d16, [r2, : 64] + vshr.s64 d22, d12, #26 + vand q0, q6, q1 + vadd.i64 d10, d10, d22 + vzip.i32 q3, q9 + vsub.i64 d4, d4, d0 + sub r2, r2, #8 + vst1.8 d6, [r2, : 64] + add r2, r2, #16 + vst1.8 d18, [r2, : 64] + vzip.i32 q2, q5 + sub r2, r2, #32 + vst1.8 d4, [r2, : 64] + cmp r4, #0 + beq .Lskippostcopy + add r2, r3, #144 + mov r4, r4 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r4, : 128]! + vst1.8 {d2-d3}, [r4, : 128]! + vst1.8 d4, [r4, : 64] +.Lskippostcopy: + cmp r1, #1 + bne .Lskipfinalcopy + add r2, r3, #288 + add r4, r3, #144 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r4, : 128]! + vst1.8 {d2-d3}, [r4, : 128]! 
+ vst1.8 d4, [r4, : 64] +.Lskipfinalcopy: + add r1, r1, #1 + cmp r1, #12 + blo .Linvertloop + add r1, r3, #144 + ldr r2, [r1], #4 + ldr r3, [r1], #4 + ldr r4, [r1], #4 + ldr r5, [r1], #4 + ldr r6, [r1], #4 + ldr r7, [r1], #4 + ldr r8, [r1], #4 + ldr r9, [r1], #4 + ldr r10, [r1], #4 + ldr r1, [r1] + add r11, r1, r1, LSL #4 + add r11, r11, r1, LSL #1 + add r11, r11, #16777216 + mov r11, r11, ASR #25 + add r11, r11, r2 + mov r11, r11, ASR #26 + add r11, r11, r3 + mov r11, r11, ASR #25 + add r11, r11, r4 + mov r11, r11, ASR #26 + add r11, r11, r5 + mov r11, r11, ASR #25 + add r11, r11, r6 + mov r11, r11, ASR #26 + add r11, r11, r7 + mov r11, r11, ASR #25 + add r11, r11, r8 + mov r11, r11, ASR #26 + add r11, r11, r9 + mov r11, r11, ASR #25 + add r11, r11, r10 + mov r11, r11, ASR #26 + add r11, r11, r1 + mov r11, r11, ASR #25 + add r2, r2, r11 + add r2, r2, r11, LSL #1 + add r2, r2, r11, LSL #4 + mov r11, r2, ASR #26 + add r3, r3, r11 + sub r2, r2, r11, LSL #26 + mov r11, r3, ASR #25 + add r4, r4, r11 + sub r3, r3, r11, LSL #25 + mov r11, r4, ASR #26 + add r5, r5, r11 + sub r4, r4, r11, LSL #26 + mov r11, r5, ASR #25 + add r6, r6, r11 + sub r5, r5, r11, LSL #25 + mov r11, r6, ASR #26 + add r7, r7, r11 + sub r6, r6, r11, LSL #26 + mov r11, r7, ASR #25 + add r8, r8, r11 + sub r7, r7, r11, LSL #25 + mov r11, r8, ASR #26 + add r9, r9, r11 + sub r8, r8, r11, LSL #26 + mov r11, r9, ASR #25 + add r10, r10, r11 + sub r9, r9, r11, LSL #25 + mov r11, r10, ASR #26 + add r1, r1, r11 + sub r10, r10, r11, LSL #26 + mov r11, r1, ASR #25 + sub r1, r1, r11, LSL #25 + add r2, r2, r3, LSL #26 + mov r3, r3, LSR #6 + add r3, r3, r4, LSL #19 + mov r4, r4, LSR #13 + add r4, r4, r5, LSL #13 + mov r5, r5, LSR #19 + add r5, r5, r6, LSL #6 + add r6, r7, r8, LSL #25 + mov r7, r8, LSR #7 + add r7, r7, r9, LSL #19 + mov r8, r9, LSR #13 + add r8, r8, r10, LSL #12 + mov r9, r10, LSR #20 + add r1, r9, r1, LSL #6 + str r2, [r0] + str r3, [r0, #4] + str r4, [r0, #8] + str r5, [r0, #12] + str r6, [r0, #16] 
+ str r7, [r0, #20] + str r8, [r0, #24] + str r1, [r0, #28] + movw r0, #0 + mov sp, ip + pop {r4-r11, pc} +SYM_FUNC_END(curve25519_neon) +#endif diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c b/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c new file mode 100644 index 000000000000..42cfb6c00f98 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c @@ -0,0 +1,860 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2016 The fiat-crypto Authors. + * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is a machine-generated formally verified implementation of Curve25519 + * ECDH from: . Though originally + * machine generated, it has been tweaked to be suitable for use in the kernel. + * It is optimized for 32-bit machines and machines that cannot work efficiently + * with 128-bit integer types. + */ + +/* fe means field element. Here the field is \Z/(2^255-19). An element t, + * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 + * t[3]+2^102 t[4]+...+2^230 t[9]. + * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. + * Multiplication and carrying produce fe from fe_loose. + */ +typedef struct fe { u32 v[10]; } fe; + +/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc + * Addition and subtraction produce fe_loose from (fe, fe). + */ +typedef struct fe_loose { u32 v[10]; } fe_loose; + +static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s) +{ + /* Ignores top bit of s. */ + u32 a0 = get_unaligned_le32(s); + u32 a1 = get_unaligned_le32(s+4); + u32 a2 = get_unaligned_le32(s+8); + u32 a3 = get_unaligned_le32(s+12); + u32 a4 = get_unaligned_le32(s+16); + u32 a5 = get_unaligned_le32(s+20); + u32 a6 = get_unaligned_le32(s+24); + u32 a7 = get_unaligned_le32(s+28); + h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 
26 */ + h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */ + h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */ + h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */ + h[4] = (a3>> 6); /* (32- 6) = 26 */ + h[5] = a4&((1<<25)-1); /* 25 */ + h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */ + h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */ + h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */ + h[9] = (a7>> 6)&((1<<25)-1); /* 25 */ +} + +static __always_inline void fe_frombytes(fe *h, const u8 *s) +{ + fe_frombytes_impl(h->v, s); +} + +static __always_inline u8 /*bool*/ +addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 25 bits of result and 1 bit of carry + * (26 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a + b + c; + *low = x & ((1 << 25) - 1); + return (x >> 25) & 1; +} + +static __always_inline u8 /*bool*/ +addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 26 bits of result and 1 bit of carry + * (27 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a + b + c; + *low = x & ((1 << 26) - 1); + return (x >> 26) & 1; +} + +static __always_inline u8 /*bool*/ +subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 25 bits of result and 1 bit of borrow + * (26 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a - b - c; + *low = x & ((1 << 25) - 1); + return x >> 31; +} + +static __always_inline u8 /*bool*/ +subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 26 bits of result and 1 bit of borrow + *(27 total), so a 32-bit intermediate is sufficient. 
+ */ + u32 x = a - b - c; + *low = x & ((1 << 26) - 1); + return x >> 31; +} + +static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz) +{ + t = -!!t; /* all set if nonzero, 0 if 0 */ + return (t&nz) | ((~t)&z); +} + +static __always_inline void fe_freeze(u32 out[10], const u32 in1[10]) +{ + { const u32 x17 = in1[9]; + { const u32 x18 = in1[8]; + { const u32 x16 = in1[7]; + { const u32 x14 = in1[6]; + { const u32 x12 = in1[5]; + { const u32 x10 = in1[4]; + { const u32 x8 = in1[3]; + { const u32 x6 = in1[2]; + { const u32 x4 = in1[1]; + { const u32 x2 = in1[0]; + { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20); + { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23); + { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26); + { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29); + { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32); + { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35); + { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38); + { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41); + { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44); + { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47); + { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff); + { u32 x50 = (x49 & 0x3ffffed); + { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52); + { u32 x54 = (x49 & 0x1ffffff); + { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56); + { u32 x58 = (x49 & 0x3ffffff); + { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60); + { u32 x62 = (x49 & 0x1ffffff); + { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64); + { u32 x66 = (x49 & 0x3ffffff); + { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68); + { u32 x70 = (x49 & 0x1ffffff); + { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72); + { u32 x74 = (x49 & 0x3ffffff); + { u32 x76; u8/*bool*/ x77 = 
addcarryx_u26(x73, x38, x74, &x76); + { u32 x78 = (x49 & 0x1ffffff); + { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80); + { u32 x82 = (x49 & 0x3ffffff); + { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84); + { u32 x86 = (x49 & 0x1ffffff); + { u32 x88; addcarryx_u25(x85, x47, x86, &x88); + out[0] = x52; + out[1] = x56; + out[2] = x60; + out[3] = x64; + out[4] = x68; + out[5] = x72; + out[6] = x76; + out[7] = x80; + out[8] = x84; + out[9] = x88; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_tobytes(u8 s[32], const fe *f) +{ + u32 h[10]; + fe_freeze(h, f->v); + s[0] = h[0] >> 0; + s[1] = h[0] >> 8; + s[2] = h[0] >> 16; + s[3] = (h[0] >> 24) | (h[1] << 2); + s[4] = h[1] >> 6; + s[5] = h[1] >> 14; + s[6] = (h[1] >> 22) | (h[2] << 3); + s[7] = h[2] >> 5; + s[8] = h[2] >> 13; + s[9] = (h[2] >> 21) | (h[3] << 5); + s[10] = h[3] >> 3; + s[11] = h[3] >> 11; + s[12] = (h[3] >> 19) | (h[4] << 6); + s[13] = h[4] >> 2; + s[14] = h[4] >> 10; + s[15] = h[4] >> 18; + s[16] = h[5] >> 0; + s[17] = h[5] >> 8; + s[18] = h[5] >> 16; + s[19] = (h[5] >> 24) | (h[6] << 1); + s[20] = h[6] >> 7; + s[21] = h[6] >> 15; + s[22] = (h[6] >> 23) | (h[7] << 3); + s[23] = h[7] >> 5; + s[24] = h[7] >> 13; + s[25] = (h[7] >> 21) | (h[8] << 4); + s[26] = h[8] >> 4; + s[27] = h[8] >> 12; + s[28] = (h[8] >> 20) | (h[9] << 6); + s[29] = h[9] >> 2; + s[30] = h[9] >> 10; + s[31] = h[9] >> 18; +} + +/* h = f */ +static __always_inline void fe_copy(fe *h, const fe *f) +{ + memmove(h, f, sizeof(u32) * 10); +} + +static __always_inline void fe_copy_lt(fe_loose *h, const fe *f) +{ + memmove(h, f, sizeof(u32) * 10); +} + +/* h = 0 */ +static __always_inline void fe_0(fe *h) +{ + memset(h, 0, sizeof(u32) * 10); +} + +/* h = 1 */ +static __always_inline void fe_1(fe *h) +{ + memset(h, 0, sizeof(u32) * 10); + h->v[0] = 1; +} + +static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = 
in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + out[0] = (x5 + x23); + out[1] = (x7 + x25); + out[2] = (x9 + x27); + out[3] = (x11 + x29); + out[4] = (x13 + x31); + out[5] = (x15 + x33); + out[6] = (x17 + x35); + out[7] = (x19 + x37); + out[8] = (x21 + x39); + out[9] = (x20 + x38); + }}}}}}}}}}}}}}}}}}}} +} + +/* h = f + g + * Can overlap h with f or g. + */ +static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g) +{ + fe_add_impl(h->v, f->v, g->v); +} + +static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + out[0] = ((0x7ffffda + x5) - x23); + out[1] = ((0x3fffffe + x7) - x25); + out[2] = ((0x7fffffe + x9) - x27); + out[3] = ((0x3fffffe + x11) - x29); + out[4] = ((0x7fffffe + x13) - x31); + out[5] = ((0x3fffffe + x15) - x33); + out[6] = ((0x7fffffe + x17) - x35); + out[7] = ((0x3fffffe + x19) - x37); + out[8] = ((0x7fffffe + x21) - x39); + out[9] = ((0x3fffffe + x20) - x38); + }}}}}}}}}}}}}}}}}}}} +} + +/* h = f - g + * 
Can overlap h with f or g. + */ +static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g) +{ + fe_sub_impl(h->v, f->v, g->v); +} + +static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + { u64 x40 = ((u64)x23 * x5); + { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); + { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); + { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); + { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); + { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); + { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); + { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); + { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); + { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + 
((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); + { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); + { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); + { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); + { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); + { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); + { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); + { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); + { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); + { u64 x58 = ((u64)(0x2 * x38) * x20); + { u64 x59 = (x48 + (x58 << 0x4)); + { u64 x60 = (x59 + (x58 << 0x1)); + { u64 x61 = (x60 + x58); + { u64 x62 = (x47 + (x57 << 0x4)); + { u64 x63 = (x62 + (x57 << 0x1)); + { u64 x64 = (x63 + x57); + { u64 x65 = (x46 + (x56 << 0x4)); + { u64 x66 = (x65 + (x56 << 0x1)); + { u64 x67 = (x66 + x56); + { u64 x68 = (x45 + (x55 << 0x4)); + { u64 x69 = (x68 + (x55 << 0x1)); + { u64 x70 = (x69 + x55); + { u64 x71 = (x44 + (x54 << 0x4)); + { u64 x72 = (x71 + (x54 << 0x1)); + { u64 x73 = (x72 + x54); + { u64 x74 = (x43 + (x53 << 0x4)); + { u64 x75 = (x74 + (x53 << 0x1)); + { u64 x76 = (x75 + x53); + { u64 x77 = (x42 + (x52 << 0x4)); + { u64 x78 = (x77 + (x52 << 0x1)); + { u64 x79 = (x78 + x52); + { u64 x80 = (x41 + (x51 << 0x4)); + { u64 x81 = (x80 + (x51 << 0x1)); + { u64 x82 = (x81 + x51); + { u64 x83 = (x40 + (x50 << 0x4)); + { u64 x84 = (x83 + (x50 << 
0x1)); + { u64 x85 = (x84 + x50); + { u64 x86 = (x85 >> 0x1a); + { u32 x87 = ((u32)x85 & 0x3ffffff); + { u64 x88 = (x86 + x82); + { u64 x89 = (x88 >> 0x19); + { u32 x90 = ((u32)x88 & 0x1ffffff); + { u64 x91 = (x89 + x79); + { u64 x92 = (x91 >> 0x1a); + { u32 x93 = ((u32)x91 & 0x3ffffff); + { u64 x94 = (x92 + x76); + { u64 x95 = (x94 >> 0x19); + { u32 x96 = ((u32)x94 & 0x1ffffff); + { u64 x97 = (x95 + x73); + { u64 x98 = (x97 >> 0x1a); + { u32 x99 = ((u32)x97 & 0x3ffffff); + { u64 x100 = (x98 + x70); + { u64 x101 = (x100 >> 0x19); + { u32 x102 = ((u32)x100 & 0x1ffffff); + { u64 x103 = (x101 + x67); + { u64 x104 = (x103 >> 0x1a); + { u32 x105 = ((u32)x103 & 0x3ffffff); + { u64 x106 = (x104 + x64); + { u64 x107 = (x106 >> 0x19); + { u32 x108 = ((u32)x106 & 0x1ffffff); + { u64 x109 = (x107 + x61); + { u64 x110 = (x109 >> 0x1a); + { u32 x111 = ((u32)x109 & 0x3ffffff); + { u64 x112 = (x110 + x49); + { u64 x113 = (x112 >> 0x19); + { u32 x114 = ((u32)x112 & 0x1ffffff); + { u64 x115 = (x87 + (0x13 * x113)); + { u32 x116 = (u32) (x115 >> 0x1a); + { u32 x117 = ((u32)x115 & 0x3ffffff); + { u32 x118 = (x116 + x90); + { u32 x119 = (x118 >> 0x19); + { u32 x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static __always_inline void +fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_sqr_impl(u32 out[10], const u32 in1[10]) +{ + { const u32 x17 = in1[9]; + { const u32 x18 = in1[8]; + { const u32 x16 = in1[7]; + { const u32 x14 = 
in1[6]; + { const u32 x12 = in1[5]; + { const u32 x10 = in1[4]; + { const u32 x8 = in1[3]; + { const u32 x6 = in1[2]; + { const u32 x4 = in1[1]; + { const u32 x2 = in1[0]; + { u64 x19 = ((u64)x2 * x2); + { u64 x20 = ((u64)(0x2 * x2) * x4); + { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6))); + { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8))); + { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10)); + { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12))); + { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12))); + { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16))); + { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12)))))); + { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17))); + { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17))))); + { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17))); + { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17)))))); + { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17))); + { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17))); + { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17))); + { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17)); + { u64 x36 = ((u64)(0x2 * x18) * x17); + { u64 x37 = ((u64)(0x2 * x17) * x17); + { u64 x38 = (x27 + (x37 << 0x4)); + { u64 x39 = (x38 + (x37 << 0x1)); + { u64 x40 = (x39 + x37); + { u64 x41 = (x26 + (x36 << 0x4)); + { u64 x42 = (x41 + (x36 << 0x1)); + { u64 x43 = (x42 + x36); + { u64 x44 = (x25 + (x35 << 0x4)); + { u64 x45 = (x44 + (x35 << 0x1)); + { u64 x46 = (x45 + x35); + { u64 x47 = (x24 
+ (x34 << 0x4)); + { u64 x48 = (x47 + (x34 << 0x1)); + { u64 x49 = (x48 + x34); + { u64 x50 = (x23 + (x33 << 0x4)); + { u64 x51 = (x50 + (x33 << 0x1)); + { u64 x52 = (x51 + x33); + { u64 x53 = (x22 + (x32 << 0x4)); + { u64 x54 = (x53 + (x32 << 0x1)); + { u64 x55 = (x54 + x32); + { u64 x56 = (x21 + (x31 << 0x4)); + { u64 x57 = (x56 + (x31 << 0x1)); + { u64 x58 = (x57 + x31); + { u64 x59 = (x20 + (x30 << 0x4)); + { u64 x60 = (x59 + (x30 << 0x1)); + { u64 x61 = (x60 + x30); + { u64 x62 = (x19 + (x29 << 0x4)); + { u64 x63 = (x62 + (x29 << 0x1)); + { u64 x64 = (x63 + x29); + { u64 x65 = (x64 >> 0x1a); + { u32 x66 = ((u32)x64 & 0x3ffffff); + { u64 x67 = (x65 + x61); + { u64 x68 = (x67 >> 0x19); + { u32 x69 = ((u32)x67 & 0x1ffffff); + { u64 x70 = (x68 + x58); + { u64 x71 = (x70 >> 0x1a); + { u32 x72 = ((u32)x70 & 0x3ffffff); + { u64 x73 = (x71 + x55); + { u64 x74 = (x73 >> 0x19); + { u32 x75 = ((u32)x73 & 0x1ffffff); + { u64 x76 = (x74 + x52); + { u64 x77 = (x76 >> 0x1a); + { u32 x78 = ((u32)x76 & 0x3ffffff); + { u64 x79 = (x77 + x49); + { u64 x80 = (x79 >> 0x19); + { u32 x81 = ((u32)x79 & 0x1ffffff); + { u64 x82 = (x80 + x46); + { u64 x83 = (x82 >> 0x1a); + { u32 x84 = ((u32)x82 & 0x3ffffff); + { u64 x85 = (x83 + x43); + { u64 x86 = (x85 >> 0x19); + { u32 x87 = ((u32)x85 & 0x1ffffff); + { u64 x88 = (x86 + x40); + { u64 x89 = (x88 >> 0x1a); + { u32 x90 = ((u32)x88 & 0x3ffffff); + { u64 x91 = (x89 + x28); + { u64 x92 = (x91 >> 0x19); + { u32 x93 = ((u32)x91 & 0x1ffffff); + { u64 x94 = (x66 + (0x13 * x92)); + { u32 x95 = (u32) (x94 >> 0x1a); + { u32 x96 = ((u32)x94 & 0x3ffffff); + { u32 x97 = (x95 + x69); + { u32 x98 = (x97 >> 0x19); + { u32 x99 = (x97 & 0x1ffffff); + out[0] = x96; + out[1] = x99; + out[2] = (x98 + x72); + out[3] = x75; + out[4] = x78; + out[5] = x81; + out[6] = x84; + out[7] = x87; + out[8] = x90; + out[9] = x93; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_sq_tl(fe *h, 
const fe_loose *f) +{ + fe_sqr_impl(h->v, f->v); +} + +static __always_inline void fe_sq_tt(fe *h, const fe *f) +{ + fe_sqr_impl(h->v, f->v); +} + +static __always_inline void fe_loose_invert(fe *out, const fe_loose *z) +{ + fe t0; + fe t1; + fe t2; + fe t3; + int i; + + fe_sq_tl(&t0, z); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 2; ++i) + fe_sq_tt(&t1, &t1); + fe_mul_tlt(&t1, z, &t1); + fe_mul_ttt(&t0, &t0, &t1); + fe_sq_tt(&t2, &t0); + fe_mul_ttt(&t1, &t1, &t2); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 5; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 10; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 1; i < 20; ++i) + fe_sq_tt(&t3, &t3); + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 10; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 50; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 1; i < 100; ++i) + fe_sq_tt(&t3, &t3); + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 50; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t1, &t1); + for (i = 1; i < 5; ++i) + fe_sq_tt(&t1, &t1); + fe_mul_ttt(out, &t1, &t0); +} + +static __always_inline void fe_invert(fe *out, const fe *z) +{ + fe_loose l; + fe_copy_lt(&l, z); + fe_loose_invert(out, &l); +} + +/* Replace (f,g) with (g,f) if b == 1; + * replace (f,g) with (f,g) if b == 0. 
+ * + * Preconditions: b in {0,1} + */ +static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b) +{ + unsigned i; + b = 0 - b; + for (i = 0; i < 10; i++) { + u32 x = f->v[i] ^ g->v[i]; + x &= b; + f->v[i] ^= x; + g->v[i] ^= x; + } +} + +/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.*/ +static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = 0; + { const u32 x39 = 0; + { const u32 x37 = 0; + { const u32 x35 = 0; + { const u32 x33 = 0; + { const u32 x31 = 0; + { const u32 x29 = 0; + { const u32 x27 = 0; + { const u32 x25 = 0; + { const u32 x23 = 121666; + { u64 x40 = ((u64)x23 * x5); + { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); + { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); + { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); + { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); + { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); + { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); + { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); + { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); + { u64 x49 = (((((((((((u64)x31 * 
x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); + { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); + { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); + { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); + { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); + { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); + { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); + { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); + { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); + { u64 x58 = ((u64)(0x2 * x38) * x20); + { u64 x59 = (x48 + (x58 << 0x4)); + { u64 x60 = (x59 + (x58 << 0x1)); + { u64 x61 = (x60 + x58); + { u64 x62 = (x47 + (x57 << 0x4)); + { u64 x63 = (x62 + (x57 << 0x1)); + { u64 x64 = (x63 + x57); + { u64 x65 = (x46 + (x56 << 0x4)); + { u64 x66 = (x65 + (x56 << 0x1)); + { u64 x67 = (x66 + x56); + { u64 x68 = (x45 + (x55 << 0x4)); + { u64 x69 = (x68 + (x55 << 0x1)); + { u64 x70 = (x69 + x55); + { u64 x71 = (x44 + (x54 << 0x4)); + { u64 x72 = (x71 + (x54 << 0x1)); + { u64 x73 = (x72 + x54); + { u64 x74 = (x43 + (x53 << 0x4)); + { u64 x75 = (x74 + (x53 << 0x1)); + { u64 x76 = (x75 + x53); + { u64 x77 = (x42 + (x52 << 0x4)); + { u64 x78 = (x77 + (x52 << 0x1)); + { u64 x79 = (x78 + x52); + { u64 x80 = (x41 + (x51 << 0x4)); + { 
u64 x81 = (x80 + (x51 << 0x1)); + { u64 x82 = (x81 + x51); + { u64 x83 = (x40 + (x50 << 0x4)); + { u64 x84 = (x83 + (x50 << 0x1)); + { u64 x85 = (x84 + x50); + { u64 x86 = (x85 >> 0x1a); + { u32 x87 = ((u32)x85 & 0x3ffffff); + { u64 x88 = (x86 + x82); + { u64 x89 = (x88 >> 0x19); + { u32 x90 = ((u32)x88 & 0x1ffffff); + { u64 x91 = (x89 + x79); + { u64 x92 = (x91 >> 0x1a); + { u32 x93 = ((u32)x91 & 0x3ffffff); + { u64 x94 = (x92 + x76); + { u64 x95 = (x94 >> 0x19); + { u32 x96 = ((u32)x94 & 0x1ffffff); + { u64 x97 = (x95 + x73); + { u64 x98 = (x97 >> 0x1a); + { u32 x99 = ((u32)x97 & 0x3ffffff); + { u64 x100 = (x98 + x70); + { u64 x101 = (x100 >> 0x19); + { u32 x102 = ((u32)x100 & 0x1ffffff); + { u64 x103 = (x101 + x67); + { u64 x104 = (x103 >> 0x1a); + { u32 x105 = ((u32)x103 & 0x3ffffff); + { u64 x106 = (x104 + x64); + { u64 x107 = (x106 >> 0x19); + { u32 x108 = ((u32)x106 & 0x1ffffff); + { u64 x109 = (x107 + x61); + { u64 x110 = (x109 >> 0x1a); + { u32 x111 = ((u32)x109 & 0x3ffffff); + { u64 x112 = (x110 + x49); + { u64 x113 = (x112 >> 0x19); + { u32 x114 = ((u32)x112 & 0x1ffffff); + { u64 x115 = (x87 + (0x13 * x113)); + { u32 x116 = (u32) (x115 >> 0x1a); + { u32 x117 = ((u32)x115 & 0x3ffffff); + { u32 x118 = (x116 + x90); + { u32 x119 = (x118 >> 0x19); + { u32 x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_mul121666(fe *h, const fe_loose *f) +{ + fe_mul_121666_impl(h->v, f->v); +} + +static void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]) +{ + fe x1, x2, z2, x3, z3; + fe_loose x2l, z2l, x3l; + unsigned swap = 0; + int pos; + u8 e[32]; + + memcpy(e, scalar, 32); + curve25519_clamp_secret(e); 
+ + /* The following implementation was transcribed to Coq and proven to + * correspond to unary scalar multiplication in affine coordinates given + * that x1 != 0 is the x coordinate of some point on the curve. It was + * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives + * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was + * quantified over the underlying field, so it applies to Curve25519 + * itself and the quadratic twist of Curve25519. It was not proven in + * Coq that prime-field arithmetic correctly simulates extension-field + * arithmetic on prime-field values. The decoding of the byte array + * representation of e was not considered. + * + * Specification of Montgomery curves in affine coordinates: + * + * + * Proof that these form a group that is isomorphic to a Weierstrass + * curve: + * + * + * Coq transcription and correctness proof of the loop + * (where scalarbits=255): + * + * + * preconditions: 0 <= e < 2^255 (not necessarily e < order), + * fe_invert(0) = 0 + */ + fe_frombytes(&x1, point); + fe_1(&x2); + fe_0(&z2); + fe_copy(&x3, &x1); + fe_1(&z3); + + for (pos = 254; pos >= 0; --pos) { + fe tmp0, tmp1; + fe_loose tmp0l, tmp1l; + /* loop invariant as of right before the test, for the case + * where x1 != 0: + * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 + * is nonzero + * let r := e >> (pos+1) in the following equalities of + * projective points: + * to_xz (r*P) === if swap then (x3, z3) else (x2, z2) + * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3) + * x1 is the nonzero x coordinate of the nonzero + * point (r*P-(r+1)*P) + */ + unsigned b = 1 & (e[pos / 8] >> (pos & 7)); + swap ^= b; + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + swap = b; + /* Coq transcription of ladderstep formula (called from + * transcribed loop): + * + * + * x1 != 0 + * x1 = 0 + */ + fe_sub(&tmp0l, &x3, &z3); + fe_sub(&tmp1l, &x2, &z2); + fe_add(&x2l, &x2, &z2); + fe_add(&z2l, &x3, &z3); + 
fe_mul_tll(&z3, &tmp0l, &x2l); + fe_mul_tll(&z2, &z2l, &tmp1l); + fe_sq_tl(&tmp0, &tmp1l); + fe_sq_tl(&tmp1, &x2l); + fe_add(&x3l, &z3, &z2); + fe_sub(&z2l, &z3, &z2); + fe_mul_ttt(&x2, &tmp1, &tmp0); + fe_sub(&tmp1l, &tmp1, &tmp0); + fe_sq_tl(&z2, &z2l); + fe_mul121666(&z3, &tmp1l); + fe_sq_tl(&x3, &x3l); + fe_add(&tmp0l, &tmp0, &z3); + fe_mul_ttt(&z3, &x1, &z2); + fe_mul_tll(&z2, &tmp1l, &tmp0l); + } + /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) + * else (x2, z2) + */ + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + + fe_invert(&z2, &z2); + fe_mul_ttt(&x2, &x2, &z2); + fe_tobytes(out, &x2); + + memzero_explicit(&x1, sizeof(x1)); + memzero_explicit(&x2, sizeof(x2)); + memzero_explicit(&z2, sizeof(z2)); + memzero_explicit(&x3, sizeof(x3)); + memzero_explicit(&z3, sizeof(z3)); + memzero_explicit(&x2l, sizeof(x2l)); + memzero_explicit(&z2l, sizeof(z2l)); + memzero_explicit(&x3l, sizeof(x3l)); + memzero_explicit(&e, sizeof(e)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c b/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c new file mode 100644 index 000000000000..d6dcd0ce1892 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2016-2017 INRIA and Microsoft Corporation. + * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is a machine-generated formally verified implementation of Curve25519 + * ECDH from: . Though originally machine + * generated, it has been tweaked to be suitable for use in the kernel. It is + * optimized for 64-bit machines that can efficiently work with 128-bit + * integer types. 
+ */ + +typedef __uint128_t u128; + +static __always_inline u64 u64_eq_mask(u64 a, u64 b) +{ + u64 x = a ^ b; + u64 minus_x = ~x + (u64)1U; + u64 x_or_minus_x = x | minus_x; + u64 xnx = x_or_minus_x >> (u32)63U; + u64 c = xnx - (u64)1U; + return c; +} + +static __always_inline u64 u64_gte_mask(u64 a, u64 b) +{ + u64 x = a; + u64 y = b; + u64 x_xor_y = x ^ y; + u64 x_sub_y = x - y; + u64 x_sub_y_xor_y = x_sub_y ^ y; + u64 q = x_xor_y | x_sub_y_xor_y; + u64 x_xor_q = x ^ q; + u64 x_xor_q_ = x_xor_q >> (u32)63U; + u64 c = x_xor_q_ - (u64)1U; + return c; +} + +static __always_inline void modulo_carry_top(u64 *b) +{ + u64 b4 = b[4]; + u64 b0 = b[0]; + u64 b4_ = b4 & 0x7ffffffffffffLLU; + u64 b0_ = b0 + 19 * (b4 >> 51); + b[4] = b4_; + b[0] = b0_; +} + +static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input) +{ + { + u128 xi = input[0]; + output[0] = ((u64)(xi)); + } + { + u128 xi = input[1]; + output[1] = ((u64)(xi)); + } + { + u128 xi = input[2]; + output[2] = ((u64)(xi)); + } + { + u128 xi = input[3]; + output[3] = ((u64)(xi)); + } + { + u128 xi = input[4]; + output[4] = ((u64)(xi)); + } +} + +static __always_inline void +fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s) +{ + output[0] += (u128)input[0] * s; + output[1] += (u128)input[1] * s; + output[2] += (u128)input[2] * s; + output[3] += (u128)input[3] * s; + output[4] += (u128)input[4] * s; +} + +static __always_inline void fproduct_carry_wide_(u128 *tmp) +{ + { + u32 ctr = 0; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + { + u32 ctr = 1; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + + { + u32 ctr = 2; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = 
((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + { + u32 ctr = 3; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } +} + +static __always_inline void fmul_shift_reduce(u64 *output) +{ + u64 tmp = output[4]; + u64 b0; + { + u32 ctr = 5 - 0 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 1 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 2 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 3 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + output[0] = tmp; + b0 = output[0]; + output[0] = 19 * b0; +} + +static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, + u64 *input21) +{ + u32 i; + u64 input2i; + { + u64 input2i = input21[0]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[1]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[2]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[3]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + i = 4; + input2i = input21[i]; + fproduct_sum_scalar_multiplication_(output, input, input2i); +} + +static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21) +{ + u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] }; + { + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + u128 t[5] = { 0 }; + fmul_mul_shift_reduce_(t, tmp, input21); + fproduct_carry_wide_(t); + b4 = t[4]; + b0 = t[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * 
(((u64)(((b4) >> (51)))))))); + t[4] = b4_; + t[0] = b0_; + fproduct_copy_from_wide_(output, t); + i0 = output[0]; + i1 = output[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + output[0] = i0_; + output[1] = i1_; + } +} + +static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output) +{ + u64 r0 = output[0]; + u64 r1 = output[1]; + u64 r2 = output[2]; + u64 r3 = output[3]; + u64 r4 = output[4]; + u64 d0 = r0 * 2; + u64 d1 = r1 * 2; + u64 d2 = r2 * 2 * 19; + u64 d419 = r4 * 19; + u64 d4 = d419 * 2; + u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) + + (((u128)(d2) * (r3)))); + u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) + + (((u128)(r3 * 19) * (r3)))); + u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) + + (((u128)(d4) * (r3)))); + u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) + + (((u128)(r4) * (d419)))); + u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) + + (((u128)(r2) * (r2)))); + tmp[0] = s0; + tmp[1] = s1; + tmp[2] = s2; + tmp[3] = s3; + tmp[4] = s4; +} + +static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output) +{ + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + fsquare_fsquare__(tmp, output); + fproduct_carry_wide_(tmp); + b4 = tmp[4]; + b0 = tmp[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); + tmp[4] = b4_; + tmp[0] = b0_; + fproduct_copy_from_wide_(output, tmp); + i0 = output[0]; + i1 = output[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + output[0] = i0_; + output[1] = i1_; +} + +static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp, + u32 count1) +{ + u32 i; + fsquare_fsquare_(tmp, output); + for (i = 1; i < count1; ++i) + fsquare_fsquare_(tmp, output); +} + +static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, + u32 count1) +{ + u128 t[5]; + memcpy(output, input, 5 * sizeof(*input)); + 
fsquare_fsquare_times_(output, t, count1); +} + +static __always_inline void fsquare_fsquare_times_inplace(u64 *output, + u32 count1) +{ + u128 t[5]; + fsquare_fsquare_times_(output, t, count1); +} + +static __always_inline void crecip_crecip(u64 *out, u64 *z) +{ + u64 buf[20] = { 0 }; + u64 *a0 = buf; + u64 *t00 = buf + 5; + u64 *b0 = buf + 10; + u64 *t01; + u64 *b1; + u64 *c0; + u64 *a; + u64 *t0; + u64 *b; + u64 *c; + fsquare_fsquare_times(a0, z, 1); + fsquare_fsquare_times(t00, a0, 2); + fmul_fmul(b0, t00, z); + fmul_fmul(a0, b0, a0); + fsquare_fsquare_times(t00, a0, 1); + fmul_fmul(b0, t00, b0); + fsquare_fsquare_times(t00, b0, 5); + t01 = buf + 5; + b1 = buf + 10; + c0 = buf + 15; + fmul_fmul(b1, t01, b1); + fsquare_fsquare_times(t01, b1, 10); + fmul_fmul(c0, t01, b1); + fsquare_fsquare_times(t01, c0, 20); + fmul_fmul(t01, t01, c0); + fsquare_fsquare_times_inplace(t01, 10); + fmul_fmul(b1, t01, b1); + fsquare_fsquare_times(t01, b1, 50); + a = buf; + t0 = buf + 5; + b = buf + 10; + c = buf + 15; + fmul_fmul(c, t0, b); + fsquare_fsquare_times(t0, c, 100); + fmul_fmul(t0, t0, c); + fsquare_fsquare_times_inplace(t0, 50); + fmul_fmul(t0, t0, b); + fsquare_fsquare_times_inplace(t0, 5); + fmul_fmul(out, t0, a); +} + +static __always_inline void fsum(u64 *a, u64 *b) +{ + a[0] += b[0]; + a[1] += b[1]; + a[2] += b[2]; + a[3] += b[3]; + a[4] += b[4]; +} + +static __always_inline void fdifference(u64 *a, u64 *b) +{ + u64 tmp[5] = { 0 }; + u64 b0; + u64 b1; + u64 b2; + u64 b3; + u64 b4; + memcpy(tmp, b, 5 * sizeof(*b)); + b0 = tmp[0]; + b1 = tmp[1]; + b2 = tmp[2]; + b3 = tmp[3]; + b4 = tmp[4]; + tmp[0] = b0 + 0x3fffffffffff68LLU; + tmp[1] = b1 + 0x3ffffffffffff8LLU; + tmp[2] = b2 + 0x3ffffffffffff8LLU; + tmp[3] = b3 + 0x3ffffffffffff8LLU; + tmp[4] = b4 + 0x3ffffffffffff8LLU; + { + u64 xi = a[0]; + u64 yi = tmp[0]; + a[0] = yi - xi; + } + { + u64 xi = a[1]; + u64 yi = tmp[1]; + a[1] = yi - xi; + } + { + u64 xi = a[2]; + u64 yi = tmp[2]; + a[2] = yi - xi; + } + { + u64 xi = 
a[3]; + u64 yi = tmp[3]; + a[3] = yi - xi; + } + { + u64 xi = a[4]; + u64 yi = tmp[4]; + a[4] = yi - xi; + } +} + +static __always_inline void fscalar(u64 *output, u64 *b, u64 s) +{ + u128 tmp[5]; + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + { + u64 xi = b[0]; + tmp[0] = ((u128)(xi) * (s)); + } + { + u64 xi = b[1]; + tmp[1] = ((u128)(xi) * (s)); + } + { + u64 xi = b[2]; + tmp[2] = ((u128)(xi) * (s)); + } + { + u64 xi = b[3]; + tmp[3] = ((u128)(xi) * (s)); + } + { + u64 xi = b[4]; + tmp[4] = ((u128)(xi) * (s)); + } + fproduct_carry_wide_(tmp); + b4 = tmp[4]; + b0 = tmp[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); + tmp[4] = b4_; + tmp[0] = b0_; + fproduct_copy_from_wide_(output, tmp); +} + +static __always_inline void crecip(u64 *output, u64 *input) +{ + crecip_crecip(output, input); +} + +static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, + u64 swap1, u32 ctr) +{ + u32 i = ctr - 1; + u64 ai = a[i]; + u64 bi = b[i]; + u64 x = swap1 & (ai ^ bi); + u64 ai1 = ai ^ x; + u64 bi1 = bi ^ x; + a[i] = ai1; + b[i] = bi1; +} + +static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1) +{ + point_swap_conditional_step(a, b, swap1, 5); + point_swap_conditional_step(a, b, swap1, 4); + point_swap_conditional_step(a, b, swap1, 3); + point_swap_conditional_step(a, b, swap1, 2); + point_swap_conditional_step(a, b, swap1, 1); +} + +static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap) +{ + u64 swap1 = 0 - iswap; + point_swap_conditional5(a, b, swap1); + point_swap_conditional5(a + 5, b + 5, swap1); +} + +static __always_inline void point_copy(u64 *output, u64 *input) +{ + memcpy(output, input, 5 * sizeof(*input)); + memcpy(output + 5, input + 5, 5 * sizeof(*input)); +} + +static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, + u64 *pq, u64 *qmqp) +{ + u64 *qx = qmqp; + u64 *x2 = pp; + u64 *z2 = pp + 5; + u64 *x3 = ppq; + u64 *z3 
= ppq + 5; + u64 *x = p; + u64 *z = p + 5; + u64 *xprime = pq; + u64 *zprime = pq + 5; + u64 buf[40] = { 0 }; + u64 *origx = buf; + u64 *origxprime0 = buf + 5; + u64 *xxprime0; + u64 *zzprime0; + u64 *origxprime; + xxprime0 = buf + 25; + zzprime0 = buf + 30; + memcpy(origx, x, 5 * sizeof(*x)); + fsum(x, z); + fdifference(z, origx); + memcpy(origxprime0, xprime, 5 * sizeof(*xprime)); + fsum(xprime, zprime); + fdifference(zprime, origxprime0); + fmul_fmul(xxprime0, xprime, z); + fmul_fmul(zzprime0, x, zprime); + origxprime = buf + 5; + { + u64 *xx0; + u64 *zz0; + u64 *xxprime; + u64 *zzprime; + u64 *zzzprime; + xx0 = buf + 15; + zz0 = buf + 20; + xxprime = buf + 25; + zzprime = buf + 30; + zzzprime = buf + 35; + memcpy(origxprime, xxprime, 5 * sizeof(*xxprime)); + fsum(xxprime, zzprime); + fdifference(zzprime, origxprime); + fsquare_fsquare_times(x3, xxprime, 1); + fsquare_fsquare_times(zzzprime, zzprime, 1); + fmul_fmul(z3, zzzprime, qx); + fsquare_fsquare_times(xx0, x, 1); + fsquare_fsquare_times(zz0, z, 1); + { + u64 *zzz; + u64 *xx; + u64 *zz; + u64 scalar; + zzz = buf + 10; + xx = buf + 15; + zz = buf + 20; + fmul_fmul(x2, xx, zz); + fdifference(zz, xx); + scalar = 121665; + fscalar(zzz, zz, scalar); + fsum(zzz, xx); + fmul_fmul(z2, zzz, zz); + } + } +} + +static __always_inline void +ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, + u64 *q, u8 byt) +{ + u64 bit0 = (u64)(byt >> 7); + u64 bit; + point_swap_conditional(nq, nqpq, bit0); + addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q); + bit = (u64)(byt >> 7); + point_swap_conditional(nq2, nqpq2, bit); +} + +static __always_inline void +ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, + u64 *nqpq2, u64 *q, u8 byt) +{ + u8 byt1; + ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt); + byt1 = byt << 1; + ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1); +} + +static __always_inline void 
+ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, + u64 *q, u8 byt, u32 i) +{ + while (i--) { + ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2, + nqpq2, q, byt); + byt <<= 2; + } +} + +static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, + u64 *nqpq, u64 *nq2, + u64 *nqpq2, u64 *q, + u32 i) +{ + while (i--) { + u8 byte = n1[i]; + ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, + byte, 4); + } +} + +static void ladder_cmult(u64 *result, u8 *n1, u64 *q) +{ + u64 point_buf[40] = { 0 }; + u64 *nq = point_buf; + u64 *nqpq = point_buf + 10; + u64 *nq2 = point_buf + 20; + u64 *nqpq2 = point_buf + 30; + point_copy(nqpq, q); + nq[0] = 1; + ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32); + point_copy(result, nq); +} + +static __always_inline void format_fexpand(u64 *output, const u8 *input) +{ + const u8 *x00 = input + 6; + const u8 *x01 = input + 12; + const u8 *x02 = input + 19; + const u8 *x0 = input + 24; + u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4; + i0 = get_unaligned_le64(input); + i1 = get_unaligned_le64(x00); + i2 = get_unaligned_le64(x01); + i3 = get_unaligned_le64(x02); + i4 = get_unaligned_le64(x0); + output0 = i0 & 0x7ffffffffffffLLU; + output1 = i1 >> 3 & 0x7ffffffffffffLLU; + output2 = i2 >> 6 & 0x7ffffffffffffLLU; + output3 = i3 >> 1 & 0x7ffffffffffffLLU; + output4 = i4 >> 12 & 0x7ffffffffffffLLU; + output[0] = output0; + output[1] = output1; + output[2] = output2; + output[3] = output3; + output[4] = output4; +} + +static __always_inline void format_fcontract_first_carry_pass(u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 t1_ = t1 + (t0 >> 51); + u64 t0_ = t0 & 0x7ffffffffffffLLU; + u64 t2_ = t2 + (t1_ >> 51); + u64 t1__ = t1_ & 0x7ffffffffffffLLU; + u64 t3_ = t3 + (t2_ >> 51); + u64 t2__ = t2_ & 0x7ffffffffffffLLU; + u64 t4_ = t4 + (t3_ >> 51); + u64 t3__ = t3_ & 
0x7ffffffffffffLLU; + input[0] = t0_; + input[1] = t1__; + input[2] = t2__; + input[3] = t3__; + input[4] = t4_; +} + +static __always_inline void format_fcontract_first_carry_full(u64 *input) +{ + format_fcontract_first_carry_pass(input); + modulo_carry_top(input); +} + +static __always_inline void format_fcontract_second_carry_pass(u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 t1_ = t1 + (t0 >> 51); + u64 t0_ = t0 & 0x7ffffffffffffLLU; + u64 t2_ = t2 + (t1_ >> 51); + u64 t1__ = t1_ & 0x7ffffffffffffLLU; + u64 t3_ = t3 + (t2_ >> 51); + u64 t2__ = t2_ & 0x7ffffffffffffLLU; + u64 t4_ = t4 + (t3_ >> 51); + u64 t3__ = t3_ & 0x7ffffffffffffLLU; + input[0] = t0_; + input[1] = t1__; + input[2] = t2__; + input[3] = t3__; + input[4] = t4_; +} + +static __always_inline void format_fcontract_second_carry_full(u64 *input) +{ + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + format_fcontract_second_carry_pass(input); + modulo_carry_top(input); + i0 = input[0]; + i1 = input[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + input[0] = i0_; + input[1] = i1_; +} + +static __always_inline void format_fcontract_trim(u64 *input) +{ + u64 a0 = input[0]; + u64 a1 = input[1]; + u64 a2 = input[2]; + u64 a3 = input[3]; + u64 a4 = input[4]; + u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU); + u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU); + u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU); + u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU); + u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU); + u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4; + u64 a0_ = a0 - (0x7ffffffffffedLLU & mask); + u64 a1_ = a1 - (0x7ffffffffffffLLU & mask); + u64 a2_ = a2 - (0x7ffffffffffffLLU & mask); + u64 a3_ = a3 - (0x7ffffffffffffLLU & mask); + u64 a4_ = a4 - (0x7ffffffffffffLLU & mask); + input[0] = a0_; + input[1] = a1_; + input[2] = a2_; + input[3] = a3_; + input[4] = a4_; +} + +static __always_inline void 
format_fcontract_store(u8 *output, u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 o0 = t1 << 51 | t0; + u64 o1 = t2 << 38 | t1 >> 13; + u64 o2 = t3 << 25 | t2 >> 26; + u64 o3 = t4 << 12 | t3 >> 39; + u8 *b0 = output; + u8 *b1 = output + 8; + u8 *b2 = output + 16; + u8 *b3 = output + 24; + put_unaligned_le64(o0, b0); + put_unaligned_le64(o1, b1); + put_unaligned_le64(o2, b2); + put_unaligned_le64(o3, b3); +} + +static __always_inline void format_fcontract(u8 *output, u64 *input) +{ + format_fcontract_first_carry_full(input); + format_fcontract_second_carry_full(input); + format_fcontract_trim(input); + format_fcontract_store(output, input); +} + +static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point) +{ + u64 *x = point; + u64 *z = point + 5; + u64 buf[10] __aligned(32) = { 0 }; + u64 *zmone = buf; + u64 *sc = buf + 5; + crecip(zmone, z); + fmul_fmul(sc, x, zmone); + format_fcontract(scalar, sc); +} + +static void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + u64 buf0[10] __aligned(32) = { 0 }; + u64 *x0 = buf0; + u64 *z = buf0 + 5; + u64 *q; + format_fexpand(x0, basepoint); + z[0] = 1; + q = buf0; + { + u8 e[32] __aligned(32) = { 0 }; + u8 *scalar; + memcpy(e, secret, 32); + curve25519_clamp_secret(e); + scalar = e; + { + u64 buf[15] = { 0 }; + u64 *nq = buf; + u64 *x = nq; + x[0] = 1; + ladder_cmult(nq, scalar, q); + format_scalar_of_point(mypublic, nq); + memzero_explicit(buf, sizeof(buf)); + } + memzero_explicit(e, sizeof(e)); + } + memzero_explicit(buf0, sizeof(buf0)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c new file mode 100644 index 000000000000..e08cc2ba74f3 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c @@ -0,0 +1,44 @@ +// 
SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include +#include + +#include "curve25519-x86_64.c" + +static bool curve25519_use_bmi2_adx __ro_after_init; +static bool *const curve25519_nobs[] __initconst = { + &curve25519_use_bmi2_adx }; + +static void __init curve25519_fpu_init(void) +{ + curve25519_use_bmi2_adx = IS_ENABLED(CONFIG_AS_BMI2) && + IS_ENABLED(CONFIG_AS_ADX) && + boot_cpu_has(X86_FEATURE_BMI2) && + boot_cpu_has(X86_FEATURE_ADX); +} + +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_AS_ADX) && IS_ENABLED(CONFIG_AS_BMI2) && + curve25519_use_bmi2_adx) { + curve25519_ever64(mypublic, secret, basepoint); + return true; + } + return false; +} + +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_AS_ADX) && IS_ENABLED(CONFIG_AS_BMI2) && + curve25519_use_bmi2_adx) { + curve25519_ever64_base(pub, secret); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c new file mode 100644 index 000000000000..8b6872a2f0d0 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c @@ -0,0 +1,1580 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2020 Jason A. Donenfeld . All Rights Reserved. 
+ * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation + */ + +static __always_inline u64 eq_mask(u64 a, u64 b) +{ + u64 x = a ^ b; + u64 minus_x = ~x + (u64)1U; + u64 x_or_minus_x = x | minus_x; + u64 xnx = x_or_minus_x >> (u32)63U; + return xnx - (u64)1U; +} + +static __always_inline u64 gte_mask(u64 a, u64 b) +{ + u64 x = a; + u64 y = b; + u64 x_xor_y = x ^ y; + u64 x_sub_y = x - y; + u64 x_sub_y_xor_y = x_sub_y ^ y; + u64 q = x_xor_y | x_sub_y_xor_y; + u64 x_xor_q = x ^ q; + u64 x_xor_q_ = x_xor_q >> (u32)63U; + return x_xor_q_ - (u64)1U; +} + +/* Computes the addition of four-element f1 with value in f2 + * and returns the carry (if any) */ +static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2) +{ + u64 carry_r; + + asm volatile( + /* Clear registers to propagate the carry bit */ + " xor %%r8d, %%r8d;" + " xor %%r9d, %%r9d;" + " xor %%r10d, %%r10d;" + " xor %%r11d, %%r11d;" + " xor %k1, %k1;" + + /* Begin addition chain */ + " addq 0(%3), %0;" + " movq %0, 0(%2);" + " adcxq 8(%3), %%r8;" + " movq %%r8, 8(%2);" + " adcxq 16(%3), %%r9;" + " movq %%r9, 16(%2);" + " adcxq 24(%3), %%r10;" + " movq %%r10, 24(%2);" + + /* Return the carry bit in a register */ + " adcx %%r11, %1;" + : "+&r"(f2), "=&r"(carry_r) + : "r"(out), "r"(f1) + : "%r8", "%r9", "%r10", "%r11", "memory", "cc"); + + return carry_r; +} + +/* Computes the field addition of two field elements */ +static inline void fadd(u64 *out, const u64 *f1, const u64 *f2) +{ + asm volatile( + /* Compute the raw addition of f1 + f2 */ + " movq 0(%0), %%r8;" + " addq 0(%2), %%r8;" + " movq 8(%0), %%r9;" + " adcxq 8(%2), %%r9;" + " movq 16(%0), %%r10;" + " adcxq 16(%2), %%r10;" + " movq 24(%0), %%r11;" + " adcxq 24(%2), %%r11;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $0, %%rax;" + " mov $38, %0;" + " cmovc %0, %%rax;" + + /* Step 2: Add carry*38 to the original sum */ + " xor %%ecx, %%ecx;" + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 
8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %0, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + : "+&r"(f2) + : "r"(out), "r"(f1) + : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"); +} + +/* Computes the field subtraction of two field elements */ +static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) +{ + asm volatile( + /* Compute the raw subtraction of f1-f2 */ + " movq 0(%1), %%r8;" + " subq 0(%2), %%r8;" + " movq 8(%1), %%r9;" + " sbbq 8(%2), %%r9;" + " movq 16(%1), %%r10;" + " sbbq 16(%2), %%r10;" + " movq 24(%1), %%r11;" + " sbbq 24(%2), %%r11;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $0, %%rax;" + " mov $38, %%rcx;" + " cmovc %%rcx, %%rax;" + + /* Step 2: Subtract carry*38 from the original difference */ + " sub %%rax, %%r8;" + " sbb $0, %%r9;" + " sbb $0, %%r10;" + " sbb $0, %%r11;" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rcx, %%rax;" + " sub %%rax, %%r8;" + + /* Store the result */ + " movq %%r8, 0(%0);" + " movq %%r9, 8(%0);" + " movq %%r10, 16(%0);" + " movq %%r11, 24(%0);" + : + : "r"(out), "r"(f1), "r"(f2) + : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"); +} + +/* Computes a field multiplication: out <- f1 * f2 + * Uses the 8-element buffer tmp for intermediate results */ +static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) +{ + asm volatile( + + /* Compute the raw multiplication: tmp <- src1 * src2 */ + + /* Compute src1[0] * src2 */ + " movq 0(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 0(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 8(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 24(%1), 
%%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 8(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 8(%2), %%r8;" + " movq %%r8, 8(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 16(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[2] * src2 */ + " movq 16(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 16(%2), %%r8;" + " movq %%r8, 16(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 24(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 24(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 24(%2), %%r8;" + " movq %%r8, 24(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 32(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 40(%2);" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 48(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 56(%2);" + + /* Line up pointers */ + " mov %2, %0;" + " mov %3, %2;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " 
adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 8(%2);" + " adcx %1, %%r10;" + " movq %%r10, 16(%2);" + " adcx %1, %%r11;" + " movq %%r11, 24(%2);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%2);" + : "+&r"(f1), "+&r"(f2), "+&r"(tmp) + : "r"(out) + : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", + "%r14", "memory", "cc"); +} + +/* Computes two field multiplications: + * out[0] <- f1[0] * f2[0] + * out[1] <- f1[1] * f2[1] + * Uses the 16-element buffer tmp for intermediate results: */ +static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) +{ + asm volatile( + + /* Compute the raw multiplication tmp[0] <- f1[0] * f2[0] */ + + /* Compute src1[0] * src2 */ + " movq 0(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 0(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 8(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 8(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 8(%2), %%r8;" + " movq %%r8, 8(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 16(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, 
%%rax;" + + /* Compute src1[2] * src2 */ + " movq 16(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 16(%2), %%r8;" + " movq %%r8, 16(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 24(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 24(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 24(%2), %%r8;" + " movq %%r8, 24(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 32(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 40(%2);" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 48(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 56(%2);" + + /* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */ + + /* Compute src1[0] * src2 */ + " movq 32(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 64(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 72(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 40(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 72(%2), %%r8;" + " movq %%r8, 72(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 80(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, 
%%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[2] * src2 */ + " movq 48(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 80(%2), %%r8;" + " movq %%r8, 80(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 88(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 56(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 88(%2), %%r8;" + " movq %%r8, 88(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 96(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 104(%2);" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 112(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 120(%2);" + + /* Line up pointers */ + " mov %2, %0;" + " mov %3, %2;" + + /* Wrap the results back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 8(%2);" + " adcx %1, %%r10;" + " movq %%r10, 16(%2);" + " adcx %1, %%r11;" + " movq %%r11, 24(%2);" + + /* Step 3: Fold the carry bit 
back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%2);" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 96(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 64(%0), %%r8;" + " mulxq 104(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 72(%0), %%r9;" + " mulxq 112(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 80(%0), %%r10;" + " mulxq 120(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 88(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 40(%2);" + " adcx %1, %%r10;" + " movq %%r10, 48(%2);" + " adcx %1, %%r11;" + " movq %%r11, 56(%2);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 32(%2);" + : "+&r"(f1), "+&r"(f2), "+&r"(tmp) + : "r"(out) + : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", + "%r14", "memory", "cc"); +} + +/* Computes the field multiplication of four-element f1 with value in f2 + * Requires f2 to be smaller than 2^17 */ +static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2) +{ + register u64 f2_r asm("rdx") = f2; + + asm volatile( + /* Compute the raw multiplication of f1*f2 */ + " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */ + " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */ + " add %%rcx, %%r9;" + " mov $0, %%rcx;" + " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */ + " adcx %%rbx, %%r10;" + " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */ + " adcx %%r13, %%r11;" + " adcx %%rcx, %%rax;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $38, %%rdx;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + 
" movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + : "+&r"(f2_r) + : "r"(out), "r"(f1) + : "%rax", "%rbx", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r13", + "memory", "cc"); +} + +/* Computes p1 <- bit ? p2 : p1 in constant time */ +static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2) +{ + asm volatile( + /* Transfer bit into CF flag */ + " add $18446744073709551615, %0;" + + /* cswap p1[0], p2[0] */ + " movq 0(%1), %%r8;" + " movq 0(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 0(%1);" + " movq %%r9, 0(%2);" + + /* cswap p1[1], p2[1] */ + " movq 8(%1), %%r8;" + " movq 8(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 8(%1);" + " movq %%r9, 8(%2);" + + /* cswap p1[2], p2[2] */ + " movq 16(%1), %%r8;" + " movq 16(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 16(%1);" + " movq %%r9, 16(%2);" + + /* cswap p1[3], p2[3] */ + " movq 24(%1), %%r8;" + " movq 24(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 24(%1);" + " movq %%r9, 24(%2);" + + /* cswap p1[4], p2[4] */ + " movq 32(%1), %%r8;" + " movq 32(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 32(%1);" + " movq %%r9, 32(%2);" + + /* cswap p1[5], p2[5] */ + " movq 40(%1), %%r8;" + " movq 40(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 40(%1);" + " movq %%r9, 40(%2);" + + /* cswap p1[6], p2[6] */ + " movq 48(%1), %%r8;" + " movq 48(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 48(%1);" + " movq %%r9, 48(%2);" + + /* cswap p1[7], p2[7] */ + " movq 56(%1), %%r8;" + " movq 
56(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 56(%1);" + " movq %%r9, 56(%2);" + : "+&r"(bit) + : "r"(p1), "r"(p2) + : "%r8", "%r9", "%r10", "memory", "cc"); +} + +/* Computes the square of a field element: out <- f * f + * Uses the 8-element buffer tmp for intermediate results */ +static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) +{ + asm volatile( + /* Compute the raw multiplication: tmp <- f * f */ + + /* Step 1: Compute all partial products */ + " movq 0(%0), %%rdx;" /* f[0] */ + " mulxq 8(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 16(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* f[2]*f[0] */ + " mulxq 24(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 24(%0), %%rdx;" /* f[3] */ + " mulxq 8(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 16(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 8(%0), %%rdx;" + " adcx %%r15, %%r13;" /* f1 */ + " mulxq 16(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, %%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 0(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 0(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 8(%1);" + " movq 8(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 16(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 24(%1);" + " movq 16(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 32(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 40(%1);" + " movq 24(%0), %%rdx;" + " mulx %%rdx, %%rax, 
%%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 48(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 56(%1);" + + /* Line up pointers */ + " mov %1, %0;" + " mov %2, %1;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + : "+&r,&r"(f), "+&r,&r"(tmp) + : "r,m"(out) + : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", + "%r13", "%r14", "%r15", "memory", "cc"); +} + +/* Computes two field squarings: + * out[0] <- f[0] * f[0] + * out[1] <- f[1] * f[1] + * Uses the 16-element buffer tmp for intermediate results */ +static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) +{ + asm volatile( + /* Step 1: Compute all partial products */ + " movq 0(%0), %%rdx;" /* f[0] */ + " mulxq 8(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 16(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* f[2]*f[0] */ + " mulxq 24(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 24(%0), %%rdx;" /* f[3] */ + " mulxq 8(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 16(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 8(%0), %%rdx;" + " adcx %%r15, 
%%r13;" /* f1 */ + " mulxq 16(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, %%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 0(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 0(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 8(%1);" + " movq 8(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 16(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 24(%1);" + " movq 16(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 32(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 40(%1);" + " movq 24(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 48(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 56(%1);" + + /* Step 1: Compute all partial products */ + " movq 32(%0), %%rdx;" /* f[0] */ + " mulxq 40(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 48(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* f[2]*f[0] */ + " mulxq 56(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 56(%0), %%rdx;" /* f[3] */ + " mulxq 40(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 48(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 40(%0), %%rdx;" + " adcx %%r15, %%r13;" /* f1 */ + " mulxq 48(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, 
%%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 32(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 64(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 72(%1);" + " movq 40(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 80(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 88(%1);" + " movq 48(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 96(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 104(%1);" + " movq 56(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 112(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 120(%1);" + + /* Line up pointers */ + " mov %1, %0;" + " mov %2, %1;" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 96(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 64(%0), %%r8;" + " mulxq 104(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 72(%0), %%r9;" + " mulxq 112(%0), %%r10, %%r13;" + 
" adcx %%rbx, %%r10;" + " adoxq 80(%0), %%r10;" + " mulxq 120(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 88(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 40(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 48(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 56(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 32(%1);" + : "+&r,&r"(f), "+&r,&r"(tmp) + : "r,m"(out) + : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", + "%r13", "%r14", "%r15", "memory", "cc"); +} + +static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2) +{ + u64 *nq = p01_tmp1; + u64 *nq_p1 = p01_tmp1 + (u32)8U; + u64 *tmp1 = p01_tmp1 + (u32)16U; + u64 *x1 = q; + u64 *x2 = nq; + u64 *z2 = nq + (u32)4U; + u64 *z3 = nq_p1 + (u32)4U; + u64 *a = tmp1; + u64 *b = tmp1 + (u32)4U; + u64 *ab = tmp1; + u64 *dc = tmp1 + (u32)8U; + u64 *x3; + u64 *z31; + u64 *d0; + u64 *c0; + u64 *a1; + u64 *b1; + u64 *d; + u64 *c; + u64 *ab1; + u64 *dc1; + fadd(a, x2, z2); + fsub(b, x2, z2); + x3 = nq_p1; + z31 = nq_p1 + (u32)4U; + d0 = dc; + c0 = dc + (u32)4U; + fadd(c0, x3, z31); + fsub(d0, x3, z31); + fmul2(dc, dc, ab, tmp2); + fadd(x3, d0, c0); + fsub(z31, d0, c0); + a1 = tmp1; + b1 = tmp1 + (u32)4U; + d = tmp1 + (u32)8U; + c = tmp1 + (u32)12U; + ab1 = tmp1; + dc1 = tmp1 + (u32)8U; + fsqr2(dc1, ab1, tmp2); + fsqr2(nq_p1, nq_p1, tmp2); + a1[0U] = c[0U]; + a1[1U] = c[1U]; + a1[2U] = c[2U]; + a1[3U] = c[3U]; + fsub(c, d, c); + fmul_scalar(b1, c, (u64)121665U); + fadd(b1, b1, d); + fmul2(nq, dc1, ab1, tmp2); + fmul(z3, z3, x1, tmp2); +} + +static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2) +{ + u64 *x2 = nq; + u64 *z2 = nq + (u32)4U; + u64 *a = tmp1; + u64 *b = tmp1 + (u32)4U; + u64 *d = tmp1 + (u32)8U; + u64 *c = tmp1 + (u32)12U; 
+ u64 *ab = tmp1; + u64 *dc = tmp1 + (u32)8U; + fadd(a, x2, z2); + fsub(b, x2, z2); + fsqr2(dc, ab, tmp2); + a[0U] = c[0U]; + a[1U] = c[1U]; + a[2U] = c[2U]; + a[3U] = c[3U]; + fsub(c, d, c); + fmul_scalar(b, c, (u64)121665U); + fadd(b, b, d); + fmul2(nq, dc, ab, tmp2); +} + +static void montgomery_ladder(u64 *out, const u8 *key, u64 *init1) +{ + u64 tmp2[16U] = { 0U }; + u64 p01_tmp1_swap[33U] = { 0U }; + u64 *p0 = p01_tmp1_swap; + u64 *p01 = p01_tmp1_swap; + u64 *p03 = p01; + u64 *p11 = p01 + (u32)8U; + u64 *x0; + u64 *z0; + u64 *p01_tmp1; + u64 *p01_tmp11; + u64 *nq10; + u64 *nq_p11; + u64 *swap1; + u64 sw0; + u64 *nq1; + u64 *tmp1; + memcpy(p11, init1, (u32)8U * sizeof(init1[0U])); + x0 = p03; + z0 = p03 + (u32)4U; + x0[0U] = (u64)1U; + x0[1U] = (u64)0U; + x0[2U] = (u64)0U; + x0[3U] = (u64)0U; + z0[0U] = (u64)0U; + z0[1U] = (u64)0U; + z0[2U] = (u64)0U; + z0[3U] = (u64)0U; + p01_tmp1 = p01_tmp1_swap; + p01_tmp11 = p01_tmp1_swap; + nq10 = p01_tmp1_swap; + nq_p11 = p01_tmp1_swap + (u32)8U; + swap1 = p01_tmp1_swap + (u32)32U; + cswap2((u64)1U, nq10, nq_p11); + point_add_and_double(init1, p01_tmp11, tmp2); + swap1[0U] = (u64)1U; + { + u32 i; + for (i = (u32)0U; i < (u32)251U; i = i + (u32)1U) { + u64 *p01_tmp12 = p01_tmp1_swap; + u64 *swap2 = p01_tmp1_swap + (u32)32U; + u64 *nq2 = p01_tmp12; + u64 *nq_p12 = p01_tmp12 + (u32)8U; + u64 bit = (u64)(key[((u32)253U - i) / (u32)8U] >> ((u32)253U - i) % (u32)8U & (u8)1U); + u64 sw = swap2[0U] ^ bit; + cswap2(sw, nq2, nq_p12); + point_add_and_double(init1, p01_tmp12, tmp2); + swap2[0U] = bit; + } + } + sw0 = swap1[0U]; + cswap2(sw0, nq10, nq_p11); + nq1 = p01_tmp1; + tmp1 = p01_tmp1 + (u32)16U; + point_double(nq1, tmp1, tmp2); + point_double(nq1, tmp1, tmp2); + point_double(nq1, tmp1, tmp2); + memcpy(out, p0, (u32)8U * sizeof(p0[0U])); + + memzero_explicit(tmp2, sizeof(tmp2)); + memzero_explicit(p01_tmp1_swap, sizeof(p01_tmp1_swap)); +} + +static void fsquare_times(u64 *o, const u64 *inp, u64 *tmp, u32 n1) +{ + u32 i; + 
fsqr(o, inp, tmp); + for (i = (u32)0U; i < n1 - (u32)1U; i = i + (u32)1U) + fsqr(o, o, tmp); +} + +static void finv(u64 *o, const u64 *i, u64 *tmp) +{ + u64 t1[16U] = { 0U }; + u64 *a0 = t1; + u64 *b = t1 + (u32)4U; + u64 *c = t1 + (u32)8U; + u64 *t00 = t1 + (u32)12U; + u64 *tmp1 = tmp; + u64 *a; + u64 *t0; + fsquare_times(a0, i, tmp1, (u32)1U); + fsquare_times(t00, a0, tmp1, (u32)2U); + fmul(b, t00, i, tmp); + fmul(a0, b, a0, tmp); + fsquare_times(t00, a0, tmp1, (u32)1U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)5U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)10U); + fmul(c, t00, b, tmp); + fsquare_times(t00, c, tmp1, (u32)20U); + fmul(t00, t00, c, tmp); + fsquare_times(t00, t00, tmp1, (u32)10U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)50U); + fmul(c, t00, b, tmp); + fsquare_times(t00, c, tmp1, (u32)100U); + fmul(t00, t00, c, tmp); + fsquare_times(t00, t00, tmp1, (u32)50U); + fmul(t00, t00, b, tmp); + fsquare_times(t00, t00, tmp1, (u32)5U); + a = t1; + t0 = t1 + (u32)12U; + fmul(o, t0, a, tmp); +} + +static void store_felem(u64 *b, u64 *f) +{ + u64 f30 = f[3U]; + u64 top_bit0 = f30 >> (u32)63U; + u64 carry0; + u64 f31; + u64 top_bit; + u64 carry; + u64 f0; + u64 f1; + u64 f2; + u64 f3; + u64 m0; + u64 m1; + u64 m2; + u64 m3; + u64 mask; + u64 f0_; + u64 f1_; + u64 f2_; + u64 f3_; + u64 o0; + u64 o1; + u64 o2; + u64 o3; + f[3U] = f30 & (u64)0x7fffffffffffffffU; + carry0 = add_scalar(f, f, (u64)19U * top_bit0); + f31 = f[3U]; + top_bit = f31 >> (u32)63U; + f[3U] = f31 & (u64)0x7fffffffffffffffU; + carry = add_scalar(f, f, (u64)19U * top_bit); + f0 = f[0U]; + f1 = f[1U]; + f2 = f[2U]; + f3 = f[3U]; + m0 = gte_mask(f0, (u64)0xffffffffffffffedU); + m1 = eq_mask(f1, (u64)0xffffffffffffffffU); + m2 = eq_mask(f2, (u64)0xffffffffffffffffU); + m3 = eq_mask(f3, (u64)0x7fffffffffffffffU); + mask = ((m0 & m1) & m2) & m3; + f0_ = f0 - (mask & (u64)0xffffffffffffffedU); + f1_ = f1 - (mask & (u64)0xffffffffffffffffU); + f2_ = 
f2 - (mask & (u64)0xffffffffffffffffU); + f3_ = f3 - (mask & (u64)0x7fffffffffffffffU); + o0 = f0_; + o1 = f1_; + o2 = f2_; + o3 = f3_; + b[0U] = o0; + b[1U] = o1; + b[2U] = o2; + b[3U] = o3; +} + +static void encode_point(u8 *o, const u64 *i) +{ + const u64 *x = i; + const u64 *z = i + (u32)4U; + u64 tmp[4U] = { 0U }; + u64 tmp_w[16U] = { 0U }; + finv(tmp, z, tmp_w); + fmul(tmp, tmp, x, tmp_w); + store_felem((u64 *)o, tmp); +} + +static void curve25519_ever64(u8 *out, const u8 *priv, const u8 *pub) +{ + u64 init1[8U] = { 0U }; + u64 tmp[4U] = { 0U }; + u64 tmp3; + u64 *x; + u64 *z; + { + u32 i; + for (i = (u32)0U; i < (u32)4U; i = i + (u32)1U) { + u64 *os = tmp; + const u8 *bj = pub + i * (u32)8U; + u64 u = *(u64 *)bj; + u64 r = u; + u64 x0 = r; + os[i] = x0; + } + } + tmp3 = tmp[3U]; + tmp[3U] = tmp3 & (u64)0x7fffffffffffffffU; + x = init1; + z = init1 + (u32)4U; + z[0U] = (u64)1U; + z[1U] = (u64)0U; + z[2U] = (u64)0U; + z[3U] = (u64)0U; + x[0U] = tmp[0U]; + x[1U] = tmp[1U]; + x[2U] = tmp[2U]; + x[3U] = tmp[3U]; + montgomery_ladder(init1, priv, init1); + encode_point(out, init1); +} + +/* The below constants were generated using this sage script: + * + * #!/usr/bin/env sage + * import sys + * from sage.all import * + * def limbs(n): + * n = int(n) + * l = ((n >> 0) % 2^64, (n >> 64) % 2^64, (n >> 128) % 2^64, (n >> 192) % 2^64) + * return "0x%016xULL, 0x%016xULL, 0x%016xULL, 0x%016xULL" % l + * ec = EllipticCurve(GF(2^255 - 19), [0, 486662, 0, 1, 0]) + * p_minus_s = (ec.lift_x(9) - ec.lift_x(1))[0] + * print("static const u64 p_minus_s[] = { %s };\n" % limbs(p_minus_s)) + * print("static const u64 table_ladder[] = {") + * p = ec.lift_x(9) + * for i in range(252): + * l = (p[0] + p[2]) / (p[0] - p[2]) + * print(("\t%s" + ("," if i != 251 else "")) % limbs(l)) + * p = p * 2 + * print("};") + * + */ + +static const u64 p_minus_s[] = { 0x816b1e0137d48290ULL, 0x440f6a51eb4d1207ULL, 0x52385f46dca2b71dULL, 0x215132111d8354cbULL }; + +static const u64 table_ladder[] = { 
+ 0xfffffffffffffff3ULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0x5fffffffffffffffULL, + 0x6b8220f416aafe96ULL, 0x82ebeb2b4f566a34ULL, 0xd5a9a5b075a5950fULL, 0x5142b2cf4b2488f4ULL, + 0x6aaebc750069680cULL, 0x89cf7820a0f99c41ULL, 0x2a58d9183b56d0f4ULL, 0x4b5aca80e36011a4ULL, + 0x329132348c29745dULL, 0xf4a2e616e1642fd7ULL, 0x1e45bb03ff67bc34ULL, 0x306912d0f42a9b4aULL, + 0xff886507e6af7154ULL, 0x04f50e13dfeec82fULL, 0xaa512fe82abab5ceULL, 0x174e251a68d5f222ULL, + 0xcf96700d82028898ULL, 0x1743e3370a2c02c5ULL, 0x379eec98b4e86eaaULL, 0x0c59888a51e0482eULL, + 0xfbcbf1d699b5d189ULL, 0xacaef0d58e9fdc84ULL, 0xc1c20d06231f7614ULL, 0x2938218da274f972ULL, + 0xf6af49beff1d7f18ULL, 0xcc541c22387ac9c2ULL, 0x96fcc9ef4015c56bULL, 0x69c1627c690913a9ULL, + 0x7a86fd2f4733db0eULL, 0xfdb8c4f29e087de9ULL, 0x095e4b1a8ea2a229ULL, 0x1ad7a7c829b37a79ULL, + 0x342d89cad17ea0c0ULL, 0x67bedda6cced2051ULL, 0x19ca31bf2bb42f74ULL, 0x3df7b4c84980acbbULL, + 0xa8c6444dc80ad883ULL, 0xb91e440366e3ab85ULL, 0xc215cda00164f6d8ULL, 0x3d867c6ef247e668ULL, + 0xc7dd582bcc3e658cULL, 0xfd2c4748ee0e5528ULL, 0xa0fd9b95cc9f4f71ULL, 0x7529d871b0675ddfULL, + 0xb8f568b42d3cbd78ULL, 0x1233011b91f3da82ULL, 0x2dce6ccd4a7c3b62ULL, 0x75e7fc8e9e498603ULL, + 0x2f4f13f1fcd0b6ecULL, 0xf1a8ca1f29ff7a45ULL, 0xc249c1a72981e29bULL, 0x6ebe0dbb8c83b56aULL, + 0x7114fa8d170bb222ULL, 0x65a2dcd5bf93935fULL, 0xbdc41f68b59c979aULL, 0x2f0eef79a2ce9289ULL, + 0x42ecbf0c083c37ceULL, 0x2930bc09ec496322ULL, 0xf294b0c19cfeac0dULL, 0x3780aa4bedfabb80ULL, + 0x56c17d3e7cead929ULL, 0xe7cb4beb2e5722c5ULL, 0x0ce931732dbfe15aULL, 0x41b883c7621052f8ULL, + 0xdbf75ca0c3d25350ULL, 0x2936be086eb1e351ULL, 0xc936e03cb4a9b212ULL, 0x1d45bf82322225aaULL, + 0xe81ab1036a024cc5ULL, 0xe212201c304c9a72ULL, 0xc5d73fba6832b1fcULL, 0x20ffdb5a4d839581ULL, + 0xa283d367be5d0fadULL, 0x6c2b25ca8b164475ULL, 0x9d4935467caaf22eULL, 0x5166408eee85ff49ULL, + 0x3c67baa2fab4e361ULL, 0xb3e433c67ef35cefULL, 0x5259729241159b1cULL, 0x6a621892d5b0ab33ULL, + 0x20b74a387555cdcbULL, 
0x532aa10e1208923fULL, 0xeaa17b7762281dd1ULL, 0x61ab3443f05c44bfULL, + 0x257a6c422324def8ULL, 0x131c6c1017e3cf7fULL, 0x23758739f630a257ULL, 0x295a407a01a78580ULL, + 0xf8c443246d5da8d9ULL, 0x19d775450c52fa5dULL, 0x2afcfc92731bf83dULL, 0x7d10c8e81b2b4700ULL, + 0xc8e0271f70baa20bULL, 0x993748867ca63957ULL, 0x5412efb3cb7ed4bbULL, 0x3196d36173e62975ULL, + 0xde5bcad141c7dffcULL, 0x47cc8cd2b395c848ULL, 0xa34cd942e11af3cbULL, 0x0256dbf2d04ecec2ULL, + 0x875ab7e94b0e667fULL, 0xcad4dd83c0850d10ULL, 0x47f12e8f4e72c79fULL, 0x5f1a87bb8c85b19bULL, + 0x7ae9d0b6437f51b8ULL, 0x12c7ce5518879065ULL, 0x2ade09fe5cf77aeeULL, 0x23a05a2f7d2c5627ULL, + 0x5908e128f17c169aULL, 0xf77498dd8ad0852dULL, 0x74b4c4ceab102f64ULL, 0x183abadd10139845ULL, + 0xb165ba8daa92aaacULL, 0xd5c5ef9599386705ULL, 0xbe2f8f0cf8fc40d1ULL, 0x2701e635ee204514ULL, + 0x629fa80020156514ULL, 0xf223868764a8c1ceULL, 0x5b894fff0b3f060eULL, 0x60d9944cf708a3faULL, + 0xaeea001a1c7a201fULL, 0xebf16a633ee2ce63ULL, 0x6f7709594c7a07e1ULL, 0x79b958150d0208cbULL, + 0x24b55e5301d410e7ULL, 0xe3a34edff3fdc84dULL, 0xd88768e4904032d8ULL, 0x131384427b3aaeecULL, + 0x8405e51286234f14ULL, 0x14dc4739adb4c529ULL, 0xb8a2b5b250634ffdULL, 0x2fe2a94ad8a7ff93ULL, + 0xec5c57efe843faddULL, 0x2843ce40f0bb9918ULL, 0xa4b561d6cf3d6305ULL, 0x743629bde8fb777eULL, + 0x343edd46bbaf738fULL, 0xed981828b101a651ULL, 0xa401760b882c797aULL, 0x1fc223e28dc88730ULL, + 0x48604e91fc0fba0eULL, 0xb637f78f052c6fa4ULL, 0x91ccac3d09e9239cULL, 0x23f7eed4437a687cULL, + 0x5173b1118d9bd800ULL, 0x29d641b63189d4a7ULL, 0xfdbf177988bbc586ULL, 0x2959894fcad81df5ULL, + 0xaebc8ef3b4bbc899ULL, 0x4148995ab26992b9ULL, 0x24e20b0134f92cfbULL, 0x40d158894a05dee8ULL, + 0x46b00b1185af76f6ULL, 0x26bac77873187a79ULL, 0x3dc0bf95ab8fff5fULL, 0x2a608bd8945524d7ULL, + 0x26449588bd446302ULL, 0x7c4bc21c0388439cULL, 0x8e98a4f383bd11b2ULL, 0x26218d7bc9d876b9ULL, + 0xe3081542997c178aULL, 0x3c2d29a86fb6606fULL, 0x5c217736fa279374ULL, 0x7dde05734afeb1faULL, + 0x3bf10e3906d42babULL, 0xe4f7803e1980649cULL, 
0xe6053bf89595bf7aULL, 0x394faf38da245530ULL, + 0x7a8efb58896928f4ULL, 0xfbc778e9cc6a113cULL, 0x72670ce330af596fULL, 0x48f222a81d3d6cf7ULL, + 0xf01fce410d72caa7ULL, 0x5a20ecc7213b5595ULL, 0x7bc21165c1fa1483ULL, 0x07f89ae31da8a741ULL, + 0x05d2c2b4c6830ff9ULL, 0xd43e330fc6316293ULL, 0xa5a5590a96d3a904ULL, 0x705edb91a65333b6ULL, + 0x048ee15e0bb9a5f7ULL, 0x3240cfca9e0aaf5dULL, 0x8f4b71ceedc4a40bULL, 0x621c0da3de544a6dULL, + 0x92872836a08c4091ULL, 0xce8375b010c91445ULL, 0x8a72eb524f276394ULL, 0x2667fcfa7ec83635ULL, + 0x7f4c173345e8752aULL, 0x061b47feee7079a5ULL, 0x25dd9afa9f86ff34ULL, 0x3780cef5425dc89cULL, + 0x1a46035a513bb4e9ULL, 0x3e1ef379ac575adaULL, 0xc78c5f1c5fa24b50ULL, 0x321a967634fd9f22ULL, + 0x946707b8826e27faULL, 0x3dca84d64c506fd0ULL, 0xc189218075e91436ULL, 0x6d9284169b3b8484ULL, + 0x3a67e840383f2ddfULL, 0x33eec9a30c4f9b75ULL, 0x3ec7c86fa783ef47ULL, 0x26ec449fbac9fbc4ULL, + 0x5c0f38cba09b9e7dULL, 0x81168cc762a3478cULL, 0x3e23b0d306fc121cULL, 0x5a238aa0a5efdcddULL, + 0x1ba26121c4ea43ffULL, 0x36f8c77f7c8832b5ULL, 0x88fbea0b0adcf99aULL, 0x5ca9938ec25bebf9ULL, + 0xd5436a5e51fccda0ULL, 0x1dbc4797c2cd893bULL, 0x19346a65d3224a08ULL, 0x0f5034e49b9af466ULL, + 0xf23c3967a1e0b96eULL, 0xe58b08fa867a4d88ULL, 0xfb2fabc6a7341679ULL, 0x2a75381eb6026946ULL, + 0xc80a3be4c19420acULL, 0x66b1f6c681f2b6dcULL, 0x7cf7036761e93388ULL, 0x25abbbd8a660a4c4ULL, + 0x91ea12ba14fd5198ULL, 0x684950fc4a3cffa9ULL, 0xf826842130f5ad28ULL, 0x3ea988f75301a441ULL, + 0xc978109a695f8c6fULL, 0x1746eb4a0530c3f3ULL, 0x444d6d77b4459995ULL, 0x75952b8c054e5cc7ULL, + 0xa3703f7915f4d6aaULL, 0x66c346202f2647d8ULL, 0xd01469df811d644bULL, 0x77fea47d81a5d71fULL, + 0xc5e9529ef57ca381ULL, 0x6eeeb4b9ce2f881aULL, 0xb6e91a28e8009bd6ULL, 0x4b80be3e9afc3fecULL, + 0x7e3773c526aed2c5ULL, 0x1b4afcb453c9a49dULL, 0xa920bdd7baffb24dULL, 0x7c54699f122d400eULL, + 0xef46c8e14fa94bc8ULL, 0xe0b074ce2952ed5eULL, 0xbea450e1dbd885d5ULL, 0x61b68649320f712cULL, + 0x8a485f7309ccbdd1ULL, 0xbd06320d7d4d1a2dULL, 0x25232973322dbef4ULL, 
0x445dc4758c17f770ULL, + 0xdb0434177cc8933cULL, 0xed6fe82175ea059fULL, 0x1efebefdc053db34ULL, 0x4adbe867c65daf99ULL, + 0x3acd71a2a90609dfULL, 0xe5e991856dd04050ULL, 0x1ec69b688157c23cULL, 0x697427f6885cfe4dULL, + 0xd7be7b9b65e1a851ULL, 0xa03d28d522c536ddULL, 0x28399d658fd2b645ULL, 0x49e5b7e17c2641e1ULL, + 0x6f8c3a98700457a4ULL, 0x5078f0a25ebb6778ULL, 0xd13c3ccbc382960fULL, 0x2e003258a7df84b1ULL, + 0x8ad1f39be6296a1cULL, 0xc1eeaa652a5fbfb2ULL, 0x33ee0673fd26f3cbULL, 0x59256173a69d2cccULL, + 0x41ea07aa4e18fc41ULL, 0xd9fc19527c87a51eULL, 0xbdaacb805831ca6fULL, 0x445b652dc916694fULL, + 0xce92a3a7f2172315ULL, 0x1edc282de11b9964ULL, 0xa1823aafe04c314aULL, 0x790a2d94437cf586ULL, + 0x71c447fb93f6e009ULL, 0x8922a56722845276ULL, 0xbf70903b204f5169ULL, 0x2f7a89891ba319feULL, + 0x02a08eb577e2140cULL, 0xed9a4ed4427bdcf4ULL, 0x5253ec44e4323cd1ULL, 0x3e88363c14e9355bULL, + 0xaa66c14277110b8cULL, 0x1ae0391610a23390ULL, 0x2030bd12c93fc2a2ULL, 0x3ee141579555c7abULL, + 0x9214de3a6d6e7d41ULL, 0x3ccdd88607f17efeULL, 0x674f1288f8e11217ULL, 0x5682250f329f93d0ULL, + 0x6cf00b136d2e396eULL, 0x6e4cf86f1014debfULL, 0x5930b1b5bfcc4e83ULL, 0x047069b48aba16b6ULL, + 0x0d4ce4ab69b20793ULL, 0xb24db91a97d0fb9eULL, 0xcdfa50f54e00d01dULL, 0x221b1085368bddb5ULL, + 0xe7e59468b1e3d8d2ULL, 0x53c56563bd122f93ULL, 0xeee8a903e0663f09ULL, 0x61efa662cbbe3d42ULL, + 0x2cf8ddddde6eab2aULL, 0x9bf80ad51435f231ULL, 0x5deadacec9f04973ULL, 0x29275b5d41d29b27ULL, + 0xcfde0f0895ebf14fULL, 0xb9aab96b054905a7ULL, 0xcae80dd9a1c420fdULL, 0x0a63bf2f1673bbc7ULL, + 0x092f6e11958fbc8cULL, 0x672a81e804822fadULL, 0xcac8351560d52517ULL, 0x6f3f7722c8f192f8ULL, + 0xf8ba90ccc2e894b7ULL, 0x2c7557a438ff9f0dULL, 0x894d1d855ae52359ULL, 0x68e122157b743d69ULL, + 0xd87e5570cfb919f3ULL, 0x3f2cdecd95798db9ULL, 0x2121154710c0a2ceULL, 0x3c66a115246dc5b2ULL, + 0xcbedc562294ecb72ULL, 0xba7143c36a280b16ULL, 0x9610c2efd4078b67ULL, 0x6144735d946a4b1eULL, + 0x536f111ed75b3350ULL, 0x0211db8c2041d81bULL, 0xf93cb1000e10413cULL, 0x149dfd3c039e8876ULL, + 
0xd479dde46b63155bULL, 0xb66e15e93c837976ULL, 0xdafde43b1f13e038ULL, 0x5fafda1a2e4b0b35ULL, + 0x3600bbdf17197581ULL, 0x3972050bbe3cd2c2ULL, 0x5938906dbdd5be86ULL, 0x34fce5e43f9b860fULL, + 0x75a8a4cd42d14d02ULL, 0x828dabc53441df65ULL, 0x33dcabedd2e131d3ULL, 0x3ebad76fb814d25fULL, + 0xd4906f566f70e10fULL, 0x5d12f7aa51690f5aULL, 0x45adb16e76cefcf2ULL, 0x01f768aead232999ULL, + 0x2b6cc77b6248febdULL, 0x3cd30628ec3aaffdULL, 0xce1c0b80d4ef486aULL, 0x4c3bff2ea6f66c23ULL, + 0x3f2ec4094aeaeb5fULL, 0x61b19b286e372ca7ULL, 0x5eefa966de2a701dULL, 0x23b20565de55e3efULL, + 0xe301ca5279d58557ULL, 0x07b2d4ce27c2874fULL, 0xa532cd8a9dcf1d67ULL, 0x2a52fee23f2bff56ULL, + 0x8624efb37cd8663dULL, 0xbbc7ac20ffbd7594ULL, 0x57b85e9c82d37445ULL, 0x7b3052cb86a6ec66ULL, + 0x3482f0ad2525e91eULL, 0x2cb68043d28edca0ULL, 0xaf4f6d052e1b003aULL, 0x185f8c2529781b0aULL, + 0xaa41de5bd80ce0d6ULL, 0x9407b2416853e9d6ULL, 0x563ec36e357f4c3aULL, 0x4cc4b8dd0e297bceULL, + 0xa2fc1a52ffb8730eULL, 0x1811f16e67058e37ULL, 0x10f9a366cddf4ee1ULL, 0x72f4a0c4a0b9f099ULL, + 0x8c16c06f663f4ea7ULL, 0x693b3af74e970fbaULL, 0x2102e7f1d69ec345ULL, 0x0ba53cbc968a8089ULL, + 0xca3d9dc7fea15537ULL, 0x4c6824bb51536493ULL, 0xb9886314844006b1ULL, 0x40d2a72ab454cc60ULL, + 0x5936a1b712570975ULL, 0x91b9d648debda657ULL, 0x3344094bb64330eaULL, 0x006ba10d12ee51d0ULL, + 0x19228468f5de5d58ULL, 0x0eb12f4c38cc05b0ULL, 0xa1039f9dd5601990ULL, 0x4502d4ce4fff0e0bULL, + 0xeb2054106837c189ULL, 0xd0f6544c6dd3b93cULL, 0x40727064c416d74fULL, 0x6e15c6114b502ef0ULL, + 0x4df2a398cfb1a76bULL, 0x11256c7419f2f6b1ULL, 0x4a497962066e6043ULL, 0x705b3aab41355b44ULL, + 0x365ef536d797b1d8ULL, 0x00076bd622ddf0dbULL, 0x3bbf33b0e0575a88ULL, 0x3777aa05c8e4ca4dULL, + 0x392745c85578db5fULL, 0x6fda4149dbae5ae2ULL, 0xb1f0b00b8adc9867ULL, 0x09963437d36f1da3ULL, + 0x7e824e90a5dc3853ULL, 0xccb5f6641f135cbdULL, 0x6736d86c87ce8fccULL, 0x625f3ce26604249fULL, + 0xaf8ac8059502f63fULL, 0x0c05e70a2e351469ULL, 0x35292e9c764b6305ULL, 0x1a394360c7e23ac3ULL, + 0xd5c6d53251183264ULL, 
0x62065abd43c2b74fULL, 0xb5fbf5d03b973f9bULL, 0x13a3da3661206e5eULL, + 0xc6bd5837725d94e5ULL, 0x18e30912205016c5ULL, 0x2088ce1570033c68ULL, 0x7fba1f495c837987ULL, + 0x5a8c7423f2f9079dULL, 0x1735157b34023fc5ULL, 0xe4f9b49ad2fab351ULL, 0x6691ff72c878e33cULL, + 0x122c2adedc5eff3eULL, 0xf8dd4bf1d8956cf4ULL, 0xeb86205d9e9e5bdaULL, 0x049b92b9d975c743ULL, + 0xa5379730b0f6c05aULL, 0x72a0ffacc6f3a553ULL, 0xb0032c34b20dcd6dULL, 0x470e9dbc88d5164aULL, + 0xb19cf10ca237c047ULL, 0xb65466711f6c81a2ULL, 0xb3321bd16dd80b43ULL, 0x48c14f600c5fbe8eULL, + 0x66451c264aa6c803ULL, 0xb66e3904a4fa7da6ULL, 0xd45f19b0b3128395ULL, 0x31602627c3c9bc10ULL, + 0x3120dc4832e4e10dULL, 0xeb20c46756c717f7ULL, 0x00f52e3f67280294ULL, 0x566d4fc14730c509ULL, + 0x7e3a5d40fd837206ULL, 0xc1e926dc7159547aULL, 0x216730fba68d6095ULL, 0x22e8c3843f69cea7ULL, + 0x33d074e8930e4b2bULL, 0xb6e4350e84d15816ULL, 0x5534c26ad6ba2365ULL, 0x7773c12f89f1f3f3ULL, + 0x8cba404da57962aaULL, 0x5b9897a81999ce56ULL, 0x508e862f121692fcULL, 0x3a81907fa093c291ULL, + 0x0dded0ff4725a510ULL, 0x10d8cc10673fc503ULL, 0x5b9d151c9f1f4e89ULL, 0x32a5c1d5cb09a44cULL, + 0x1e0aa442b90541fbULL, 0x5f85eb7cc1b485dbULL, 0xbee595ce8a9df2e5ULL, 0x25e496c722422236ULL, + 0x5edf3c46cd0fe5b9ULL, 0x34e75a7ed2a43388ULL, 0xe488de11d761e352ULL, 0x0e878a01a085545cULL, + 0xba493c77e021bb04ULL, 0x2b4d1843c7df899aULL, 0x9ea37a487ae80d67ULL, 0x67a9958011e41794ULL, + 0x4b58051a6697b065ULL, 0x47e33f7d8d6ba6d4ULL, 0xbb4da8d483ca46c1ULL, 0x68becaa181c2db0dULL, + 0x8d8980e90b989aa5ULL, 0xf95eb14a2c93c99bULL, 0x51c6c7c4796e73a2ULL, 0x6e228363b5efb569ULL, + 0xc6bbc0b02dd624c8ULL, 0x777eb47dec8170eeULL, 0x3cde15a004cfafa9ULL, 0x1dc6bc087160bf9bULL, + 0x2e07e043eec34002ULL, 0x18e9fc677a68dc7fULL, 0xd8da03188bd15b9aULL, 0x48fbc3bb00568253ULL, + 0x57547d4cfb654ce1ULL, 0xd3565b82a058e2adULL, 0xf63eaf0bbf154478ULL, 0x47531ef114dfbb18ULL, + 0xe1ec630a4278c587ULL, 0x5507d546ca8e83f3ULL, 0x85e135c63adc0c2bULL, 0x0aa7efa85682844eULL, + 0x72691ba8b3e1f615ULL, 0x32b4e9701fbe3ffaULL, 
0x97b6d92e39bb7868ULL, 0x2cfe53dea02e39e8ULL, + 0x687392cd85cd52b0ULL, 0x27ff66c910e29831ULL, 0x97134556a9832d06ULL, 0x269bb0360a84f8a0ULL, + 0x706e55457643f85cULL, 0x3734a48c9b597d1bULL, 0x7aee91e8c6efa472ULL, 0x5cd6abc198a9d9e0ULL, + 0x0e04de06cb3ce41aULL, 0xd8c6eb893402e138ULL, 0x904659bb686e3772ULL, 0x7215c371746ba8c8ULL, + 0xfd12a97eeae4a2d9ULL, 0x9514b7516394f2c5ULL, 0x266fd5809208f294ULL, 0x5c847085619a26b9ULL, + 0x52985410fed694eaULL, 0x3c905b934a2ed254ULL, 0x10bb47692d3be467ULL, 0x063b3d2d69e5e9e1ULL, + 0x472726eedda57debULL, 0xefb6c4ae10f41891ULL, 0x2b1641917b307614ULL, 0x117c554fc4f45b7cULL, + 0xc07cf3118f9d8812ULL, 0x01dbd82050017939ULL, 0xd7e803f4171b2827ULL, 0x1015e87487d225eaULL, + 0xc58de3fed23acc4dULL, 0x50db91c294a7be2dULL, 0x0b94d43d1c9cf457ULL, 0x6b1640fa6e37524aULL, + 0x692f346c5fda0d09ULL, 0x200b1c59fa4d3151ULL, 0xb8c46f760777a296ULL, 0x4b38395f3ffdfbcfULL, + 0x18d25e00be54d671ULL, 0x60d50582bec8aba6ULL, 0x87ad8f263b78b982ULL, 0x50fdf64e9cda0432ULL, + 0x90f567aac578dcf0ULL, 0xef1e9b0ef2a3133bULL, 0x0eebba9242d9de71ULL, 0x15473c9bf03101c7ULL, + 0x7c77e8ae56b78095ULL, 0xb678e7666e6f078eULL, 0x2da0b9615348ba1fULL, 0x7cf931c1ff733f0bULL, + 0x26b357f50a0a366cULL, 0xe9708cf42b87d732ULL, 0xc13aeea5f91cb2c0ULL, 0x35d90c991143bb4cULL, + 0x47c1c404a9a0d9dcULL, 0x659e58451972d251ULL, 0x3875a8c473b38c31ULL, 0x1fbd9ed379561f24ULL, + 0x11fabc6fd41ec28dULL, 0x7ef8dfe3cd2a2dcaULL, 0x72e73b5d8c404595ULL, 0x6135fa4954b72f27ULL, + 0xccfc32a2de24b69cULL, 0x3f55698c1f095d88ULL, 0xbe3350ed5ac3f929ULL, 0x5e9bf806ca477eebULL, + 0xe9ce8fb63c309f68ULL, 0x5376f63565e1f9f4ULL, 0xd1afcfb35a6393f1ULL, 0x6632a1ede5623506ULL, + 0x0b7d6c390c2ded4cULL, 0x56cb3281df04cb1fULL, 0x66305a1249ecc3c7ULL, 0x5d588b60a38ca72aULL, + 0xa6ecbf78e8e5f42dULL, 0x86eeb44b3c8a3eecULL, 0xec219c48fbd21604ULL, 0x1aaf1af517c36731ULL, + 0xc306a2836769bde7ULL, 0x208280622b1e2adbULL, 0x8027f51ffbff94a6ULL, 0x76cfa1ce1124f26bULL, + 0x18eb00562422abb6ULL, 0xf377c4d58f8c29c3ULL, 0x4dbbc207f531561aULL, 
0x0253b7f082128a27ULL, + 0x3d1f091cb62c17e0ULL, 0x4860e1abd64628a9ULL, 0x52d17436309d4253ULL, 0x356f97e13efae576ULL, + 0xd351e11aa150535bULL, 0x3e6b45bb1dd878ccULL, 0x0c776128bed92c98ULL, 0x1d34ae93032885b8ULL, + 0x4ba0488ca85ba4c3ULL, 0x985348c33c9ce6ceULL, 0x66124c6f97bda770ULL, 0x0f81a0290654124aULL, + 0x9ed09ca6569b86fdULL, 0x811009fd18af9a2dULL, 0xff08d03f93d8c20aULL, 0x52a148199faef26bULL, + 0x3e03f9dc2d8d1b73ULL, 0x4205801873961a70ULL, 0xc0d987f041a35970ULL, 0x07aa1f15a1c0d549ULL, + 0xdfd46ce08cd27224ULL, 0x6d0a024f934e4239ULL, 0x808a7a6399897b59ULL, 0x0a4556e9e13d95a2ULL, + 0xd21a991fe9c13045ULL, 0x9b0e8548fe7751b8ULL, 0x5da643cb4bf30035ULL, 0x77db28d63940f721ULL, + 0xfc5eeb614adc9011ULL, 0x5229419ae8c411ebULL, 0x9ec3e7787d1dcf74ULL, 0x340d053e216e4cb5ULL, + 0xcac7af39b48df2b4ULL, 0xc0faec2871a10a94ULL, 0x140a69245ca575edULL, 0x0cf1c37134273a4cULL, + 0xc8ee306ac224b8a5ULL, 0x57eaee7ccb4930b0ULL, 0xa1e806bdaacbe74fULL, 0x7d9a62742eeb657dULL, + 0x9eb6b6ef546c4830ULL, 0x885cca1fddb36e2eULL, 0xe6b9f383ef0d7105ULL, 0x58654fef9d2e0412ULL, + 0xa905c4ffbe0e8e26ULL, 0x942de5df9b31816eULL, 0x497d723f802e88e1ULL, 0x30684dea602f408dULL, + 0x21e5a278a3e6cb34ULL, 0xaefb6e6f5b151dc4ULL, 0xb30b8e049d77ca15ULL, 0x28c3c9cf53b98981ULL, + 0x287fb721556cdd2aULL, 0x0d317ca897022274ULL, 0x7468c7423a543258ULL, 0x4a7f11464eb5642fULL, + 0xa237a4774d193aa6ULL, 0xd865986ea92129a1ULL, 0x24c515ecf87c1a88ULL, 0x604003575f39f5ebULL, + 0x47b9f189570a9b27ULL, 0x2b98cede465e4b78ULL, 0x026df551dbb85c20ULL, 0x74fcd91047e21901ULL, + 0x13e2a90a23c1bfa3ULL, 0x0cb0074e478519f6ULL, 0x5ff1cbbe3af6cf44ULL, 0x67fe5438be812dbeULL, + 0xd13cf64fa40f05b0ULL, 0x054dfb2f32283787ULL, 0x4173915b7f0d2aeaULL, 0x482f144f1f610d4eULL, + 0xf6210201b47f8234ULL, 0x5d0ae1929e70b990ULL, 0xdcd7f455b049567cULL, 0x7e93d0f1f0916f01ULL, + 0xdd79cbf18a7db4faULL, 0xbe8391bf6f74c62fULL, 0x027145d14b8291bdULL, 0x585a73ea2cbf1705ULL, + 0x485ca03e928a0db2ULL, 0x10fc01a5742857e7ULL, 0x2f482edbd6d551a7ULL, 0x0f0433b5048fdb8aULL, + 
0x60da2e8dd7dc6247ULL, 0x88b4c9d38cd4819aULL, 0x13033ac001f66697ULL, 0x273b24fe3b367d75ULL, + 0xc6e8f66a31b3b9d4ULL, 0x281514a494df49d5ULL, 0xd1726fdfc8b23da7ULL, 0x4b3ae7d103dee548ULL, + 0xc6256e19ce4b9d7eULL, 0xff5c5cf186e3c61cULL, 0xacc63ca34b8ec145ULL, 0x74621888fee66574ULL, + 0x956f409645290a1eULL, 0xef0bf8e3263a962eULL, 0xed6a50eb5ec2647bULL, 0x0694283a9dca7502ULL, + 0x769b963643a2dcd1ULL, 0x42b7c8ea09fc5353ULL, 0x4f002aee13397eabULL, 0x63005e2c19b7d63aULL, + 0xca6736da63023beaULL, 0x966c7f6db12a99b7ULL, 0xace09390c537c5e1ULL, 0x0b696063a1aa89eeULL, + 0xebb03e97288c56e5ULL, 0x432a9f9f938c8be8ULL, 0xa6a5a93d5b717f71ULL, 0x1a5fb4c3e18f9d97ULL, + 0x1c94e7ad1c60cdceULL, 0xee202a43fc02c4a0ULL, 0x8dafe4d867c46a20ULL, 0x0a10263c8ac27b58ULL, + 0xd0dea9dfe4432a4aULL, 0x856af87bbe9277c5ULL, 0xce8472acc212c71aULL, 0x6f151b6d9bbb1e91ULL, + 0x26776c527ceed56aULL, 0x7d211cb7fbf8faecULL, 0x37ae66a6fd4609ccULL, 0x1f81b702d2770c42ULL, + 0x2fb0b057eac58392ULL, 0xe1dd89fe29744e9dULL, 0xc964f8eb17beb4f8ULL, 0x29571073c9a2d41eULL, + 0xa948a18981c0e254ULL, 0x2df6369b65b22830ULL, 0xa33eb2d75fcfd3c6ULL, 0x078cd6ec4199a01fULL, + 0x4a584a41ad900d2fULL, 0x32142b78e2c74c52ULL, 0x68c4e8338431c978ULL, 0x7f69ea9008689fc2ULL, + 0x52f2c81e46a38265ULL, 0xfd78072d04a832fdULL, 0x8cd7d5fa25359e94ULL, 0x4de71b7454cc29d2ULL, + 0x42eb60ad1eda6ac9ULL, 0x0aad37dfdbc09c3aULL, 0x81004b71e33cc191ULL, 0x44e6be345122803cULL, + 0x03fe8388ba1920dbULL, 0xf5d57c32150db008ULL, 0x49c8c4281af60c29ULL, 0x21edb518de701aeeULL, + 0x7fb63e418f06dc99ULL, 0xa4460d99c166d7b8ULL, 0x24dd5248ce520a83ULL, 0x5ec3ad712b928358ULL, + 0x15022a5fbd17930fULL, 0xa4f64a77d82570e3ULL, 0x12bc8d6915783712ULL, 0x498194c0fc620abbULL, + 0x38a2d9d255686c82ULL, 0x785c6bd9193e21f0ULL, 0xe4d5c81ab24a5484ULL, 0x56307860b2e20989ULL, + 0x429d55f78b4d74c4ULL, 0x22f1834643350131ULL, 0x1e60c24598c71fffULL, 0x59f2f014979983efULL, + 0x46a47d56eb494a44ULL, 0x3e22a854d636a18eULL, 0xb346e15274491c3bULL, 0x2ceafd4e5390cde7ULL, + 0xba8a8538be0d6675ULL, 
0x4b9074bb50818e23ULL, 0xcbdab89085d304c3ULL, 0x61a24fe0e56192c4ULL, + 0xcb7615e6db525bcbULL, 0xdd7d8c35a567e4caULL, 0xe6b4153acafcdd69ULL, 0x2d668e097f3c9766ULL, + 0xa57e7e265ce55ef0ULL, 0x5d9f4e527cd4b967ULL, 0xfbc83606492fd1e5ULL, 0x090d52beb7c3f7aeULL, + 0x09b9515a1e7b4d7cULL, 0x1f266a2599da44c0ULL, 0xa1c49548e2c55504ULL, 0x7ef04287126f15ccULL, + 0xfed1659dbd30ef15ULL, 0x8b4ab9eec4e0277bULL, 0x884d6236a5df3291ULL, 0x1fd96ea6bf5cf788ULL, + 0x42a161981f190d9aULL, 0x61d849507e6052c1ULL, 0x9fe113bf285a2cd5ULL, 0x7c22d676dbad85d8ULL, + 0x82e770ed2bfbd27dULL, 0x4c05b2ece996f5a5ULL, 0xcd40a9c2b0900150ULL, 0x5895319213d9bf64ULL, + 0xe7cc5d703fea2e08ULL, 0xb50c491258e2188cULL, 0xcce30baa48205bf0ULL, 0x537c659ccfa32d62ULL, + 0x37b6623a98cfc088ULL, 0xfe9bed1fa4d6aca4ULL, 0x04d29b8e56a8d1b0ULL, 0x725f71c40b519575ULL, + 0x28c7f89cd0339ce6ULL, 0x8367b14469ddc18bULL, 0x883ada83a6a1652cULL, 0x585f1974034d6c17ULL, + 0x89cfb266f1b19188ULL, 0xe63b4863e7c35217ULL, 0xd88c9da6b4c0526aULL, 0x3e035c9df0954635ULL, + 0xdd9d5412fb45de9dULL, 0xdd684532e4cff40dULL, 0x4b5c999b151d671cULL, 0x2d8c2cc811e7f690ULL, + 0x7f54be1d90055d40ULL, 0xa464c5df464aaf40ULL, 0x33979624f0e917beULL, 0x2c018dc527356b30ULL, + 0xa5415024e330b3d4ULL, 0x73ff3d96691652d3ULL, 0x94ec42c4ef9b59f1ULL, 0x0747201618d08e5aULL, + 0x4d6ca48aca411c53ULL, 0x66415f2fcfa66119ULL, 0x9c4dd40051e227ffULL, 0x59810bc09a02f7ebULL, + 0x2a7eb171b3dc101dULL, 0x441c5ab99ffef68eULL, 0x32025c9b93b359eaULL, 0x5e8ce0a71e9d112fULL, + 0xbfcccb92429503fdULL, 0xd271ba752f095d55ULL, 0x345ead5e972d091eULL, 0x18c8df11a83103baULL, + 0x90cd949a9aed0f4cULL, 0xc5d1f4cb6660e37eULL, 0xb8cac52d56c52e0bULL, 0x6e42e400c5808e0dULL, + 0xa3b46966eeaefd23ULL, 0x0c4f1f0be39ecdcaULL, 0x189dc8c9d683a51dULL, 0x51f27f054c09351bULL, + 0x4c487ccd2a320682ULL, 0x587ea95bb3df1c96ULL, 0xc8ccf79e555cb8e8ULL, 0x547dc829a206d73dULL, + 0xb822a6cd80c39b06ULL, 0xe96d54732000d4c6ULL, 0x28535b6f91463b4dULL, 0x228f4660e2486e1dULL, + 0x98799538de8d3abfULL, 0x8cd8330045ebca6eULL, 
0x79952a008221e738ULL, 0x4322e1a7535cd2bbULL, + 0xb114c11819d1801cULL, 0x2016e4d84f3f5ec7ULL, 0xdd0e2df409260f4cULL, 0x5ec362c0ae5f7266ULL, + 0xc0462b18b8b2b4eeULL, 0x7cc8d950274d1afbULL, 0xf25f7105436b02d2ULL, 0x43bbf8dcbff9ccd3ULL, + 0xb6ad1767a039e9dfULL, 0xb0714da8f69d3583ULL, 0x5e55fa18b42931f5ULL, 0x4ed5558f33c60961ULL, + 0x1fe37901c647a5ddULL, 0x593ddf1f8081d357ULL, 0x0249a4fd813fd7a6ULL, 0x69acca274e9caf61ULL, + 0x047ba3ea330721c9ULL, 0x83423fc20e7e1ea0ULL, 0x1df4c0af01314a60ULL, 0x09a62dab89289527ULL, + 0xa5b325a49cc6cb00ULL, 0xe94b5dc654b56cb6ULL, 0x3be28779adc994a0ULL, 0x4296e8f8ba3a4aadULL, + 0x328689761e451eabULL, 0x2e4d598bff59594aULL, 0x49b96853d7a7084aULL, 0x4980a319601420a8ULL, + 0x9565b9e12f552c42ULL, 0x8a5318db7100fe96ULL, 0x05c90b4d43add0d7ULL, 0x538b4cd66a5d4edaULL, + 0xf4e94fc3e89f039fULL, 0x592c9af26f618045ULL, 0x08a36eb5fd4b9550ULL, 0x25fffaf6c2ed1419ULL, + 0x34434459cc79d354ULL, 0xeeecbfb4b1d5476bULL, 0xddeb34a061615d99ULL, 0x5129cecceb64b773ULL, + 0xee43215894993520ULL, 0x772f9c7cf14c0b3bULL, 0xd2e2fce306bedad5ULL, 0x715f42b546f06a97ULL, + 0x434ecdceda5b5f1aULL, 0x0da17115a49741a9ULL, 0x680bd77c73edad2eULL, 0x487c02354edd9041ULL, + 0xb8efeff3a70ed9c4ULL, 0x56a32aa3e857e302ULL, 0xdf3a68bd48a2a5a0ULL, 0x07f650b73176c444ULL, + 0xe38b9b1626e0ccb1ULL, 0x79e053c18b09fb36ULL, 0x56d90319c9f94964ULL, 0x1ca941e7ac9ff5c4ULL, + 0x49c4df29162fa0bbULL, 0x8488cf3282b33305ULL, 0x95dfda14cabb437dULL, 0x3391f78264d5ad86ULL, + 0x729ae06ae2b5095dULL, 0xd58a58d73259a946ULL, 0xe9834262d13921edULL, 0x27fedafaa54bb592ULL, + 0xa99dc5b829ad48bbULL, 0x5f025742499ee260ULL, 0x802c8ecd5d7513fdULL, 0x78ceb3ef3f6dd938ULL, + 0xc342f44f8a135d94ULL, 0x7b9edb44828cdda3ULL, 0x9436d11a0537cfe7ULL, 0x5064b164ec1ab4c8ULL, + 0x7020eccfd37eb2fcULL, 0x1f31ea3ed90d25fcULL, 0x1b930d7bdfa1bb34ULL, 0x5344467a48113044ULL, + 0x70073170f25e6dfbULL, 0xe385dc1a50114cc8ULL, 0x2348698ac8fc4f00ULL, 0x2a77a55284dd40d8ULL, + 0xfe06afe0c98c6ce4ULL, 0xc235df96dddfd6e4ULL, 0x1428d01e33bf1ed3ULL, 
0x785768ec9300bdafULL, + 0x9702e57a91deb63bULL, 0x61bdb8bfe5ce8b80ULL, 0x645b426f3d1d58acULL, 0x4804a82227a557bcULL, + 0x8e57048ab44d2601ULL, 0x68d6501a4b3a6935ULL, 0xc39c9ec3f9e1c293ULL, 0x4172f257d4de63e2ULL, + 0xd368b450330c6401ULL, 0x040d3017418f2391ULL, 0x2c34bb6090b7d90dULL, 0x16f649228fdfd51fULL, + 0xbea6818e2b928ef5ULL, 0xe28ccf91cdc11e72ULL, 0x594aaa68e77a36cdULL, 0x313034806c7ffd0fULL, + 0x8a9d27ac2249bd65ULL, 0x19a3b464018e9512ULL, 0xc26ccff352b37ec7ULL, 0x056f68341d797b21ULL, + 0x5e79d6757efd2327ULL, 0xfabdbcb6553afe15ULL, 0xd3e7222c6eaf5a60ULL, 0x7046c76d4dae743bULL, + 0x660be872b18d4a55ULL, 0x19992518574e1496ULL, 0xc103053a302bdcbbULL, 0x3ed8e9800b218e8eULL, + 0x7b0b9239fa75e03eULL, 0xefe9fb684633c083ULL, 0x98a35fbe391a7793ULL, 0x6065510fe2d0fe34ULL, + 0x55cb668548abad0cULL, 0xb4584548da87e527ULL, 0x2c43ecea0107c1ddULL, 0x526028809372de35ULL, + 0x3415c56af9213b1fULL, 0x5bee1a4d017e98dbULL, 0x13f6b105b5cf709bULL, 0x5ff20e3482b29ab6ULL, + 0x0aa29c75cc2e6c90ULL, 0xfc7d73ca3a70e206ULL, 0x899fc38fc4b5c515ULL, 0x250386b124ffc207ULL, + 0x54ea28d5ae3d2b56ULL, 0x9913149dd6de60ceULL, 0x16694fc58f06d6c1ULL, 0x46b23975eb018fc7ULL, + 0x470a6a0fb4b7b4e2ULL, 0x5d92475a8f7253deULL, 0xabeee5b52fbd3adbULL, 0x7fa20801a0806968ULL, + 0x76f3faf19f7714d2ULL, 0xb3e840c12f4660c3ULL, 0x0fb4cd8df212744eULL, 0x4b065a251d3a2dd2ULL, + 0x5cebde383d77cd4aULL, 0x6adf39df882c9cb1ULL, 0xa2dd242eb09af759ULL, 0x3147c0e50e5f6422ULL, + 0x164ca5101d1350dbULL, 0xf8d13479c33fc962ULL, 0xe640ce4d13e5da08ULL, 0x4bdee0c45061f8baULL, + 0xd7c46dc1a4edb1c9ULL, 0x5514d7b6437fd98aULL, 0x58942f6bb2a1c00bULL, 0x2dffb2ab1d70710eULL, + 0xccdfcf2fc18b6d68ULL, 0xa8ebcba8b7806167ULL, 0x980697f95e2937e3ULL, 0x02fbba1cd0126e8cULL +}; + +static void curve25519_ever64_base(u8 *out, const u8 *priv) +{ + u64 swap = 1; + int i, j, k; + u64 tmp[16 + 32 + 4]; + u64 *x1 = &tmp[0]; + u64 *z1 = &tmp[4]; + u64 *x2 = &tmp[8]; + u64 *z2 = &tmp[12]; + u64 *xz1 = &tmp[0]; + u64 *xz2 = &tmp[8]; + u64 *a = &tmp[0 + 16]; + u64 
*b = &tmp[4 + 16]; + u64 *c = &tmp[8 + 16]; + u64 *ab = &tmp[0 + 16]; + u64 *abcd = &tmp[0 + 16]; + u64 *ef = &tmp[16 + 16]; + u64 *efgh = &tmp[16 + 16]; + u64 *key = &tmp[0 + 16 + 32]; + + memcpy(key, priv, 32); + ((u8 *)key)[0] &= 248; + ((u8 *)key)[31] = (((u8 *)key)[31] & 127) | 64; + + x1[0] = 1, x1[1] = x1[2] = x1[3] = 0; + z1[0] = 1, z1[1] = z1[2] = z1[3] = 0; + z2[0] = 1, z2[1] = z2[2] = z2[3] = 0; + memcpy(x2, p_minus_s, sizeof(p_minus_s)); + + j = 3; + for (i = 0; i < 4; ++i) { + while (j < (const int[]){ 64, 64, 64, 63 }[i]) { + u64 bit = (key[i] >> j) & 1; + k = (64 * i + j - 3); + swap = swap ^ bit; + cswap2(swap, xz1, xz2); + swap = bit; + fsub(b, x1, z1); + fadd(a, x1, z1); + fmul(c, &table_ladder[4 * k], b, ef); + fsub(b, a, c); + fadd(a, a, c); + fsqr2(ab, ab, efgh); + fmul2(xz1, xz2, ab, efgh); + ++j; + } + j = 0; + } + + point_double(xz1, abcd, efgh); + point_double(xz1, abcd, efgh); + point_double(xz1, abcd, efgh); + encode_point(out, xz1); + + memzero_explicit(tmp, sizeof(tmp)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519.c b/net/wireguard/crypto/zinc/curve25519/curve25519.c new file mode 100644 index 000000000000..dffaa09c18db --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is an implementation of the Curve25519 ECDH algorithm, using either + * a 32-bit implementation or a 64-bit implementation with 128-bit integers, + * depending on what is supported by the target compiler. + * + * Information: https://cr.yp.to/ecdh.html + */ + +#include +#include "../selftest/run.h" + +#include +#include +#include +#include +#include +#include // For crypto_memneq. 
+ +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "curve25519-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) +#include "curve25519-arm-glue.c" +#else +static bool *const curve25519_nobs[] __initconst = { }; +static void __init curve25519_fpu_init(void) +{ +} +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + return false; +} +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + return false; +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +#include "curve25519-hacl64.c" +#else +#include "curve25519-fiat32.c" +#endif + +static const u8 null_point[CURVE25519_KEY_SIZE] = { 0 }; + +bool curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (!curve25519_arch(mypublic, secret, basepoint)) + curve25519_generic(mypublic, secret, basepoint); + return crypto_memneq(mypublic, null_point, CURVE25519_KEY_SIZE); +} + +bool curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + static const u8 basepoint[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; + + if (unlikely(!crypto_memneq(secret, null_point, CURVE25519_KEY_SIZE))) + return false; + + if (curve25519_base_arch(pub, secret)) + return crypto_memneq(pub, null_point, CURVE25519_KEY_SIZE); + return curve25519(pub, secret, basepoint); +} + +void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); + curve25519_clamp_secret(secret); +} + +#include "../selftest/curve25519.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init curve25519_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + curve25519_fpu_init(); + if (!selftest_run("curve25519", curve25519_selftest, 
curve25519_nobs, + ARRAY_SIZE(curve25519_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Curve25519 scalar multiplication"); +MODULE_AUTHOR("Jason A. Donenfeld "); +#endif diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c new file mode 100644 index 000000000000..291fe4ba98b0 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include +#include + +asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]); +asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]); +asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]); + +static bool poly1305_use_neon __ro_after_init; +static bool *const poly1305_nobs[] __initconst = { &poly1305_use_neon }; + +static void __init poly1305_fpu_init(void) +{ +#if defined(CONFIG_ZINC_ARCH_ARM64) + poly1305_use_neon = cpu_have_named_feature(ASIMD); +#elif defined(CONFIG_ZINC_ARCH_ARM) + poly1305_use_neon = elf_hwcap & HWCAP_NEON; +#endif +} + +#if defined(CONFIG_ZINC_ARCH_ARM64) +struct poly1305_arch_internal { + union { + u32 h[5]; + struct { + u64 h0, h1, h2; + }; + }; + u64 is_base2_26; + u64 r[2]; +}; +#elif defined(CONFIG_ZINC_ARCH_ARM) +struct poly1305_arch_internal { + union { + u32 h[5]; + struct { + u64 h0, h1; + u32 h2; + } __packed; + }; + u32 r[4]; + u32 is_base2_26; +}; +#endif + +/* The NEON code uses base 2^26, while the scalar code 
uses base 2^64 on 64-bit + * and base 2^32 on 32-bit. If we hit the unfortunate situation of using NEON + * and then having to go back to scalar -- because the user is silly and has + * called the update function from two separate contexts -- then we need to + * convert back to the original base before proceeding. The below function is + * written for 64-bit integers, and so we have to swap words at the end on + * big-endian 32-bit. It is possible to reason that the initial reduction below + * is sufficient given the implementation invariants. However, for an avoidance + * of doubt and because this is not performance critical, we do the full + * reduction anyway. + */ +static void convert_to_base2_64(void *ctx) +{ + struct poly1305_arch_internal *state = ctx; + u32 cy; + + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !state->is_base2_26) + return; + + cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; + cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; + cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; + cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; + state->h0 = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; + state->h1 = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); + state->h2 = state->h[4] >> 24; + if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { + state->h0 = rol64(state->h0, 32); + state->h1 = rol64(state->h1, 32); + } +#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) + cy = (state->h2 >> 2) + (state->h2 & ~3ULL); + state->h2 &= 3; + state->h0 += cy; + state->h1 += (cy = ULT(state->h0, cy)); + state->h2 += ULT(state->h1, cy); +#undef ULT + state->is_base2_26 = 0; +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_arm(ctx, key); + return true; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t 
len, const u32 padbit, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. */ + BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || + PAGE_SIZE % POLY1305_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_blocks_arm(ctx, inp, len, padbit); + return true; + } + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + poly1305_blocks_neon(ctx, inp, bytes, padbit); + len -= bytes; + if (!len) + break; + inp += bytes; + simd_relax(simd_context); + } + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_emit_arm(ctx, mac, nonce); + } else + poly1305_emit_neon(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl new file mode 100644 index 000000000000..468f41b76fbd --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl @@ -0,0 +1,1276 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. 
+# ==================================================================== +# +# IALU(*)/gcc-4.4 NEON +# +# ARM11xx(ARMv6) 7.78/+100% - +# Cortex-A5 6.35/+130% 3.00 +# Cortex-A8 6.25/+115% 2.36 +# Cortex-A9 5.10/+95% 2.55 +# Cortex-A15 3.85/+85% 1.25(**) +# Snapdragon S4 5.70/+100% 1.48(**) +# +# (*) this is for -march=armv6, i.e. with bunch of ldrb loading data; +# (**) these are trade-off results, they can be improved by ~8% but at +# the cost of 15/12% regression on Cortex-A5/A7, it's even possible +# to improve Cortex-A9 result, but then A5/A7 loose more than 20%; + +$flavour = shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +($ctx,$inp,$len,$padbit)=map("r$_",(0..3)); + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +#else +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ +# define poly1305_init poly1305_init_arm +# define poly1305_blocks poly1305_blocks_arm +# define poly1305_emit poly1305_emit_arm +#endif + +.text +#if defined(__thumb2__) +.syntax unified +.thumb +#else +.code 32 +#endif + +.globl poly1305_emit +.globl poly1305_blocks +.globl poly1305_init +.type poly1305_init,%function +.align 5 +poly1305_init: +.Lpoly1305_init: + stmdb sp!,{r4-r11} + + eor r3,r3,r3 + cmp $inp,#0 + str r3,[$ctx,#0] @ zero hash value + str r3,[$ctx,#4] + str r3,[$ctx,#8] + str r3,[$ctx,#12] + str r3,[$ctx,#16] + str r3,[$ctx,#36] @ is_base2_26 + add $ctx,$ctx,#20 + +#ifdef __thumb2__ + it eq +#endif + moveq r0,#0 + beq .Lno_key + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + adr r11,.Lpoly1305_init + ldr 
r12,.LOPENSSL_armcap +#endif + ldrb r4,[$inp,#0] + mov r10,#0x0fffffff + ldrb r5,[$inp,#1] + and r3,r10,#-4 @ 0x0ffffffc + ldrb r6,[$inp,#2] + ldrb r7,[$inp,#3] + orr r4,r4,r5,lsl#8 + ldrb r5,[$inp,#4] + orr r4,r4,r6,lsl#16 + ldrb r6,[$inp,#5] + orr r4,r4,r7,lsl#24 + ldrb r7,[$inp,#6] + and r4,r4,r10 + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + ldr r12,[r11,r12] @ OPENSSL_armcap_P +# ifdef __APPLE__ + ldr r12,[r12] +# endif +#endif + ldrb r8,[$inp,#7] + orr r5,r5,r6,lsl#8 + ldrb r6,[$inp,#8] + orr r5,r5,r7,lsl#16 + ldrb r7,[$inp,#9] + orr r5,r5,r8,lsl#24 + ldrb r8,[$inp,#10] + and r5,r5,r3 + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + tst r12,#ARMV7_NEON @ check for NEON +# ifdef __APPLE__ + adr r9,poly1305_blocks_neon + adr r11,poly1305_blocks +# ifdef __thumb2__ + it ne +# endif + movne r11,r9 + adr r12,poly1305_emit + adr r10,poly1305_emit_neon +# ifdef __thumb2__ + it ne +# endif + movne r12,r10 +# else +# ifdef __thumb2__ + itete eq +# endif + addeq r12,r11,#(poly1305_emit-.Lpoly1305_init) + addne r12,r11,#(poly1305_emit_neon-.Lpoly1305_init) + addeq r11,r11,#(poly1305_blocks-.Lpoly1305_init) + addne r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init) +# endif +# ifdef __thumb2__ + orr r12,r12,#1 @ thumb-ify address + orr r11,r11,#1 +# endif +#endif + ldrb r9,[$inp,#11] + orr r6,r6,r7,lsl#8 + ldrb r7,[$inp,#12] + orr r6,r6,r8,lsl#16 + ldrb r8,[$inp,#13] + orr r6,r6,r9,lsl#24 + ldrb r9,[$inp,#14] + and r6,r6,r3 + + ldrb r10,[$inp,#15] + orr r7,r7,r8,lsl#8 + str r4,[$ctx,#0] + orr r7,r7,r9,lsl#16 + str r5,[$ctx,#4] + orr r7,r7,r10,lsl#24 + str r6,[$ctx,#8] + and r7,r7,r3 + str r7,[$ctx,#12] +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + stmia r2,{r11,r12} @ fill functions table + mov r0,#1 +#else + mov r0,#0 +#endif +.Lno_key: + ldmia sp!,{r4-r11} +#if __ARM_ARCH__>=5 + ret @ bx lr +#else + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_init,.-poly1305_init +___ +{ +my 
($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12)); +my ($s1,$s2,$s3)=($r1,$r2,$r3); + +$code.=<<___; +.type poly1305_blocks,%function +.align 5 +poly1305_blocks: +.Lpoly1305_blocks: + stmdb sp!,{r3-r11,lr} + + ands $len,$len,#-16 + beq .Lno_data + + cmp $padbit,#0 + add $len,$len,$inp @ end pointer + sub sp,sp,#32 + + ldmia $ctx,{$h0-$r3} @ load context + + str $ctx,[sp,#12] @ offload stuff + mov lr,$inp + str $len,[sp,#16] + str $r1,[sp,#20] + str $r2,[sp,#24] + str $r3,[sp,#28] + b .Loop + +.Loop: +#if __ARM_ARCH__<7 + ldrb r0,[lr],#16 @ load input +# ifdef __thumb2__ + it hi +# endif + addhi $h4,$h4,#1 @ 1<<128 + ldrb r1,[lr,#-15] + ldrb r2,[lr,#-14] + ldrb r3,[lr,#-13] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-12] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-11] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-10] + adds $h0,$h0,r3 @ accumulate input + + ldrb r3,[lr,#-9] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-8] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-7] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-6] + adcs $h1,$h1,r3 + + ldrb r3,[lr,#-5] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-4] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-3] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-2] + adcs $h2,$h2,r3 + + ldrb r3,[lr,#-1] + orr r1,r0,r1,lsl#8 + str lr,[sp,#8] @ offload input pointer + orr r2,r1,r2,lsl#16 + add $s1,$r1,$r1,lsr#2 + orr r3,r2,r3,lsl#24 +#else + ldr r0,[lr],#16 @ load input +# ifdef __thumb2__ + it hi +# endif + addhi $h4,$h4,#1 @ padbit + ldr r1,[lr,#-12] + ldr r2,[lr,#-8] + ldr r3,[lr,#-4] +# ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +# endif + adds $h0,$h0,r0 @ accumulate input + str lr,[sp,#8] @ offload input pointer + adcs $h1,$h1,r1 + add $s1,$r1,$r1,lsr#2 + adcs $h2,$h2,r2 +#endif + add $s2,$r2,$r2,lsr#2 + adcs $h3,$h3,r3 + add $s3,$r3,$r3,lsr#2 + + umull r2,r3,$h1,$r0 + adc $h4,$h4,#0 + umull r0,r1,$h0,$r0 + umlal r2,r3,$h4,$s1 + umlal r0,r1,$h3,$s1 + ldr $r1,[sp,#20] @ reload $r1 + umlal r2,r3,$h2,$s3 + umlal r0,r1,$h1,$s3 + umlal r2,r3,$h3,$s2 + umlal r0,r1,$h2,$s2 + 
umlal r2,r3,$h0,$r1 + str r0,[sp,#0] @ future $h0 + mul r0,$s2,$h4 + ldr $r2,[sp,#24] @ reload $r2 + adds r2,r2,r1 @ d1+=d0>>32 + eor r1,r1,r1 + adc lr,r3,#0 @ future $h2 + str r2,[sp,#4] @ future $h1 + + mul r2,$s3,$h4 + eor r3,r3,r3 + umlal r0,r1,$h3,$s3 + ldr $r3,[sp,#28] @ reload $r3 + umlal r2,r3,$h3,$r0 + umlal r0,r1,$h2,$r0 + umlal r2,r3,$h2,$r1 + umlal r0,r1,$h1,$r1 + umlal r2,r3,$h1,$r2 + umlal r0,r1,$h0,$r2 + umlal r2,r3,$h0,$r3 + ldr $h0,[sp,#0] + mul $h4,$r0,$h4 + ldr $h1,[sp,#4] + + adds $h2,lr,r0 @ d2+=d1>>32 + ldr lr,[sp,#8] @ reload input pointer + adc r1,r1,#0 + adds $h3,r2,r1 @ d3+=d2>>32 + ldr r0,[sp,#16] @ reload end pointer + adc r3,r3,#0 + add $h4,$h4,r3 @ h4+=d3>>32 + + and r1,$h4,#-4 + and $h4,$h4,#3 + add r1,r1,r1,lsr#2 @ *=5 + adds $h0,$h0,r1 + adcs $h1,$h1,#0 + adcs $h2,$h2,#0 + adcs $h3,$h3,#0 + adc $h4,$h4,#0 + + cmp r0,lr @ done yet? + bhi .Loop + + ldr $ctx,[sp,#12] + add sp,sp,#32 + stmia $ctx,{$h0-$h4} @ store the result + +.Lno_data: +#if __ARM_ARCH__>=5 + ldmia sp!,{r3-r11,pc} +#else + ldmia sp!,{r3-r11,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_blocks,.-poly1305_blocks +___ +} +{ +my ($ctx,$mac,$nonce)=map("r$_",(0..2)); +my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11)); +my $g4=$h4; + +$code.=<<___; +.type poly1305_emit,%function +.align 5 +poly1305_emit: + stmdb sp!,{r4-r11} +.Lpoly1305_emit_enter: + + ldmia $ctx,{$h0-$h4} + adds $g0,$h0,#5 @ compare to modulus + adcs $g1,$h1,#0 + adcs $g2,$h2,#0 + adcs $g3,$h3,#0 + adc $g4,$h4,#0 + tst $g4,#4 @ did it carry/borrow? 
+ +#ifdef __thumb2__ + it ne +#endif + movne $h0,$g0 + ldr $g0,[$nonce,#0] +#ifdef __thumb2__ + it ne +#endif + movne $h1,$g1 + ldr $g1,[$nonce,#4] +#ifdef __thumb2__ + it ne +#endif + movne $h2,$g2 + ldr $g2,[$nonce,#8] +#ifdef __thumb2__ + it ne +#endif + movne $h3,$g3 + ldr $g3,[$nonce,#12] + + adds $h0,$h0,$g0 + adcs $h1,$h1,$g1 + adcs $h2,$h2,$g2 + adc $h3,$h3,$g3 + +#if __ARM_ARCH__>=7 +# ifdef __ARMEB__ + rev $h0,$h0 + rev $h1,$h1 + rev $h2,$h2 + rev $h3,$h3 +# endif + str $h0,[$mac,#0] + str $h1,[$mac,#4] + str $h2,[$mac,#8] + str $h3,[$mac,#12] +#else + strb $h0,[$mac,#0] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#4] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#8] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#12] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#1] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#5] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#9] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#13] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#2] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#6] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#10] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#14] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#3] + strb $h1,[$mac,#7] + strb $h2,[$mac,#11] + strb $h3,[$mac,#15] +#endif + ldmia sp!,{r4-r11} +#if __ARM_ARCH__>=5 + ret @ bx lr +#else + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_emit,.-poly1305_emit +___ +{ +my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9)); +my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14)); +my ($T0,$T1,$MASK) = map("q$_",(15,4,0)); + +my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7)); + +$code.=<<___; +#if (defined(__KERNEL__) && defined(CONFIG_KERNEL_MODE_NEON)) || (!defined(__KERNEL__) && __ARM_MAX_ARCH__>=7) +.fpu neon + +.type poly1305_init_neon,%function +.align 5 +poly1305_init_neon: +.Lpoly1305_init_neon: + ldr r4,[$ctx,#20] @ load key base 2^32 + ldr r5,[$ctx,#24] + ldr r6,[$ctx,#28] + ldr r7,[$ctx,#32] + + and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 + 
mov r3,r4,lsr#26 + mov r4,r5,lsr#20 + orr r3,r3,r5,lsl#6 + mov r5,r6,lsr#14 + orr r4,r4,r6,lsl#12 + mov r6,r7,lsr#8 + orr r5,r5,r7,lsl#18 + and r3,r3,#0x03ffffff + and r4,r4,#0x03ffffff + and r5,r5,#0x03ffffff + + vdup.32 $R0,r2 @ r^1 in both lanes + add r2,r3,r3,lsl#2 @ *5 + vdup.32 $R1,r3 + add r3,r4,r4,lsl#2 + vdup.32 $S1,r2 + vdup.32 $R2,r4 + add r4,r5,r5,lsl#2 + vdup.32 $S2,r3 + vdup.32 $R3,r5 + add r5,r6,r6,lsl#2 + vdup.32 $S3,r4 + vdup.32 $R4,r6 + vdup.32 $S4,r5 + + mov $zeros,#2 @ counter + +.Lsquare_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + + vmull.u32 $D0,$R0,${R0}[1] + vmull.u32 $D1,$R1,${R0}[1] + vmull.u32 $D2,$R2,${R0}[1] + vmull.u32 $D3,$R3,${R0}[1] + vmull.u32 $D4,$R4,${R0}[1] + + vmlal.u32 $D0,$R4,${S1}[1] + vmlal.u32 $D1,$R0,${R1}[1] + vmlal.u32 $D2,$R1,${R1}[1] + vmlal.u32 $D3,$R2,${R1}[1] + vmlal.u32 $D4,$R3,${R1}[1] + + vmlal.u32 $D0,$R3,${S2}[1] + vmlal.u32 $D1,$R4,${S2}[1] + vmlal.u32 $D3,$R1,${R2}[1] + vmlal.u32 $D2,$R0,${R2}[1] + vmlal.u32 $D4,$R2,${R2}[1] + + vmlal.u32 $D0,$R2,${S3}[1] + vmlal.u32 $D3,$R0,${R3}[1] + vmlal.u32 $D1,$R3,${S3}[1] + vmlal.u32 $D2,$R4,${S3}[1] + vmlal.u32 $D4,$R1,${R3}[1] + + vmlal.u32 $D3,$R4,${S4}[1] + vmlal.u32 $D0,$R1,${S4}[1] + vmlal.u32 $D1,$R2,${S4}[1] + vmlal.u32 $D2,$R3,${S4}[1] + vmlal.u32 $D4,$R0,${R4}[1] + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + @ and P. Schwabe + @ + @ H0>>+H1>>+H2>>+H3>>+H4 + @ H3>>+H4>>*5+H0>>+H1 + @ + @ Trivia. + @ + @ Result of multiplication of n-bit number by m-bit number is + @ n+m bits wide. However! Even though 2^n is a n+1-bit number, + @ m-bit number multiplied by 2^n is still n+m bits wide. 
+ @ + @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2, + @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit + @ one is n+1 bits wide. + @ + @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that + @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4 + @ can be 27. However! In cases when their width exceeds 26 bits + @ they are limited by 2^26+2^6. This in turn means that *sum* + @ of the products with these values can still be viewed as sum + @ of 52-bit numbers as long as the amount of addends is not a + @ power of 2. For example, + @ + @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4, + @ + @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or + @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than + @ 8 * (2^52) or 2^55. However, the value is then multiplied by + @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12), + @ which is less than 32 * (2^52) or 2^57. And when processing + @ data we are looking at triple as many addends... + @ + @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and + @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the + @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while + @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32 + @ instruction accepts 2x32-bit input and writes 2x64-bit result. + @ This means that result of reduction have to be compressed upon + @ loop wrap-around. This can be done in the process of reduction + @ to minimize amount of instructions [as well as amount of + @ 128-bit instructions, which benefits low-end processors], but + @ one has to watch for H2 (which is narrower than H0) and 5*H4 + @ not being wider than 58 bits, so that result of right shift + @ by 26 bits fits in 32 bits. This is also useful on x86, + @ because it allows to use paddd in place for paddq, which + @ benefits Atom, where paddq is ridiculously slow. 
+ + vshr.u64 $T0,$D3,#26 + vmovn.i64 $D3#lo,$D3 + vshr.u64 $T1,$D0,#26 + vmovn.i64 $D0#lo,$D0 + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vbic.i32 $D3#lo,#0xfc000000 @ &=0x03ffffff + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + vbic.i32 $D0#lo,#0xfc000000 + + vshrn.u64 $T0#lo,$D4,#26 + vmovn.i64 $D4#lo,$D4 + vshr.u64 $T1,$D1,#26 + vmovn.i64 $D1#lo,$D1 + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + vbic.i32 $D4#lo,#0xfc000000 + vbic.i32 $D1#lo,#0xfc000000 + + vadd.i32 $D0#lo,$D0#lo,$T0#lo + vshl.u32 $T0#lo,$T0#lo,#2 + vshrn.u64 $T1#lo,$D2,#26 + vmovn.i64 $D2#lo,$D2 + vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0 + vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 + vbic.i32 $D2#lo,#0xfc000000 + + vshr.u32 $T0#lo,$D0#lo,#26 + vbic.i32 $D0#lo,#0xfc000000 + vshr.u32 $T1#lo,$D3#lo,#26 + vbic.i32 $D3#lo,#0xfc000000 + vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 + vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 + + subs $zeros,$zeros,#1 + beq .Lsquare_break_neon + + add $tbl0,$ctx,#(48+0*9*4) + add $tbl1,$ctx,#(48+1*9*4) + + vtrn.32 $R0,$D0#lo @ r^2:r^1 + vtrn.32 $R2,$D2#lo + vtrn.32 $R3,$D3#lo + vtrn.32 $R1,$D1#lo + vtrn.32 $R4,$D4#lo + + vshl.u32 $S2,$R2,#2 @ *5 + vshl.u32 $S3,$R3,#2 + vshl.u32 $S1,$R1,#2 + vshl.u32 $S4,$R4,#2 + vadd.i32 $S2,$S2,$R2 + vadd.i32 $S1,$S1,$R1 + vadd.i32 $S3,$S3,$R3 + vadd.i32 $S4,$S4,$R4 + + vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! + vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! + vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! 
+ vst1.32 {${S4}[0]},[$tbl0,:32] + vst1.32 {${S4}[1]},[$tbl1,:32] + + b .Lsquare_neon + +.align 4 +.Lsquare_break_neon: + add $tbl0,$ctx,#(48+2*4*9) + add $tbl1,$ctx,#(48+3*4*9) + + vmov $R0,$D0#lo @ r^4:r^3 + vshl.u32 $S1,$D1#lo,#2 @ *5 + vmov $R1,$D1#lo + vshl.u32 $S2,$D2#lo,#2 + vmov $R2,$D2#lo + vshl.u32 $S3,$D3#lo,#2 + vmov $R3,$D3#lo + vshl.u32 $S4,$D4#lo,#2 + vmov $R4,$D4#lo + vadd.i32 $S1,$S1,$D1#lo + vadd.i32 $S2,$S2,$D2#lo + vadd.i32 $S3,$S3,$D3#lo + vadd.i32 $S4,$S4,$D4#lo + + vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! + vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! + vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vst1.32 {${S4}[0]},[$tbl0] + vst1.32 {${S4}[1]},[$tbl1] + + ret @ bx lr +.size poly1305_init_neon,.-poly1305_init_neon + +#ifdef __KERNEL__ +.globl poly1305_blocks_neon +#endif +.type poly1305_blocks_neon,%function +.align 5 +poly1305_blocks_neon: + ldr ip,[$ctx,#36] @ is_base2_26 + ands $len,$len,#-16 + beq .Lno_data_neon + + cmp $len,#64 + bhs .Lenter_neon + tst ip,ip @ is_base2_26? + beq .Lpoly1305_blocks + +.Lenter_neon: + stmdb sp!,{r4-r7} + vstmdb sp!,{d8-d15} @ ABI specification says so + + tst ip,ip @ is_base2_26? 
+ bne .Lbase2_26_neon + + stmdb sp!,{r1-r3,lr} + bl .Lpoly1305_init_neon + + ldr r4,[$ctx,#0] @ load hash value base 2^32 + ldr r5,[$ctx,#4] + ldr r6,[$ctx,#8] + ldr r7,[$ctx,#12] + ldr ip,[$ctx,#16] + + and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 + mov r3,r4,lsr#26 + veor $D0#lo,$D0#lo,$D0#lo + mov r4,r5,lsr#20 + orr r3,r3,r5,lsl#6 + veor $D1#lo,$D1#lo,$D1#lo + mov r5,r6,lsr#14 + orr r4,r4,r6,lsl#12 + veor $D2#lo,$D2#lo,$D2#lo + mov r6,r7,lsr#8 + orr r5,r5,r7,lsl#18 + veor $D3#lo,$D3#lo,$D3#lo + and r3,r3,#0x03ffffff + orr r6,r6,ip,lsl#24 + veor $D4#lo,$D4#lo,$D4#lo + and r4,r4,#0x03ffffff + mov r1,#1 + and r5,r5,#0x03ffffff + str r1,[$ctx,#36] @ is_base2_26 + + vmov.32 $D0#lo[0],r2 + vmov.32 $D1#lo[0],r3 + vmov.32 $D2#lo[0],r4 + vmov.32 $D3#lo[0],r5 + vmov.32 $D4#lo[0],r6 + adr $zeros,.Lzeros + + ldmia sp!,{r1-r3,lr} + b .Lbase2_32_neon + +.align 4 +.Lbase2_26_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ load hash value + + veor $D0#lo,$D0#lo,$D0#lo + veor $D1#lo,$D1#lo,$D1#lo + veor $D2#lo,$D2#lo,$D2#lo + veor $D3#lo,$D3#lo,$D3#lo + veor $D4#lo,$D4#lo,$D4#lo + vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! + adr $zeros,.Lzeros + vld1.32 {$D4#lo[0]},[$ctx] + sub $ctx,$ctx,#16 @ rewind + +.Lbase2_32_neon: + add $in2,$inp,#32 + mov $padbit,$padbit,lsl#24 + tst $len,#31 + beq .Leven + + vld4.32 {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]! 
+ vmov.32 $H4#lo[0],$padbit + sub $len,$len,#16 + add $in2,$inp,#32 + +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H3,$H3 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 +# endif + vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26 + vshl.u32 $H3#lo,$H3#lo,#18 + + vsri.u32 $H3#lo,$H2#lo,#14 + vshl.u32 $H2#lo,$H2#lo,#12 + vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi + + vbic.i32 $H3#lo,#0xfc000000 + vsri.u32 $H2#lo,$H1#lo,#20 + vshl.u32 $H1#lo,$H1#lo,#6 + + vbic.i32 $H2#lo,#0xfc000000 + vsri.u32 $H1#lo,$H0#lo,#26 + vadd.i32 $H3#hi,$H3#lo,$D3#lo + + vbic.i32 $H0#lo,#0xfc000000 + vbic.i32 $H1#lo,#0xfc000000 + vadd.i32 $H2#hi,$H2#lo,$D2#lo + + vadd.i32 $H0#hi,$H0#lo,$D0#lo + vadd.i32 $H1#hi,$H1#lo,$D1#lo + + mov $tbl1,$zeros + add $tbl0,$ctx,#48 + + cmp $len,$len + b .Long_tail + +.align 4 +.Leven: + subs $len,$len,#64 + it lo + movlo $in2,$zeros + + vmov.i32 $H4,#1<<24 @ padbit, yes, always + vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] + add $inp,$inp,#64 + vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) + add $in2,$in2,#64 + itt hi + addhi $tbl1,$ctx,#(48+1*9*4) + addhi $tbl0,$ctx,#(48+3*9*4) + +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H3,$H3 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 +# endif + vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 + vshl.u32 $H3,$H3,#18 + + vsri.u32 $H3,$H2,#14 + vshl.u32 $H2,$H2,#12 + + vbic.i32 $H3,#0xfc000000 + vsri.u32 $H2,$H1,#20 + vshl.u32 $H1,$H1,#6 + + vbic.i32 $H2,#0xfc000000 + vsri.u32 $H1,$H0,#26 + + vbic.i32 $H0,#0xfc000000 + vbic.i32 $H1,#0xfc000000 + + bls .Lskip_loop + + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^2 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
+ b .Loop_neon + +.align 5 +.Loop_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 + @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + @ \___________________/ + @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + @ \___________________/ \____________________/ + @ + @ Note that we start with inp[2:3]*r^2. This is because it + @ doesn't depend on reduction in previous iteration. + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ inp[2:3]*r^2 + + vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1] + vmull.u32 $D2,$H2#hi,${R0}[1] + vadd.i32 $H0#lo,$H0#lo,$D0#lo + vmull.u32 $D0,$H0#hi,${R0}[1] + vadd.i32 $H3#lo,$H3#lo,$D3#lo + vmull.u32 $D3,$H3#hi,${R0}[1] + vmlal.u32 $D2,$H1#hi,${R1}[1] + vadd.i32 $H1#lo,$H1#lo,$D1#lo + vmull.u32 $D1,$H1#hi,${R0}[1] + + vadd.i32 $H4#lo,$H4#lo,$D4#lo + vmull.u32 $D4,$H4#hi,${R0}[1] + subs $len,$len,#64 + vmlal.u32 $D0,$H4#hi,${S1}[1] + it lo + movlo $in2,$zeros + vmlal.u32 $D3,$H2#hi,${R1}[1] + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D1,$H0#hi,${R1}[1] + vmlal.u32 $D4,$H3#hi,${R1}[1] + + vmlal.u32 $D0,$H3#hi,${S2}[1] + vmlal.u32 $D3,$H1#hi,${R2}[1] + vmlal.u32 $D4,$H2#hi,${R2}[1] + vmlal.u32 $D1,$H4#hi,${S2}[1] + vmlal.u32 $D2,$H0#hi,${R2}[1] + + vmlal.u32 $D3,$H0#hi,${R3}[1] + vmlal.u32 $D0,$H2#hi,${S3}[1] + vmlal.u32 $D4,$H1#hi,${R3}[1] + vmlal.u32 $D1,$H3#hi,${S3}[1] + vmlal.u32 $D2,$H4#hi,${S3}[1] + + vmlal.u32 $D3,$H4#hi,${S4}[1] + vmlal.u32 $D0,$H1#hi,${S4}[1] + vmlal.u32 $D4,$H0#hi,${R4}[1] + vmlal.u32 $D1,$H2#hi,${S4}[1] + vmlal.u32 
$D2,$H3#hi,${S4}[1] + + vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) + add $in2,$in2,#64 + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ (hash+inp[0:1])*r^4 and accumulate + + vmlal.u32 $D3,$H3#lo,${R0}[0] + vmlal.u32 $D0,$H0#lo,${R0}[0] + vmlal.u32 $D4,$H4#lo,${R0}[0] + vmlal.u32 $D1,$H1#lo,${R0}[0] + vmlal.u32 $D2,$H2#lo,${R0}[0] + vld1.32 ${S4}[0],[$tbl0,:32] + + vmlal.u32 $D3,$H2#lo,${R1}[0] + vmlal.u32 $D0,$H4#lo,${S1}[0] + vmlal.u32 $D4,$H3#lo,${R1}[0] + vmlal.u32 $D1,$H0#lo,${R1}[0] + vmlal.u32 $D2,$H1#lo,${R1}[0] + + vmlal.u32 $D3,$H1#lo,${R2}[0] + vmlal.u32 $D0,$H3#lo,${S2}[0] + vmlal.u32 $D4,$H2#lo,${R2}[0] + vmlal.u32 $D1,$H4#lo,${S2}[0] + vmlal.u32 $D2,$H0#lo,${R2}[0] + + vmlal.u32 $D3,$H0#lo,${R3}[0] + vmlal.u32 $D0,$H2#lo,${S3}[0] + vmlal.u32 $D4,$H1#lo,${R3}[0] + vmlal.u32 $D1,$H3#lo,${S3}[0] + vmlal.u32 $D3,$H4#lo,${S4}[0] + + vmlal.u32 $D2,$H4#lo,${S3}[0] + vmlal.u32 $D0,$H1#lo,${S4}[0] + vmlal.u32 $D4,$H0#lo,${R4}[0] + vmov.i32 $H4,#1<<24 @ padbit, yes, always + vmlal.u32 $D1,$H2#lo,${S4}[0] + vmlal.u32 $D2,$H3#lo,${S4}[0] + + vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] + add $inp,$inp,#64 +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 + vrev32.8 $H3,$H3 +# endif + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction interleaved with base 2^32 -> base 2^26 of + @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4. 
+ + vshr.u64 $T0,$D3,#26 + vmovn.i64 $D3#lo,$D3 + vshr.u64 $T1,$D0,#26 + vmovn.i64 $D0#lo,$D0 + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vbic.i32 $D3#lo,#0xfc000000 + vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + vshl.u32 $H3,$H3,#18 + vbic.i32 $D0#lo,#0xfc000000 + + vshrn.u64 $T0#lo,$D4,#26 + vmovn.i64 $D4#lo,$D4 + vshr.u64 $T1,$D1,#26 + vmovn.i64 $D1#lo,$D1 + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + vsri.u32 $H3,$H2,#14 + vbic.i32 $D4#lo,#0xfc000000 + vshl.u32 $H2,$H2,#12 + vbic.i32 $D1#lo,#0xfc000000 + + vadd.i32 $D0#lo,$D0#lo,$T0#lo + vshl.u32 $T0#lo,$T0#lo,#2 + vbic.i32 $H3,#0xfc000000 + vshrn.u64 $T1#lo,$D2,#26 + vmovn.i64 $D2#lo,$D2 + vaddl.u32 $D0,$D0#lo,$T0#lo @ h4 -> h0 [widen for a sec] + vsri.u32 $H2,$H1,#20 + vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 + vshl.u32 $H1,$H1,#6 + vbic.i32 $D2#lo,#0xfc000000 + vbic.i32 $H2,#0xfc000000 + + vshrn.u64 $T0#lo,$D0,#26 @ re-narrow + vmovn.i64 $D0#lo,$D0 + vsri.u32 $H1,$H0,#26 + vbic.i32 $H0,#0xfc000000 + vshr.u32 $T1#lo,$D3#lo,#26 + vbic.i32 $D3#lo,#0xfc000000 + vbic.i32 $D0#lo,#0xfc000000 + vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 + vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 + vbic.i32 $H1,#0xfc000000 + + bhi .Loop_neon + +.Lskip_loop: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + add $tbl1,$ctx,#(48+0*9*4) + add $tbl0,$ctx,#(48+1*9*4) + adds $len,$len,#32 + it ne + movne $len,#0 + bne .Long_tail + + vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi + vadd.i32 $H0#hi,$H0#lo,$D0#lo + vadd.i32 $H3#hi,$H3#lo,$D3#lo + vadd.i32 $H1#hi,$H1#lo,$D1#lo + vadd.i32 $H4#hi,$H4#lo,$D4#lo + +.Long_tail: + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^1 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! 
@ load r^2 + + vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant + vmull.u32 $D2,$H2#hi,$R0 + vadd.i32 $H0#lo,$H0#lo,$D0#lo + vmull.u32 $D0,$H0#hi,$R0 + vadd.i32 $H3#lo,$H3#lo,$D3#lo + vmull.u32 $D3,$H3#hi,$R0 + vadd.i32 $H1#lo,$H1#lo,$D1#lo + vmull.u32 $D1,$H1#hi,$R0 + vadd.i32 $H4#lo,$H4#lo,$D4#lo + vmull.u32 $D4,$H4#hi,$R0 + + vmlal.u32 $D0,$H4#hi,$S1 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vmlal.u32 $D3,$H2#hi,$R1 + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vmlal.u32 $D1,$H0#hi,$R1 + vmlal.u32 $D4,$H3#hi,$R1 + vmlal.u32 $D2,$H1#hi,$R1 + + vmlal.u32 $D3,$H1#hi,$R2 + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D0,$H3#hi,$S2 + vld1.32 ${S4}[0],[$tbl0,:32] + vmlal.u32 $D4,$H2#hi,$R2 + vmlal.u32 $D1,$H4#hi,$S2 + vmlal.u32 $D2,$H0#hi,$R2 + + vmlal.u32 $D3,$H0#hi,$R3 + it ne + addne $tbl1,$ctx,#(48+2*9*4) + vmlal.u32 $D0,$H2#hi,$S3 + it ne + addne $tbl0,$ctx,#(48+3*9*4) + vmlal.u32 $D4,$H1#hi,$R3 + vmlal.u32 $D1,$H3#hi,$S3 + vmlal.u32 $D2,$H4#hi,$S3 + + vmlal.u32 $D3,$H4#hi,$S4 + vorn $MASK,$MASK,$MASK @ all-ones, can be redundant + vmlal.u32 $D0,$H1#hi,$S4 + vshr.u64 $MASK,$MASK,#38 + vmlal.u32 $D4,$H0#hi,$R4 + vmlal.u32 $D1,$H2#hi,$S4 + vmlal.u32 $D2,$H3#hi,$S4 + + beq .Lshort_tail + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ (hash+inp[0:1])*r^4:r^3 and accumulate + + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^3 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 + + vmlal.u32 $D2,$H2#lo,$R0 + vmlal.u32 $D0,$H0#lo,$R0 + vmlal.u32 $D3,$H3#lo,$R0 + vmlal.u32 $D1,$H1#lo,$R0 + vmlal.u32 $D4,$H4#lo,$R0 + + vmlal.u32 $D0,$H4#lo,$S1 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vmlal.u32 $D3,$H2#lo,$R1 + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
+ vmlal.u32 $D1,$H0#lo,$R1 + vmlal.u32 $D4,$H3#lo,$R1 + vmlal.u32 $D2,$H1#lo,$R1 + + vmlal.u32 $D3,$H1#lo,$R2 + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D0,$H3#lo,$S2 + vld1.32 ${S4}[0],[$tbl0,:32] + vmlal.u32 $D4,$H2#lo,$R2 + vmlal.u32 $D1,$H4#lo,$S2 + vmlal.u32 $D2,$H0#lo,$R2 + + vmlal.u32 $D3,$H0#lo,$R3 + vmlal.u32 $D0,$H2#lo,$S3 + vmlal.u32 $D4,$H1#lo,$R3 + vmlal.u32 $D1,$H3#lo,$S3 + vmlal.u32 $D2,$H4#lo,$S3 + + vmlal.u32 $D3,$H4#lo,$S4 + vorn $MASK,$MASK,$MASK @ all-ones + vmlal.u32 $D0,$H1#lo,$S4 + vshr.u64 $MASK,$MASK,#38 + vmlal.u32 $D4,$H0#lo,$R4 + vmlal.u32 $D1,$H2#lo,$S4 + vmlal.u32 $D2,$H3#lo,$S4 + +.Lshort_tail: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ horizontal addition + + vadd.i64 $D3#lo,$D3#lo,$D3#hi + vadd.i64 $D0#lo,$D0#lo,$D0#hi + vadd.i64 $D4#lo,$D4#lo,$D4#hi + vadd.i64 $D1#lo,$D1#lo,$D1#hi + vadd.i64 $D2#lo,$D2#lo,$D2#hi + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction, but without narrowing + + vshr.u64 $T0,$D3,#26 + vand.i64 $D3,$D3,$MASK + vshr.u64 $T1,$D0,#26 + vand.i64 $D0,$D0,$MASK + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + + vshr.u64 $T0,$D4,#26 + vand.i64 $D4,$D4,$MASK + vshr.u64 $T1,$D1,#26 + vand.i64 $D1,$D1,$MASK + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + + vadd.i64 $D0,$D0,$T0 + vshl.u64 $T0,$T0,#2 + vshr.u64 $T1,$D2,#26 + vand.i64 $D2,$D2,$MASK + vadd.i64 $D0,$D0,$T0 @ h4 -> h0 + vadd.i64 $D3,$D3,$T1 @ h2 -> h3 + + vshr.u64 $T0,$D0,#26 + vand.i64 $D0,$D0,$MASK + vshr.u64 $T1,$D3,#26 + vand.i64 $D3,$D3,$MASK + vadd.i64 $D1,$D1,$T0 @ h0 -> h1 + vadd.i64 $D4,$D4,$T1 @ h3 -> h4 + + cmp $len,#0 + bne .Leven + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ store hash value + + vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! 
+ vst1.32 {$D4#lo[0]},[$ctx] + + vldmia sp!,{d8-d15} @ epilogue + ldmia sp!,{r4-r7} +.Lno_data_neon: + ret @ bx lr +.size poly1305_blocks_neon,.-poly1305_blocks_neon + +#ifdef __KERNEL__ +.globl poly1305_emit_neon +#endif +.type poly1305_emit_neon,%function +.align 5 +poly1305_emit_neon: + ldr ip,[$ctx,#36] @ is_base2_26 + + stmdb sp!,{r4-r11} + + tst ip,ip + beq .Lpoly1305_emit_enter + + ldmia $ctx,{$h0-$h4} + eor $g0,$g0,$g0 + + adds $h0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32 + mov $h1,$h1,lsr#6 + adcs $h1,$h1,$h2,lsl#20 + mov $h2,$h2,lsr#12 + adcs $h2,$h2,$h3,lsl#14 + mov $h3,$h3,lsr#18 + adcs $h3,$h3,$h4,lsl#8 + adc $h4,$g0,$h4,lsr#24 @ can be partially reduced ... + + and $g0,$h4,#-4 @ ... so reduce + and $h4,$h3,#3 + add $g0,$g0,$g0,lsr#2 @ *= 5 + adds $h0,$h0,$g0 + adcs $h1,$h1,#0 + adcs $h2,$h2,#0 + adcs $h3,$h3,#0 + adc $h4,$h4,#0 + + adds $g0,$h0,#5 @ compare to modulus + adcs $g1,$h1,#0 + adcs $g2,$h2,#0 + adcs $g3,$h3,#0 + adc $g4,$h4,#0 + tst $g4,#4 @ did it carry/borrow? + + it ne + movne $h0,$g0 + ldr $g0,[$nonce,#0] + it ne + movne $h1,$g1 + ldr $g1,[$nonce,#4] + it ne + movne $h2,$g2 + ldr $g2,[$nonce,#8] + it ne + movne $h3,$g3 + ldr $g3,[$nonce,#12] + + adds $h0,$h0,$g0 @ accumulate nonce + adcs $h1,$h1,$g1 + adcs $h2,$h2,$g2 + adc $h3,$h3,$g3 + +# ifdef __ARMEB__ + rev $h0,$h0 + rev $h1,$h1 + rev $h2,$h2 + rev $h3,$h3 +# endif + str $h0,[$mac,#0] @ store the result + str $h1,[$mac,#4] + str $h2,[$mac,#8] + str $h3,[$mac,#12] + + ldmia sp!,{r4-r11} + ret @ bx lr +.size poly1305_emit_neon,.-poly1305_emit_neon + +.align 5 +.Lzeros: +.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +# ifndef __KERNEL__ +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-.Lpoly1305_init +# endif +#endif +___ +} } +$code.=<<___; +.align 2 +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) +.comm OPENSSL_armcap_P,4,4 +#endif +___ + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/@/ and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval 
$1/geo; + + s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or + s/\bret\b/bx lr/go or + s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4 + + print $_,"\n"; +} +close STDOUT; # enforce flush diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl new file mode 100644 index 000000000000..d513b45a149b --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl @@ -0,0 +1,974 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# This module implements Poly1305 hash for ARMv8. +# +# June 2015 +# +# Numbers are cycles per processed byte with poly1305_blocks alone. +# +# IALU/gcc-4.9 NEON +# +# Apple A7 1.86/+5% 0.72 +# Cortex-A53 2.69/+58% 1.47 +# Cortex-A57 2.70/+7% 1.14 +# Denver 1.64/+50% 1.18(*) +# X-Gene 2.13/+68% 2.27 +# Mongoose 1.77/+75% 1.12 +# Kryo 2.70/+55% 1.13 +# +# (*) estimate based on resources availability is less than 1.0, +# i.e. 
measured result is worse than expected, presumably binary +# translator is not almighty; + +$flavour=shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3)); +my ($mac,$nonce)=($inp,$len); + +my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14)); + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +.extern OPENSSL_armcap_P +#else +# define poly1305_init poly1305_init_arm +# define poly1305_blocks poly1305_blocks_arm +# define poly1305_emit poly1305_emit_arm +#endif + +.text + +// forward "declarations" are required for Apple +.globl poly1305_blocks +.globl poly1305_emit +.globl poly1305_init +.type poly1305_init,%function +.align 5 +poly1305_init: + cmp $inp,xzr + stp xzr,xzr,[$ctx] // zero hash value + stp xzr,xzr,[$ctx,#16] // [along with is_base2_26] + + csel x0,xzr,x0,eq + b.eq .Lno_key + +#ifndef __KERNEL__ +# ifdef __ILP32__ + ldrsw $t1,.LOPENSSL_armcap_P +# else + ldr $t1,.LOPENSSL_armcap_P +# endif + adr $t0,.LOPENSSL_armcap_P + ldr w17,[$t0,$t1] +#endif + + ldp $r0,$r1,[$inp] // load key + mov $s1,#0xfffffffc0fffffff + movk $s1,#0x0fff,lsl#48 +#ifdef __AARCH64EB__ + rev $r0,$r0 // flip bytes + rev $r1,$r1 +#endif + and $r0,$r0,$s1 // &=0ffffffc0fffffff + and $s1,$s1,#-4 + and $r1,$r1,$s1 // &=0ffffffc0ffffffc + stp $r0,$r1,[$ctx,#32] // save key value + +#ifndef __KERNEL__ + tst w17,#ARMV7_NEON + + adr $d0,poly1305_blocks + adr $r0,poly1305_blocks_neon + adr $d1,poly1305_emit + adr $r1,poly1305_emit_neon + + csel $d0,$d0,$r0,eq + csel $d1,$d1,$r1,eq + +# ifdef __ILP32__ + 
stp w12,w13,[$len] +# else + stp $d0,$d1,[$len] +# endif + + mov x0,#1 +#else + mov x0,#0 +#endif +.Lno_key: + ret +.size poly1305_init,.-poly1305_init + +.type poly1305_blocks,%function +.align 5 +poly1305_blocks: + ands $len,$len,#-16 + b.eq .Lno_data + + ldp $h0,$h1,[$ctx] // load hash value + ldp $r0,$r1,[$ctx,#32] // load key value + ldr $h2,[$ctx,#16] + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + b .Loop + +.align 5 +.Loop: + ldp $t0,$t1,[$inp],#16 // load input + sub $len,$len,#16 +#ifdef __AARCH64EB__ + rev $t0,$t0 + rev $t1,$t1 +#endif + adds $h0,$h0,$t0 // accumulate input + adcs $h1,$h1,$t1 + + mul $d0,$h0,$r0 // h0*r0 + adc $h2,$h2,$padbit + umulh $d1,$h0,$r0 + + mul $t0,$h1,$s1 // h1*5*r1 + umulh $t1,$h1,$s1 + + adds $d0,$d0,$t0 + mul $t0,$h0,$r1 // h0*r1 + adc $d1,$d1,$t1 + umulh $d2,$h0,$r1 + + adds $d1,$d1,$t0 + mul $t0,$h1,$r0 // h1*r0 + adc $d2,$d2,xzr + umulh $t1,$h1,$r0 + + adds $d1,$d1,$t0 + mul $t0,$h2,$s1 // h2*5*r1 + adc $d2,$d2,$t1 + mul $t1,$h2,$r0 // h2*r0 + + adds $d1,$d1,$t0 + adc $d2,$d2,$t1 + + and $t0,$d2,#-4 // final reduction + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$d0,$t0 + adcs $h1,$d1,xzr + adc $h2,$h2,xzr + + cbnz $len,.Loop + + stp $h0,$h1,[$ctx] // store hash value + str $h2,[$ctx,#16] + +.Lno_data: + ret +.size poly1305_blocks,.-poly1305_blocks + +.type poly1305_emit,%function +.align 5 +poly1305_emit: + ldp $h0,$h1,[$ctx] // load hash base 2^64 + ldr $h2,[$ctx,#16] + ldp $t0,$t1,[$nonce] // load nonce + + adds $d0,$h0,#5 // compare to modulus + adcs $d1,$h1,xzr + adc $d2,$h2,xzr + + tst $d2,#-4 // see if it's carried/borrowed + + csel $h0,$h0,$d0,eq + csel $h1,$h1,$d1,eq + +#ifdef __AARCH64EB__ + ror $t0,$t0,#32 // flip nonce words + ror $t1,$t1,#32 +#endif + adds $h0,$h0,$t0 // accumulate nonce + adc $h1,$h1,$t1 +#ifdef __AARCH64EB__ + rev $h0,$h0 // flip output bytes + rev $h1,$h1 +#endif + stp $h0,$h1,[$mac] // write result + + ret +.size poly1305_emit,.-poly1305_emit +___ +my 
($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8)); +my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13)); +my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18)); +my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23)); +my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28)); +my ($T0,$T1,$MASK) = map("v$_",(29..31)); + +my ($in2,$zeros)=("x16","x17"); +my $is_base2_26 = $zeros; # borrow + +$code.=<<___; +.type __poly1305_mult,%function +.align 5 +__poly1305_mult: + mul $d0,$h0,$r0 // h0*r0 + umulh $d1,$h0,$r0 + + mul $t0,$h1,$s1 // h1*5*r1 + umulh $t1,$h1,$s1 + + adds $d0,$d0,$t0 + mul $t0,$h0,$r1 // h0*r1 + adc $d1,$d1,$t1 + umulh $d2,$h0,$r1 + + adds $d1,$d1,$t0 + mul $t0,$h1,$r0 // h1*r0 + adc $d2,$d2,xzr + umulh $t1,$h1,$r0 + + adds $d1,$d1,$t0 + mul $t0,$h2,$s1 // h2*5*r1 + adc $d2,$d2,$t1 + mul $t1,$h2,$r0 // h2*r0 + + adds $d1,$d1,$t0 + adc $d2,$d2,$t1 + + and $t0,$d2,#-4 // final reduction + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$d0,$t0 + adcs $h1,$d1,xzr + adc $h2,$h2,xzr + + ret +.size __poly1305_mult,.-__poly1305_mult + +.type __poly1305_splat,%function +.align 5 +__poly1305_splat: + and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x13,$h0,#26,#26 + extr x14,$h1,$h0,#52 + and x14,x14,#0x03ffffff + ubfx x15,$h1,#14,#26 + extr x16,$h2,$h1,#40 + + str w12,[$ctx,#16*0] // r0 + add w12,w13,w13,lsl#2 // r1*5 + str w13,[$ctx,#16*1] // r1 + add w13,w14,w14,lsl#2 // r2*5 + str w12,[$ctx,#16*2] // s1 + str w14,[$ctx,#16*3] // r2 + add w14,w15,w15,lsl#2 // r3*5 + str w13,[$ctx,#16*4] // s2 + str w15,[$ctx,#16*5] // r3 + add w15,w16,w16,lsl#2 // r4*5 + str w14,[$ctx,#16*6] // s3 + str w16,[$ctx,#16*7] // r4 + str w15,[$ctx,#16*8] // s4 + + ret +.size __poly1305_splat,.-__poly1305_splat + +#if !defined(__KERNEL__) || defined(CONFIG_KERNEL_MODE_NEON) +#ifdef __KERNEL__ +.globl poly1305_blocks_neon +.globl poly1305_emit_neon +#endif + +.type poly1305_blocks_neon,%function +.align 5 +poly1305_blocks_neon: + 
ldr $is_base2_26,[$ctx,#24] + cmp $len,#128 + b.hs .Lblocks_neon + cbz $is_base2_26,poly1305_blocks + +.Lblocks_neon: + stp x29,x30,[sp,#-80]! + add x29,sp,#0 + + ands $len,$len,#-16 + b.eq .Lno_data_neon + + cbz $is_base2_26,.Lbase2_64_neon + + ldp w10,w11,[$ctx] // load hash value base 2^26 + ldp w12,w13,[$ctx,#8] + ldr w14,[$ctx,#16] + + tst $len,#31 + b.eq .Leven_neon + + ldp $r0,$r1,[$ctx,#32] // load key value + + add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64 + lsr $h1,x12,#12 + adds $h0,$h0,x12,lsl#52 + add $h1,$h1,x13,lsl#14 + adc $h1,$h1,xzr + lsr $h2,x14,#24 + adds $h1,$h1,x14,lsl#40 + adc $d2,$h2,xzr // can be partially reduced... + + ldp $d0,$d1,[$inp],#16 // load input + sub $len,$len,#16 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + + and $t0,$d2,#-4 // ... so reduce + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$h0,$t0 + adcs $h1,$h1,xzr + adc $h2,$h2,xzr + +#ifdef __AARCH64EB__ + rev $d0,$d0 + rev $d1,$d1 +#endif + adds $h0,$h0,$d0 // accumulate input + adcs $h1,$h1,$d1 + adc $h2,$h2,$padbit + + bl __poly1305_mult + ldr x30,[sp,#8] + + cbz $padbit,.Lstore_base2_64_neon + + and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x11,$h0,#26,#26 + extr x12,$h1,$h0,#52 + and x12,x12,#0x03ffffff + ubfx x13,$h1,#14,#26 + extr x14,$h2,$h1,#40 + + cbnz $len,.Leven_neon + + stp w10,w11,[$ctx] // store hash value base 2^26 + stp w12,w13,[$ctx,#8] + str w14,[$ctx,#16] + b .Lno_data_neon + +.align 4 +.Lstore_base2_64_neon: + stp $h0,$h1,[$ctx] // store hash value base 2^64 + stp $h2,xzr,[$ctx,#16] // note that is_base2_26 is zeroed + b .Lno_data_neon + +.align 4 +.Lbase2_64_neon: + ldp $r0,$r1,[$ctx,#32] // load key value + + ldp $h0,$h1,[$ctx] // load hash value base 2^64 + ldr $h2,[$ctx,#16] + + tst $len,#31 + b.eq .Linit_neon + + ldp $d0,$d1,[$inp],#16 // load input + sub $len,$len,#16 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) +#ifdef __AARCH64EB__ + rev $d0,$d0 + rev $d1,$d1 +#endif + adds $h0,$h0,$d0 // accumulate input + adcs 
$h1,$h1,$d1 + adc $h2,$h2,$padbit + + bl __poly1305_mult + +.Linit_neon: + and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x11,$h0,#26,#26 + extr x12,$h1,$h0,#52 + and x12,x12,#0x03ffffff + ubfx x13,$h1,#14,#26 + extr x14,$h2,$h1,#40 + + stp d8,d9,[sp,#16] // meet ABI requirements + stp d10,d11,[sp,#32] + stp d12,d13,[sp,#48] + stp d14,d15,[sp,#64] + + fmov ${H0},x10 + fmov ${H1},x11 + fmov ${H2},x12 + fmov ${H3},x13 + fmov ${H4},x14 + + ////////////////////////////////// initialize r^n table + mov $h0,$r0 // r^1 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + mov $h1,$r1 + mov $h2,xzr + add $ctx,$ctx,#48+12 + bl __poly1305_splat + + bl __poly1305_mult // r^2 + sub $ctx,$ctx,#4 + bl __poly1305_splat + + bl __poly1305_mult // r^3 + sub $ctx,$ctx,#4 + bl __poly1305_splat + + bl __poly1305_mult // r^4 + sub $ctx,$ctx,#4 + bl __poly1305_splat + ldr x30,[sp,#8] + + add $in2,$inp,#32 + adr $zeros,.Lzeros + subs $len,$len,#64 + csel $in2,$zeros,$in2,lo + + mov x4,#1 + str x4,[$ctx,#-24] // set is_base2_26 + sub $ctx,$ctx,#48 // restore original $ctx + b .Ldo_neon + +.align 4 +.Leven_neon: + add $in2,$inp,#32 + adr $zeros,.Lzeros + subs $len,$len,#64 + csel $in2,$zeros,$in2,lo + + stp d8,d9,[sp,#16] // meet ABI requirements + stp d10,d11,[sp,#32] + stp d12,d13,[sp,#48] + stp d14,d15,[sp,#64] + + fmov ${H0},x10 + fmov ${H1},x11 + fmov ${H2},x12 + fmov ${H3},x13 + fmov ${H4},x14 + +.Ldo_neon: + ldp x8,x12,[$in2],#16 // inp[2:3] (or zero) + ldp x9,x13,[$in2],#48 + + lsl $padbit,$padbit,#24 + add x15,$ctx,#48 + +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + and x5,x9,#0x03ffffff + ubfx x6,x8,#26,#26 + ubfx x7,x9,#26,#26 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + extr x8,x12,x8,#52 + extr x9,x13,x9,#52 + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + fmov $IN23_0,x4 + and x8,x8,#0x03ffffff + and x9,x9,#0x03ffffff + ubfx x10,x12,#14,#26 + ubfx x11,x13,#14,#26 + add 
x12,$padbit,x12,lsr#40 + add x13,$padbit,x13,lsr#40 + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + fmov $IN23_1,x6 + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + fmov $IN23_2,x8 + fmov $IN23_3,x10 + fmov $IN23_4,x12 + + ldp x8,x12,[$inp],#16 // inp[0:1] + ldp x9,x13,[$inp],#48 + + ld1 {$R0,$R1,$S1,$R2},[x15],#64 + ld1 {$S2,$R3,$S3,$R4},[x15],#64 + ld1 {$S4},[x15] + +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + and x5,x9,#0x03ffffff + ubfx x6,x8,#26,#26 + ubfx x7,x9,#26,#26 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + extr x8,x12,x8,#52 + extr x9,x13,x9,#52 + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + fmov $IN01_0,x4 + and x8,x8,#0x03ffffff + and x9,x9,#0x03ffffff + ubfx x10,x12,#14,#26 + ubfx x11,x13,#14,#26 + add x12,$padbit,x12,lsr#40 + add x13,$padbit,x13,lsr#40 + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + fmov $IN01_1,x6 + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + movi $MASK.2d,#-1 + fmov $IN01_2,x8 + fmov $IN01_3,x10 + fmov $IN01_4,x12 + ushr $MASK.2d,$MASK.2d,#38 + + b.ls .Lskip_loop + +.align 4 +.Loop_neon: + //////////////////////////////////////////////////////////////// + // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 + // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + // \___________________/ + // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + // \___________________/ \____________________/ + // + // Note that we start with inp[2:3]*r^2. This is because it + // doesn't depend on reduction in previous iteration. 
+ //////////////////////////////////////////////////////////////// + // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0 + // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4 + // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3 + // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2 + // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1 + + subs $len,$len,#64 + umull $ACC4,$IN23_0,${R4}[2] + csel $in2,$zeros,$in2,lo + umull $ACC3,$IN23_0,${R3}[2] + umull $ACC2,$IN23_0,${R2}[2] + ldp x8,x12,[$in2],#16 // inp[2:3] (or zero) + umull $ACC1,$IN23_0,${R1}[2] + ldp x9,x13,[$in2],#48 + umull $ACC0,$IN23_0,${R0}[2] +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + + umlal $ACC4,$IN23_1,${R3}[2] + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + umlal $ACC3,$IN23_1,${R2}[2] + and x5,x9,#0x03ffffff + umlal $ACC2,$IN23_1,${R1}[2] + ubfx x6,x8,#26,#26 + umlal $ACC1,$IN23_1,${R0}[2] + ubfx x7,x9,#26,#26 + umlal $ACC0,$IN23_1,${S4}[2] + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + + umlal $ACC4,$IN23_2,${R2}[2] + extr x8,x12,x8,#52 + umlal $ACC3,$IN23_2,${R1}[2] + extr x9,x13,x9,#52 + umlal $ACC2,$IN23_2,${R0}[2] + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + umlal $ACC1,$IN23_2,${S4}[2] + fmov $IN23_0,x4 + umlal $ACC0,$IN23_2,${S3}[2] + and x8,x8,#0x03ffffff + + umlal $ACC4,$IN23_3,${R1}[2] + and x9,x9,#0x03ffffff + umlal $ACC3,$IN23_3,${R0}[2] + ubfx x10,x12,#14,#26 + umlal $ACC2,$IN23_3,${S4}[2] + ubfx x11,x13,#14,#26 + umlal $ACC1,$IN23_3,${S3}[2] + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + umlal $ACC0,$IN23_3,${S2}[2] + fmov $IN23_1,x6 + + add $IN01_2,$IN01_2,$H2 + add x12,$padbit,x12,lsr#40 + umlal $ACC4,$IN23_4,${R0}[2] + add x13,$padbit,x13,lsr#40 + umlal $ACC3,$IN23_4,${S4}[2] + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + umlal $ACC2,$IN23_4,${S3}[2] + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + umlal $ACC1,$IN23_4,${S2}[2] + fmov $IN23_2,x8 + umlal $ACC0,$IN23_4,${S1}[2] + fmov $IN23_3,x10 + + 
//////////////////////////////////////////////////////////////// + // (hash+inp[0:1])*r^4 and accumulate + + add $IN01_0,$IN01_0,$H0 + fmov $IN23_4,x12 + umlal $ACC3,$IN01_2,${R1}[0] + ldp x8,x12,[$inp],#16 // inp[0:1] + umlal $ACC0,$IN01_2,${S3}[0] + ldp x9,x13,[$inp],#48 + umlal $ACC4,$IN01_2,${R2}[0] + umlal $ACC1,$IN01_2,${S4}[0] + umlal $ACC2,$IN01_2,${R0}[0] +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + + add $IN01_1,$IN01_1,$H1 + umlal $ACC3,$IN01_0,${R3}[0] + umlal $ACC4,$IN01_0,${R4}[0] + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + umlal $ACC2,$IN01_0,${R2}[0] + and x5,x9,#0x03ffffff + umlal $ACC0,$IN01_0,${R0}[0] + ubfx x6,x8,#26,#26 + umlal $ACC1,$IN01_0,${R1}[0] + ubfx x7,x9,#26,#26 + + add $IN01_3,$IN01_3,$H3 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + umlal $ACC3,$IN01_1,${R2}[0] + extr x8,x12,x8,#52 + umlal $ACC4,$IN01_1,${R3}[0] + extr x9,x13,x9,#52 + umlal $ACC0,$IN01_1,${S4}[0] + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + umlal $ACC2,$IN01_1,${R1}[0] + fmov $IN01_0,x4 + umlal $ACC1,$IN01_1,${R0}[0] + and x8,x8,#0x03ffffff + + add $IN01_4,$IN01_4,$H4 + and x9,x9,#0x03ffffff + umlal $ACC3,$IN01_3,${R0}[0] + ubfx x10,x12,#14,#26 + umlal $ACC0,$IN01_3,${S2}[0] + ubfx x11,x13,#14,#26 + umlal $ACC4,$IN01_3,${R1}[0] + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + umlal $ACC1,$IN01_3,${S3}[0] + fmov $IN01_1,x6 + umlal $ACC2,$IN01_3,${S4}[0] + add x12,$padbit,x12,lsr#40 + + umlal $ACC3,$IN01_4,${S4}[0] + add x13,$padbit,x13,lsr#40 + umlal $ACC0,$IN01_4,${S1}[0] + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + umlal $ACC4,$IN01_4,${R0}[0] + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + umlal $ACC1,$IN01_4,${S2}[0] + fmov $IN01_2,x8 + umlal $ACC2,$IN01_4,${S3}[0] + fmov $IN01_3,x10 + fmov $IN01_4,x12 + + ///////////////////////////////////////////////////////////////// + // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + // and P. 
Schwabe + // + // [see discussion in poly1305-armv4 module] + + ushr $T0.2d,$ACC3,#26 + xtn $H3,$ACC3 + ushr $T1.2d,$ACC0,#26 + and $ACC0,$ACC0,$MASK.2d + add $ACC4,$ACC4,$T0.2d // h3 -> h4 + bic $H3,#0xfc,lsl#24 // &=0x03ffffff + add $ACC1,$ACC1,$T1.2d // h0 -> h1 + + ushr $T0.2d,$ACC4,#26 + xtn $H4,$ACC4 + ushr $T1.2d,$ACC1,#26 + xtn $H1,$ACC1 + bic $H4,#0xfc,lsl#24 + add $ACC2,$ACC2,$T1.2d // h1 -> h2 + + add $ACC0,$ACC0,$T0.2d + shl $T0.2d,$T0.2d,#2 + shrn $T1.2s,$ACC2,#26 + xtn $H2,$ACC2 + add $ACC0,$ACC0,$T0.2d // h4 -> h0 + bic $H1,#0xfc,lsl#24 + add $H3,$H3,$T1.2s // h2 -> h3 + bic $H2,#0xfc,lsl#24 + + shrn $T0.2s,$ACC0,#26 + xtn $H0,$ACC0 + ushr $T1.2s,$H3,#26 + bic $H3,#0xfc,lsl#24 + bic $H0,#0xfc,lsl#24 + add $H1,$H1,$T0.2s // h0 -> h1 + add $H4,$H4,$T1.2s // h3 -> h4 + + b.hi .Loop_neon + +.Lskip_loop: + dup $IN23_2,${IN23_2}[0] + add $IN01_2,$IN01_2,$H2 + + //////////////////////////////////////////////////////////////// + // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + adds $len,$len,#32 + b.ne .Long_tail + + dup $IN23_2,${IN01_2}[0] + add $IN23_0,$IN01_0,$H0 + add $IN23_3,$IN01_3,$H3 + add $IN23_1,$IN01_1,$H1 + add $IN23_4,$IN01_4,$H4 + +.Long_tail: + dup $IN23_0,${IN23_0}[0] + umull2 $ACC0,$IN23_2,${S3} + umull2 $ACC3,$IN23_2,${R1} + umull2 $ACC4,$IN23_2,${R2} + umull2 $ACC2,$IN23_2,${R0} + umull2 $ACC1,$IN23_2,${S4} + + dup $IN23_1,${IN23_1}[0] + umlal2 $ACC0,$IN23_0,${R0} + umlal2 $ACC2,$IN23_0,${R2} + umlal2 $ACC3,$IN23_0,${R3} + umlal2 $ACC4,$IN23_0,${R4} + umlal2 $ACC1,$IN23_0,${R1} + + dup $IN23_3,${IN23_3}[0] + umlal2 $ACC0,$IN23_1,${S4} + umlal2 $ACC3,$IN23_1,${R2} + umlal2 $ACC2,$IN23_1,${R1} + umlal2 $ACC4,$IN23_1,${R3} + umlal2 $ACC1,$IN23_1,${R0} + + dup $IN23_4,${IN23_4}[0] + umlal2 $ACC3,$IN23_3,${R0} + umlal2 $ACC4,$IN23_3,${R1} + umlal2 $ACC0,$IN23_3,${S2} + umlal2 $ACC1,$IN23_3,${S3} + umlal2 $ACC2,$IN23_3,${S4} + + umlal2 $ACC3,$IN23_4,${S4} + umlal2 $ACC0,$IN23_4,${S1} + umlal2 $ACC4,$IN23_4,${R0} + umlal2 
$ACC1,$IN23_4,${S2} + umlal2 $ACC2,$IN23_4,${S3} + + b.eq .Lshort_tail + + //////////////////////////////////////////////////////////////// + // (hash+inp[0:1])*r^4:r^3 and accumulate + + add $IN01_0,$IN01_0,$H0 + umlal $ACC3,$IN01_2,${R1} + umlal $ACC0,$IN01_2,${S3} + umlal $ACC4,$IN01_2,${R2} + umlal $ACC1,$IN01_2,${S4} + umlal $ACC2,$IN01_2,${R0} + + add $IN01_1,$IN01_1,$H1 + umlal $ACC3,$IN01_0,${R3} + umlal $ACC0,$IN01_0,${R0} + umlal $ACC4,$IN01_0,${R4} + umlal $ACC1,$IN01_0,${R1} + umlal $ACC2,$IN01_0,${R2} + + add $IN01_3,$IN01_3,$H3 + umlal $ACC3,$IN01_1,${R2} + umlal $ACC0,$IN01_1,${S4} + umlal $ACC4,$IN01_1,${R3} + umlal $ACC1,$IN01_1,${R0} + umlal $ACC2,$IN01_1,${R1} + + add $IN01_4,$IN01_4,$H4 + umlal $ACC3,$IN01_3,${R0} + umlal $ACC0,$IN01_3,${S2} + umlal $ACC4,$IN01_3,${R1} + umlal $ACC1,$IN01_3,${S3} + umlal $ACC2,$IN01_3,${S4} + + umlal $ACC3,$IN01_4,${S4} + umlal $ACC0,$IN01_4,${S1} + umlal $ACC4,$IN01_4,${R0} + umlal $ACC1,$IN01_4,${S2} + umlal $ACC2,$IN01_4,${S3} + +.Lshort_tail: + //////////////////////////////////////////////////////////////// + // horizontal add + + addp $ACC3,$ACC3,$ACC3 + ldp d8,d9,[sp,#16] // meet ABI requirements + addp $ACC0,$ACC0,$ACC0 + ldp d10,d11,[sp,#32] + addp $ACC4,$ACC4,$ACC4 + ldp d12,d13,[sp,#48] + addp $ACC1,$ACC1,$ACC1 + ldp d14,d15,[sp,#64] + addp $ACC2,$ACC2,$ACC2 + + //////////////////////////////////////////////////////////////// + // lazy reduction, but without narrowing + + ushr $T0.2d,$ACC3,#26 + and $ACC3,$ACC3,$MASK.2d + ushr $T1.2d,$ACC0,#26 + and $ACC0,$ACC0,$MASK.2d + + add $ACC4,$ACC4,$T0.2d // h3 -> h4 + add $ACC1,$ACC1,$T1.2d // h0 -> h1 + + ushr $T0.2d,$ACC4,#26 + and $ACC4,$ACC4,$MASK.2d + ushr $T1.2d,$ACC1,#26 + and $ACC1,$ACC1,$MASK.2d + add $ACC2,$ACC2,$T1.2d // h1 -> h2 + + add $ACC0,$ACC0,$T0.2d + shl $T0.2d,$T0.2d,#2 + ushr $T1.2d,$ACC2,#26 + and $ACC2,$ACC2,$MASK.2d + add $ACC0,$ACC0,$T0.2d // h4 -> h0 + add $ACC3,$ACC3,$T1.2d // h2 -> h3 + + ushr $T0.2d,$ACC0,#26 + and 
$ACC0,$ACC0,$MASK.2d + ushr $T1.2d,$ACC3,#26 + and $ACC3,$ACC3,$MASK.2d + add $ACC1,$ACC1,$T0.2d // h0 -> h1 + add $ACC4,$ACC4,$T1.2d // h3 -> h4 + + //////////////////////////////////////////////////////////////// + // write the result, can be partially reduced + + st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16 + st1 {$ACC4}[0],[$ctx] + +.Lno_data_neon: + ldr x29,[sp],#80 + ret +.size poly1305_blocks_neon,.-poly1305_blocks_neon + +.type poly1305_emit_neon,%function +.align 5 +poly1305_emit_neon: + ldr $is_base2_26,[$ctx,#24] + cbz $is_base2_26,poly1305_emit + + ldp w10,w11,[$ctx] // load hash value base 2^26 + ldp w12,w13,[$ctx,#8] + ldr w14,[$ctx,#16] + + add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64 + lsr $h1,x12,#12 + adds $h0,$h0,x12,lsl#52 + add $h1,$h1,x13,lsl#14 + adc $h1,$h1,xzr + lsr $h2,x14,#24 + adds $h1,$h1,x14,lsl#40 + adc $h2,$h2,xzr // can be partially reduced... + + ldp $t0,$t1,[$nonce] // load nonce + + and $d0,$h2,#-4 // ... so reduce + add $d0,$d0,$h2,lsr#2 + and $h2,$h2,#3 + adds $h0,$h0,$d0 + adcs $h1,$h1,xzr + adc $h2,$h2,xzr + + adds $d0,$h0,#5 // compare to modulus + adcs $d1,$h1,xzr + adc $d2,$h2,xzr + + tst $d2,#-4 // see if it's carried/borrowed + + csel $h0,$h0,$d0,eq + csel $h1,$h1,$d1,eq + +#ifdef __AARCH64EB__ + ror $t0,$t0,#32 // flip nonce words + ror $t1,$t1,#32 +#endif + adds $h0,$h0,$t0 // accumulate nonce + adc $h1,$h1,$t1 +#ifdef __AARCH64EB__ + rev $h0,$h0 // flip output bytes + rev $h1,$h1 +#endif + stp $h0,$h1,[$mac] // write result + + ret +.size poly1305_emit_neon,.-poly1305_emit_neon +#endif + +.align 5 +.Lzeros: +.long 0,0,0,0,0,0,0,0 +#ifndef __KERNEL__ +.LOPENSSL_armcap_P: +#ifdef __ILP32__ +.long OPENSSL_armcap_P-. +#else +.quad OPENSSL_armcap_P-. 
+#endif +#endif +.align 2 +___ + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or + s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or + (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or + (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or + (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or + (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or + (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1)); + + s/\.[124]([sd])\[/.$1\[/; + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c b/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c new file mode 100644 index 000000000000..527ccc3b59cc --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. 
+ */ + +struct poly1305_internal { + u32 h[5]; + u32 r[5]; + u32 s[4]; +}; + +static void poly1305_init_generic(void *ctx, const u8 key[16]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + st->r[0] = (get_unaligned_le32(&key[0])) & 0x3ffffff; + st->r[1] = (get_unaligned_le32(&key[3]) >> 2) & 0x3ffff03; + st->r[2] = (get_unaligned_le32(&key[6]) >> 4) & 0x3ffc0ff; + st->r[3] = (get_unaligned_le32(&key[9]) >> 6) & 0x3f03fff; + st->r[4] = (get_unaligned_le32(&key[12]) >> 8) & 0x00fffff; + + /* s = 5*r */ + st->s[0] = st->r[1] * 5; + st->s[1] = st->r[2] * 5; + st->s[2] = st->r[3] * 5; + st->s[3] = st->r[4] * 5; + + /* h = 0 */ + st->h[0] = 0; + st->h[1] = 0; + st->h[2] = 0; + st->h[3] = 0; + st->h[4] = 0; +} + +static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len, + const u32 padbit) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + const u32 hibit = padbit << 24; + u32 r0, r1, r2, r3, r4; + u32 s1, s2, s3, s4; + u32 h0, h1, h2, h3, h4; + u64 d0, d1, d2, d3, d4; + u32 c; + + r0 = st->r[0]; + r1 = st->r[1]; + r2 = st->r[2]; + r3 = st->r[3]; + r4 = st->r[4]; + + s1 = st->s[0]; + s2 = st->s[1]; + s3 = st->s[2]; + s4 = st->s[3]; + + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + h3 = st->h[3]; + h4 = st->h[4]; + + while (len >= POLY1305_BLOCK_SIZE) { + /* h += m[i] */ + h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff; + h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff; + h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff; + h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff; + h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit; + + /* h *= r */ + d0 = ((u64)h0 * r0) + ((u64)h1 * s4) + + ((u64)h2 * s3) + ((u64)h3 * s2) + + ((u64)h4 * s1); + d1 = ((u64)h0 * r1) + ((u64)h1 * r0) + + ((u64)h2 * s4) + ((u64)h3 * s3) + + ((u64)h4 * s2); + d2 = ((u64)h0 * r2) + ((u64)h1 * r1) + + ((u64)h2 * r0) + ((u64)h3 * s4) + + ((u64)h4 * s3); + d3 = ((u64)h0 
* r3) + ((u64)h1 * r2) + + ((u64)h2 * r1) + ((u64)h3 * r0) + + ((u64)h4 * s4); + d4 = ((u64)h0 * r4) + ((u64)h1 * r3) + + ((u64)h2 * r2) + ((u64)h3 * r1) + + ((u64)h4 * r0); + + /* (partial) h %= p */ + c = (u32)(d0 >> 26); + h0 = (u32)d0 & 0x3ffffff; + d1 += c; + c = (u32)(d1 >> 26); + h1 = (u32)d1 & 0x3ffffff; + d2 += c; + c = (u32)(d2 >> 26); + h2 = (u32)d2 & 0x3ffffff; + d3 += c; + c = (u32)(d3 >> 26); + h3 = (u32)d3 & 0x3ffffff; + d4 += c; + c = (u32)(d4 >> 26); + h4 = (u32)d4 & 0x3ffffff; + h0 += c * 5; + c = (h0 >> 26); + h0 = h0 & 0x3ffffff; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + len -= POLY1305_BLOCK_SIZE; + } + + st->h[0] = h0; + st->h[1] = h1; + st->h[2] = h2; + st->h[3] = h3; + st->h[4] = h4; +} + +static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u32 h0, h1, h2, h3, h4, c; + u32 g0, g1, g2, g3, g4; + u64 f; + u32 mask; + + /* fully carry h */ + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + h3 = st->h[3]; + h4 = st->h[4]; + + c = h1 >> 26; + h1 = h1 & 0x3ffffff; + h2 += c; + c = h2 >> 26; + h2 = h2 & 0x3ffffff; + h3 += c; + c = h3 >> 26; + h3 = h3 & 0x3ffffff; + h4 += c; + c = h4 >> 26; + h4 = h4 & 0x3ffffff; + h0 += c * 5; + c = h0 >> 26; + h0 = h0 & 0x3ffffff; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 26; + g0 &= 0x3ffffff; + g1 = h1 + c; + c = g1 >> 26; + g1 &= 0x3ffffff; + g2 = h2 + c; + c = g2 >> 26; + g2 &= 0x3ffffff; + g3 = h3 + c; + c = g3 >> 26; + g3 &= 0x3ffffff; + g4 = h4 + c - (1UL << 26); + + /* select h if h < p, or h + -p if h >= p */ + mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; + g0 &= mask; + g1 &= mask; + g2 &= mask; + g3 &= mask; + g4 &= mask; + mask = ~mask; + + h0 = (h0 & mask) | g0; + h1 = (h1 & mask) | g1; + h2 = (h2 & mask) | g2; + h3 = (h3 & mask) | g3; + h4 = (h4 & mask) | g4; + + /* h = h % (2^128) */ + h0 = ((h0) | (h1 << 26)) & 0xffffffff; + h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff; + h2 = 
((h2 >> 12) | (h3 << 14)) & 0xffffffff; + h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff; + + /* mac = (h + nonce) % (2^128) */ + f = (u64)h0 + nonce[0]; + h0 = (u32)f; + f = (u64)h1 + nonce[1] + (f >> 32); + h1 = (u32)f; + f = (u64)h2 + nonce[2] + (f >> 32); + h2 = (u32)f; + f = (u64)h3 + nonce[3] + (f >> 32); + h3 = (u32)f; + + put_unaligned_le32(h0, &mac[0]); + put_unaligned_le32(h1, &mac[4]); + put_unaligned_le32(h2, &mac[8]); + put_unaligned_le32(h3, &mac[12]); +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c b/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c new file mode 100644 index 000000000000..131f1dda1b1d --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. + */ + +typedef __uint128_t u128; + +struct poly1305_internal { + u64 r[3]; + u64 h[3]; + u64 s[2]; +}; + +static void poly1305_init_generic(void *ctx, const u8 key[16]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u64 t0, t1; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + t0 = get_unaligned_le64(&key[0]); + t1 = get_unaligned_le64(&key[8]); + + st->r[0] = t0 & 0xffc0fffffffULL; + st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL; + st->r[2] = ((t1 >> 24)) & 0x00ffffffc0fULL; + + /* s = 20*r */ + st->s[0] = st->r[1] * 20; + st->s[1] = st->r[2] * 20; + + /* h = 0 */ + st->h[0] = 0; + st->h[1] = 0; + st->h[2] = 0; +} + +static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len, + const u32 padbit) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + const u64 hibit = ((u64)padbit) << 40; + u64 r0, r1, r2; + u64 s1, s2; + u64 h0, h1, h2; + u64 c; + u128 d0, d1, d2, d; + + r0 = st->r[0]; + r1 = st->r[1]; + r2 = st->r[2]; + + h0 = st->h[0]; + h1 = st->h[1]; + h2 = 
st->h[2]; + + s1 = st->s[0]; + s2 = st->s[1]; + + while (len >= POLY1305_BLOCK_SIZE) { + u64 t0, t1; + + /* h += m[i] */ + t0 = get_unaligned_le64(&input[0]); + t1 = get_unaligned_le64(&input[8]); + + h0 += t0 & 0xfffffffffffULL; + h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit; + + /* h *= r */ + d0 = (u128)h0 * r0; + d = (u128)h1 * s2; + d0 += d; + d = (u128)h2 * s1; + d0 += d; + d1 = (u128)h0 * r1; + d = (u128)h1 * r0; + d1 += d; + d = (u128)h2 * s2; + d1 += d; + d2 = (u128)h0 * r2; + d = (u128)h1 * r1; + d2 += d; + d = (u128)h2 * r0; + d2 += d; + + /* (partial) h %= p */ + c = (u64)(d0 >> 44); + h0 = (u64)d0 & 0xfffffffffffULL; + d1 += c; + c = (u64)(d1 >> 44); + h1 = (u64)d1 & 0xfffffffffffULL; + d2 += c; + c = (u64)(d2 >> 42); + h2 = (u64)d2 & 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 = h0 & 0xfffffffffffULL; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + len -= POLY1305_BLOCK_SIZE; + } + + st->h[0] = h0; + st->h[1] = h1; + st->h[2] = h2; +} + +static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u64 h0, h1, h2, c; + u64 g0, g1, g2; + u64 t0, t1; + + /* fully carry h */ + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 44; + g0 &= 0xfffffffffffULL; + g1 = h1 + c; + c = g1 >> 44; + g1 &= 0xfffffffffffULL; + g2 = h2 + c - (1ULL << 42); + + /* select h if h < p, or h + -p if h >= p */ + c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1; + g0 &= c; + g1 &= c; + g2 &= c; + c = ~c; + h0 = (h0 & c) | g0; + h1 = (h1 & c) | g1; + h2 = (h2 & c) | 
g2; + + /* h = (h + nonce) */ + t0 = ((u64)nonce[1] << 32) | nonce[0]; + t1 = ((u64)nonce[3] << 32) | nonce[2]; + + h0 += t0 & 0xfffffffffffULL; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c; + h2 &= 0x3ffffffffffULL; + + /* mac = h % (2^128) */ + h0 = h0 | (h1 << 44); + h1 = (h1 >> 20) | (h2 << 24); + + put_unaligned_le64(h0, &mac[0]); + put_unaligned_le64(h1, &mac[8]); +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c new file mode 100644 index 000000000000..a540e9c4eee8 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +asmlinkage void poly1305_init_mips(void *ctx, const u8 key[16]); +asmlinkage void poly1305_blocks_mips(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_mips(void *ctx, u8 mac[16], const u32 nonce[4]); + +static bool *const poly1305_nobs[] __initconst = { }; +static void __init poly1305_fpu_init(void) +{ +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_mips(ctx, key); + return true; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + poly1305_blocks_mips(ctx, inp, len, padbit); + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + poly1305_emit_mips(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S b/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S new file mode 100644 index 000000000000..4291c156815b --- /dev/null +++ 
b/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2016-2018 René van Dorst All Rights Reserved. + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define MSB 0 +#define LSB 3 +#else +#define MSB 3 +#define LSB 0 +#endif + +#define POLY1305_BLOCK_SIZE 16 +.text +#define H0 $t0 +#define H1 $t1 +#define H2 $t2 +#define H3 $t3 +#define H4 $t4 + +#define R0 $t5 +#define R1 $t6 +#define R2 $t7 +#define R3 $t8 + +#define O0 $s0 +#define O1 $s4 +#define O2 $v1 +#define O3 $t9 +#define O4 $s5 + +#define S1 $s1 +#define S2 $s2 +#define S3 $s3 + +#define SC $at +#define CA $v0 + +/* Input arguments */ +#define poly $a0 +#define src $a1 +#define srclen $a2 +#define hibit $a3 + +/* Location in the opaque buffer + * R[0..3], CA, H[0..4] + */ +#define PTR_POLY1305_R(n) ( 0 + (n*4)) ## ($a0) +#define PTR_POLY1305_CA (16 ) ## ($a0) +#define PTR_POLY1305_H(n) (20 + (n*4)) ## ($a0) + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_STACK_SIZE 32 + +.set noat +.align 4 +.globl poly1305_blocks_mips +.ent poly1305_blocks_mips +poly1305_blocks_mips: + .frame $sp, POLY1305_STACK_SIZE, $ra + /* srclen &= 0xFFFFFFF0 */ + ins srclen, $zero, 0, 4 + + addiu $sp, -(POLY1305_STACK_SIZE) + + /* check srclen >= 16 bytes */ + beqz srclen, .Lpoly1305_blocks_mips_end + + /* Calculate last round based on src address pointer. + * last round src ptr (srclen) = src + (srclen & 0xFFFFFFF0) + */ + addu srclen, src + + lw R0, PTR_POLY1305_R(0) + lw R1, PTR_POLY1305_R(1) + lw R2, PTR_POLY1305_R(2) + lw R3, PTR_POLY1305_R(3) + + /* store the used save registers. 
*/ + sw $s0, 0($sp) + sw $s1, 4($sp) + sw $s2, 8($sp) + sw $s3, 12($sp) + sw $s4, 16($sp) + sw $s5, 20($sp) + + /* load Hx and Carry */ + lw CA, PTR_POLY1305_CA + lw H0, PTR_POLY1305_H(0) + lw H1, PTR_POLY1305_H(1) + lw H2, PTR_POLY1305_H(2) + lw H3, PTR_POLY1305_H(3) + lw H4, PTR_POLY1305_H(4) + + /* Sx = Rx + (Rx >> 2) */ + srl S1, R1, 2 + srl S2, R2, 2 + srl S3, R3, 2 + addu S1, R1 + addu S2, R2 + addu S3, R3 + + addiu SC, $zero, 1 + +.Lpoly1305_loop: + lwl O0, 0+MSB(src) + lwl O1, 4+MSB(src) + lwl O2, 8+MSB(src) + lwl O3,12+MSB(src) + lwr O0, 0+LSB(src) + lwr O1, 4+LSB(src) + lwr O2, 8+LSB(src) + lwr O3,12+LSB(src) + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh O0 + wsbh O1 + wsbh O2 + wsbh O3 + rotr O0, 16 + rotr O1, 16 + rotr O2, 16 + rotr O3, 16 +#endif + + /* h0 = (u32)(d0 = (u64)h0 + inp[0] + c 'Carry_previous cycle'); */ + addu H0, CA + sltu CA, H0, CA + addu O0, H0 + sltu H0, O0, H0 + addu CA, H0 + + /* h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + inp[4]); */ + addu H1, CA + sltu CA, H1, CA + addu O1, H1 + sltu H1, O1, H1 + addu CA, H1 + + /* h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + inp[8]); */ + addu H2, CA + sltu CA, H2, CA + addu O2, H2 + sltu H2, O2, H2 + addu CA, H2 + + /* h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + inp[12]); */ + addu H3, CA + sltu CA, H3, CA + addu O3, H3 + sltu H3, O3, H3 + addu CA, H3 + + /* h4 += (u32)(d3 >> 32) + padbit; */ + addu H4, hibit + addu O4, H4, CA + + /* D0 */ + multu O0, R0 + maddu O1, S3 + maddu O2, S2 + maddu O3, S1 + mfhi CA + mflo H0 + + /* D1 */ + multu O0, R1 + maddu O1, R0 + maddu O2, S3 + maddu O3, S2 + maddu O4, S1 + maddu CA, SC + mfhi CA + mflo H1 + + /* D2 */ + multu O0, R2 + maddu O1, R1 + maddu O2, R0 + maddu O3, S3 + maddu O4, S2 + maddu CA, SC + mfhi CA + mflo H2 + + /* D4 */ + mul H4, O4, R0 + + /* D3 */ + multu O0, R3 + maddu O1, R2 + maddu O2, R1 + maddu O3, R0 + maddu O4, S3 + maddu CA, SC + mfhi CA + mflo H3 + + addiu src, POLY1305_BLOCK_SIZE + + /* h4 += (u32)(d3 >> 32); */ + addu O4, H4, CA + /* h4 
&= 3 */ + andi H4, O4, 3 + /* c = (h4 >> 2) + (h4 & ~3U); */ + srl CA, O4, 2 + ins O4, $zero, 0, 2 + + addu CA, O4 + + /* able to do a 16 byte block. */ + bne src, srclen, .Lpoly1305_loop + + /* restore the used save registers. */ + lw $s0, 0($sp) + lw $s1, 4($sp) + lw $s2, 8($sp) + lw $s3, 12($sp) + lw $s4, 16($sp) + lw $s5, 20($sp) + + /* store Hx and Carry */ + sw CA, PTR_POLY1305_CA + sw H0, PTR_POLY1305_H(0) + sw H1, PTR_POLY1305_H(1) + sw H2, PTR_POLY1305_H(2) + sw H3, PTR_POLY1305_H(3) + sw H4, PTR_POLY1305_H(4) + +.Lpoly1305_blocks_mips_end: + addiu $sp, POLY1305_STACK_SIZE + + /* Jump Back */ + jr $ra +.end poly1305_blocks_mips +.set at + +/* Input arguments CTX=$a0, MAC=$a1, NONCE=$a2 */ +#define MAC $a1 +#define NONCE $a2 + +#define G0 $t5 +#define G1 $t6 +#define G2 $t7 +#define G3 $t8 +#define G4 $t9 + +.set noat +.align 4 +.globl poly1305_emit_mips +.ent poly1305_emit_mips +poly1305_emit_mips: + /* load Hx and Carry */ + lw CA, PTR_POLY1305_CA + lw H0, PTR_POLY1305_H(0) + lw H1, PTR_POLY1305_H(1) + lw H2, PTR_POLY1305_H(2) + lw H3, PTR_POLY1305_H(3) + lw H4, PTR_POLY1305_H(4) + + /* Add left over carry */ + addu H0, CA + sltu CA, H0, CA + addu H1, CA + sltu CA, H1, CA + addu H2, CA + sltu CA, H2, CA + addu H3, CA + sltu CA, H3, CA + addu H4, CA + + /* compare to modulus by computing h + -p */ + addiu G0, H0, 5 + sltu CA, G0, H0 + addu G1, H1, CA + sltu CA, G1, H1 + addu G2, H2, CA + sltu CA, G2, H2 + addu G3, H3, CA + sltu CA, G3, H3 + addu G4, H4, CA + + srl SC, G4, 2 + + /* if there was carry into 131st bit, h3:h0 = g3:g0 */ + movn H0, G0, SC + movn H1, G1, SC + movn H2, G2, SC + movn H3, G3, SC + + lwl G0, 0+MSB(NONCE) + lwl G1, 4+MSB(NONCE) + lwl G2, 8+MSB(NONCE) + lwl G3,12+MSB(NONCE) + lwr G0, 0+LSB(NONCE) + lwr G1, 4+LSB(NONCE) + lwr G2, 8+LSB(NONCE) + lwr G3,12+LSB(NONCE) + + /* mac = (h + nonce) % (2^128) */ + addu H0, G0 + sltu CA, H0, G0 + + /* H1 */ + addu H1, CA + sltu CA, H1, CA + addu H1, G1 + sltu G1, H1, G1 + addu CA, G1 + + /* H2 */ 
+ addu H2, CA + sltu CA, H2, CA + addu H2, G2 + sltu G2, H2, G2 + addu CA, G2 + + /* H3 */ + addu H3, CA + addu H3, G3 + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh H0 + wsbh H1 + wsbh H2 + wsbh H3 + rotr H0, 16 + rotr H1, 16 + rotr H2, 16 + rotr H3, 16 +#endif + + /* store MAC */ + swl H0, 0+MSB(MAC) + swl H1, 4+MSB(MAC) + swl H2, 8+MSB(MAC) + swl H3,12+MSB(MAC) + swr H0, 0+LSB(MAC) + swr H1, 4+LSB(MAC) + swr H2, 8+LSB(MAC) + swr H3,12+LSB(MAC) + + jr $ra +.end poly1305_emit_mips + +#define PR0 $t0 +#define PR1 $t1 +#define PR2 $t2 +#define PR3 $t3 +#define PT0 $t4 + +/* Input arguments CTX=$a0, KEY=$a1 */ + +.align 4 +.globl poly1305_init_mips +.ent poly1305_init_mips +poly1305_init_mips: + lwl PR0, 0+MSB($a1) + lwl PR1, 4+MSB($a1) + lwl PR2, 8+MSB($a1) + lwl PR3,12+MSB($a1) + lwr PR0, 0+LSB($a1) + lwr PR1, 4+LSB($a1) + lwr PR2, 8+LSB($a1) + lwr PR3,12+LSB($a1) + + /* store Hx and Carry */ + sw $zero, PTR_POLY1305_CA + sw $zero, PTR_POLY1305_H(0) + sw $zero, PTR_POLY1305_H(1) + sw $zero, PTR_POLY1305_H(2) + sw $zero, PTR_POLY1305_H(3) + sw $zero, PTR_POLY1305_H(4) + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh PR0 + wsbh PR1 + wsbh PR2 + wsbh PR3 + rotr PR0, 16 + rotr PR1, 16 + rotr PR2, 16 + rotr PR3, 16 +#endif + + lui PT0, 0x0FFF + ori PT0, 0xFFFC + + /* AND 0x0fffffff; */ + ext PR0, PR0, 0, (32-4) + + /* AND 0x0ffffffc; */ + and PR1, PT0 + and PR2, PT0 + and PR3, PT0 + + /* store Rx */ + sw PR0, PTR_POLY1305_R(0) + sw PR1, PTR_POLY1305_R(1) + sw PR2, PTR_POLY1305_R(2) + sw PR3, PTR_POLY1305_R(3) + + /* Jump Back */ + jr $ra +.end poly1305_init_mips diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl new file mode 100644 index 000000000000..d30a03d79177 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl @@ -0,0 +1,467 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the 
author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# Poly1305 hash for MIPS64. +# +# May 2016 +# +# Numbers are cycles per processed byte with poly1305_blocks alone. +# +# IALU/gcc +# R1x000 5.64/+120% (big-endian) +# Octeon II 3.80/+280% (little-endian) + +###################################################################### +# There is a number of MIPS ABI in use, O32 and N32/64 are most +# widely used. Then there is a new contender: NUBI. It appears that if +# one picks the latter, it's possible to arrange code in ABI neutral +# manner. Therefore let's stick to NUBI register layout: +# +($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); +($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); +($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); +# +# The return value is placed in $a0. 
Following coding rules facilitate +# interoperability: +# +# - never ever touch $tp, "thread pointer", former $gp [o32 can be +# excluded from the rule, because it's specified volatile]; +# - copy return value to $t0, former $v0 [or to $a0 if you're adapting +# old code]; +# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; +# +# For reference here is register layout for N32/64 MIPS ABIs: +# +# ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); +# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); +# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); +# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); +# +# +# +###################################################################### + +$flavour = shift || "64"; # supported flavours are o32,n32,64,nubi32,nubi64 + +die "MIPS64 only" unless ($flavour =~ /64|n32/i); + +$v0 = ($flavour =~ /nubi/i) ? $a0 : $t0; +$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x0003f000" : "0x00030000"; + +($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3); +($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1); + +$code.=<<___; +#if (defined(_MIPS_ARCH_MIPS64R3) || defined(_MIPS_ARCH_MIPS64R5) || \\ + defined(_MIPS_ARCH_MIPS64R6)) \\ + && !defined(_MIPS_ARCH_MIPS64R2) +# define _MIPS_ARCH_MIPS64R2 +#endif + +#if defined(_MIPS_ARCH_MIPS64R6) +# define dmultu(rs,rt) +# define mflo(rd,rs,rt) dmulu rd,rs,rt +# define mfhi(rd,rs,rt) dmuhu rd,rs,rt +#else +# define dmultu(rs,rt) dmultu rs,rt +# define mflo(rd,rs,rt) mflo rd +# define mfhi(rd,rs,rt) mfhi rd +#endif + +#ifdef __KERNEL__ +# define poly1305_init poly1305_init_mips +# define poly1305_blocks poly1305_blocks_mips +# define poly1305_emit poly1305_emit_mips +#endif + +#if defined(__MIPSEB__) && !defined(MIPSEB) +# define MIPSEB +#endif + +#ifdef MIPSEB +# define MSB 0 +# define LSB 7 +#else +# define MSB 7 +# define LSB 0 +#endif + +.text +.set noat +.set noreorder + +.align 5 +.globl poly1305_init +.ent poly1305_init 
+poly1305_init: + .frame $sp,0,$ra + .set reorder + + sd $zero,0($ctx) + sd $zero,8($ctx) + sd $zero,16($ctx) + + beqz $inp,.Lno_key + +#if defined(_MIPS_ARCH_MIPS64R6) + ld $in0,0($inp) + ld $in1,8($inp) +#else + ldl $in0,0+MSB($inp) + ldl $in1,8+MSB($inp) + ldr $in0,0+LSB($inp) + ldr $in1,8+LSB($inp) +#endif +#ifdef MIPSEB +# if defined(_MIPS_ARCH_MIPS64R2) + dsbh $in0,$in0 # byte swap + dsbh $in1,$in1 + dshd $in0,$in0 + dshd $in1,$in1 +# else + ori $tmp0,$zero,0xFF + dsll $tmp2,$tmp0,32 + or $tmp0,$tmp2 # 0x000000FF000000FF + + and $tmp1,$in0,$tmp0 # byte swap + and $tmp3,$in1,$tmp0 + dsrl $tmp2,$in0,24 + dsrl $tmp4,$in1,24 + dsll $tmp1,24 + dsll $tmp3,24 + and $tmp2,$tmp0 + and $tmp4,$tmp0 + dsll $tmp0,8 # 0x0000FF000000FF00 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + and $tmp2,$in0,$tmp0 + and $tmp4,$in1,$tmp0 + dsrl $in0,8 + dsrl $in1,8 + dsll $tmp2,8 + dsll $tmp4,8 + and $in0,$tmp0 + and $in1,$tmp0 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + or $in0,$tmp1 + or $in1,$tmp3 + dsrl $tmp1,$in0,32 + dsrl $tmp3,$in1,32 + dsll $in0,32 + dsll $in1,32 + or $in0,$tmp1 + or $in1,$tmp3 +# endif +#endif + li $tmp0,1 + dsll $tmp0,32 + daddiu $tmp0,-63 + dsll $tmp0,28 + daddiu $tmp0,-1 # 0ffffffc0fffffff + + and $in0,$tmp0 + daddiu $tmp0,-3 # 0ffffffc0ffffffc + and $in1,$tmp0 + + sd $in0,24($ctx) + dsrl $tmp0,$in1,2 + sd $in1,32($ctx) + daddu $tmp0,$in1 # s1 = r1 + (r1 >> 2) + sd $tmp0,40($ctx) + +.Lno_key: + li $v0,0 # return 0 + jr $ra +.end poly1305_init +___ +{ +my ($h0,$h1,$h2,$r0,$r1,$s1,$d0,$d1,$d2) = + ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2); + +$code.=<<___; +.align 5 +.globl poly1305_blocks +.ent poly1305_blocks +poly1305_blocks: + .set noreorder + dsrl $len,4 # number of complete blocks + bnez $len,poly1305_blocks_internal + nop + jr $ra + nop +.end poly1305_blocks + +.align 5 +.ent poly1305_blocks_internal +poly1305_blocks_internal: + .frame $sp,6*8,$ra + .mask $SAVED_REGS_MASK,-8 + .set noreorder + dsubu $sp,6*8 + sd $s5,40($sp) + sd $s4,32($sp) +___ +$code.=<<___ if ($flavour 
=~ /nubi/i); # optimize non-nubi prologue + sd $s3,24($sp) + sd $s2,16($sp) + sd $s1,8($sp) + sd $s0,0($sp) +___ +$code.=<<___; + .set reorder + + ld $h0,0($ctx) # load hash value + ld $h1,8($ctx) + ld $h2,16($ctx) + + ld $r0,24($ctx) # load key + ld $r1,32($ctx) + ld $s1,40($ctx) + +.Loop: +#if defined(_MIPS_ARCH_MIPS64R6) + ld $in0,0($inp) # load input + ld $in1,8($inp) +#else + ldl $in0,0+MSB($inp) # load input + ldl $in1,8+MSB($inp) + ldr $in0,0+LSB($inp) + ldr $in1,8+LSB($inp) +#endif + daddiu $len,-1 + daddiu $inp,16 +#ifdef MIPSEB +# if defined(_MIPS_ARCH_MIPS64R2) + dsbh $in0,$in0 # byte swap + dsbh $in1,$in1 + dshd $in0,$in0 + dshd $in1,$in1 +# else + ori $tmp0,$zero,0xFF + dsll $tmp2,$tmp0,32 + or $tmp0,$tmp2 # 0x000000FF000000FF + + and $tmp1,$in0,$tmp0 # byte swap + and $tmp3,$in1,$tmp0 + dsrl $tmp2,$in0,24 + dsrl $tmp4,$in1,24 + dsll $tmp1,24 + dsll $tmp3,24 + and $tmp2,$tmp0 + and $tmp4,$tmp0 + dsll $tmp0,8 # 0x0000FF000000FF00 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + and $tmp2,$in0,$tmp0 + and $tmp4,$in1,$tmp0 + dsrl $in0,8 + dsrl $in1,8 + dsll $tmp2,8 + dsll $tmp4,8 + and $in0,$tmp0 + and $in1,$tmp0 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + or $in0,$tmp1 + or $in1,$tmp3 + dsrl $tmp1,$in0,32 + dsrl $tmp3,$in1,32 + dsll $in0,32 + dsll $in1,32 + or $in0,$tmp1 + or $in1,$tmp3 +# endif +#endif + daddu $h0,$in0 # accumulate input + daddu $h1,$in1 + sltu $tmp0,$h0,$in0 + sltu $tmp1,$h1,$in1 + daddu $h1,$tmp0 + + dmultu ($r0,$h0) # h0*r0 + daddu $h2,$padbit + sltu $tmp0,$h1,$tmp0 + mflo ($d0,$r0,$h0) + mfhi ($d1,$r0,$h0) + + dmultu ($s1,$h1) # h1*5*r1 + daddu $tmp0,$tmp1 + daddu $h2,$tmp0 + mflo ($tmp0,$s1,$h1) + mfhi ($tmp1,$s1,$h1) + + dmultu ($r1,$h0) # h0*r1 + daddu $d0,$tmp0 + daddu $d1,$tmp1 + mflo ($tmp2,$r1,$h0) + mfhi ($d2,$r1,$h0) + sltu $tmp0,$d0,$tmp0 + daddu $d1,$tmp0 + + dmultu ($r0,$h1) # h1*r0 + daddu $d1,$tmp2 + sltu $tmp2,$d1,$tmp2 + mflo ($tmp0,$r0,$h1) + mfhi ($tmp1,$r0,$h1) + daddu $d2,$tmp2 + + dmultu ($s1,$h2) # h2*5*r1 + daddu $d1,$tmp0 + daddu 
$d2,$tmp1 + mflo ($tmp2,$s1,$h2) + + dmultu ($r0,$h2) # h2*r0 + sltu $tmp0,$d1,$tmp0 + daddu $d2,$tmp0 + mflo ($tmp3,$r0,$h2) + + daddu $d1,$tmp2 + daddu $d2,$tmp3 + sltu $tmp2,$d1,$tmp2 + daddu $d2,$tmp2 + + li $tmp0,-4 # final reduction + and $tmp0,$d2 + dsrl $tmp1,$d2,2 + andi $h2,$d2,3 + daddu $tmp0,$tmp1 + daddu $h0,$d0,$tmp0 + sltu $tmp0,$h0,$tmp0 + daddu $h1,$d1,$tmp0 + sltu $tmp0,$h1,$tmp0 + daddu $h2,$h2,$tmp0 + + bnez $len,.Loop + + sd $h0,0($ctx) # store hash value + sd $h1,8($ctx) + sd $h2,16($ctx) + + .set noreorder + ld $s5,40($sp) # epilogue + ld $s4,32($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi epilogue + ld $s3,24($sp) + ld $s2,16($sp) + ld $s1,8($sp) + ld $s0,0($sp) +___ +$code.=<<___; + jr $ra + daddu $sp,6*8 +.end poly1305_blocks_internal +___ +} +{ +my ($ctx,$mac,$nonce) = ($a0,$a1,$a2); + +$code.=<<___; +.align 5 +.globl poly1305_emit +.ent poly1305_emit +poly1305_emit: + .frame $sp,0,$ra + .set reorder + + ld $tmp0,0($ctx) + ld $tmp1,8($ctx) + ld $tmp2,16($ctx) + + daddiu $in0,$tmp0,5 # compare to modulus + sltiu $tmp3,$in0,5 + daddu $in1,$tmp1,$tmp3 + sltu $tmp3,$in1,$tmp3 + daddu $tmp2,$tmp2,$tmp3 + + dsrl $tmp2,2 # see if it carried/borrowed + dsubu $tmp2,$zero,$tmp2 + nor $tmp3,$zero,$tmp2 + + and $in0,$tmp2 + and $tmp0,$tmp3 + and $in1,$tmp2 + and $tmp1,$tmp3 + or $in0,$tmp0 + or $in1,$tmp1 + + lwu $tmp0,0($nonce) # load nonce + lwu $tmp1,4($nonce) + lwu $tmp2,8($nonce) + lwu $tmp3,12($nonce) + dsll $tmp1,32 + dsll $tmp3,32 + or $tmp0,$tmp1 + or $tmp2,$tmp3 + + daddu $in0,$tmp0 # accumulate nonce + daddu $in1,$tmp2 + sltu $tmp0,$in0,$tmp0 + daddu $in1,$tmp0 + + dsrl $tmp0,$in0,8 # write mac value + dsrl $tmp1,$in0,16 + dsrl $tmp2,$in0,24 + sb $in0,0($mac) + dsrl $tmp3,$in0,32 + sb $tmp0,1($mac) + dsrl $tmp0,$in0,40 + sb $tmp1,2($mac) + dsrl $tmp1,$in0,48 + sb $tmp2,3($mac) + dsrl $tmp2,$in0,56 + sb $tmp3,4($mac) + dsrl $tmp3,$in1,8 + sb $tmp0,5($mac) + dsrl $tmp0,$in1,16 + sb $tmp1,6($mac) + dsrl $tmp1,$in1,24 
+ sb $tmp2,7($mac) + + sb $in1,8($mac) + dsrl $tmp2,$in1,32 + sb $tmp3,9($mac) + dsrl $tmp3,$in1,40 + sb $tmp0,10($mac) + dsrl $tmp0,$in1,48 + sb $tmp1,11($mac) + dsrl $tmp1,$in1,56 + sb $tmp2,12($mac) + sb $tmp3,13($mac) + sb $tmp0,14($mac) + sb $tmp1,15($mac) + + jr $ra +.end poly1305_emit +.rdata +.align 2 +___ +} + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +$output=pop and open STDOUT,">$output"; +print $code; +close STDOUT; + diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c new file mode 100644 index 000000000000..ce48a42f7654 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/cpufeature.h> +#include <asm/processor.h> +#include <asm/intel-family.h> + +asmlinkage void poly1305_init_x86_64(void *ctx, + const u8 key[POLY1305_KEY_SIZE]); +asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, + const size_t len, const u32 padbit); +asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, + const size_t len, const u32 padbit); + +static bool poly1305_use_avx __ro_after_init; +static bool poly1305_use_avx2 __ro_after_init; +static bool poly1305_use_avx512 __ro_after_init; +static bool *const poly1305_nobs[] __initconst = { + &poly1305_use_avx, &poly1305_use_avx2, &poly1305_use_avx512 }; + +static void __init poly1305_fpu_init(void) +{ + poly1305_use_avx = + boot_cpu_has(X86_FEATURE_AVX)
&& + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); + poly1305_use_avx2 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); +#ifndef COMPAT_CANNOT_USE_AVX512 + poly1305_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL) && + /* Skylake downclocks unacceptably much when using zmm. */ + boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X; +#endif +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_x86_64(ctx, key); + return true; +} + +struct poly1305_arch_internal { + union { + struct { + u32 h[5]; + u32 is_base2_26; + }; + u64 hs[3]; + }; + u64 r[2]; + u64 pad; + struct { u32 r2, r1, r4, r3; } rn[9]; +}; + +/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit + * the unfortunate situation of using AVX and then having to go back to scalar + * -- because the user is silly and has called the update function from two + * separate contexts -- then we need to convert back to the original base before + * proceeding. It is possible to reason that the initial reduction below is + * sufficient given the implementation invariants. However, for an avoidance of + * doubt and because this is not performance critical, we do the full reduction + * anyway. 
+ */ +static void convert_to_base2_64(void *ctx) +{ + struct poly1305_arch_internal *state = ctx; + u32 cy; + + if (!state->is_base2_26) + return; + + cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; + cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; + cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; + cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; + state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; + state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); + state->hs[2] = state->h[4] >> 24; +#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) + cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL); + state->hs[2] &= 3; + state->hs[0] += cy; + state->hs[1] += (cy = ULT(state->hs[0], cy)); + state->hs[2] += ULT(state->hs[1], cy); +#undef ULT + state->is_base2_26 = 0; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + struct poly1305_arch_internal *state = ctx; + + /* SIMD disables preemption, so relax after processing each page. 
*/ + BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || + PAGE_SIZE % POLY1305_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_AS_AVX) || !poly1305_use_avx || + (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_blocks_x86_64(ctx, inp, len, padbit); + return true; + } + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && poly1305_use_avx512) + poly1305_blocks_avx512(ctx, inp, bytes, padbit); + else if (IS_ENABLED(CONFIG_AS_AVX2) && poly1305_use_avx2) + poly1305_blocks_avx2(ctx, inp, bytes, padbit); + else + poly1305_blocks_avx(ctx, inp, bytes, padbit); + len -= bytes; + if (!len) + break; + inp += bytes; + simd_relax(simd_context); + } + + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + struct poly1305_arch_internal *state = ctx; + + if (!IS_ENABLED(CONFIG_AS_AVX) || !poly1305_use_avx || + !state->is_base2_26 || !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_emit_x86_64(ctx, mac, nonce); + } else + poly1305_emit_avx(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl new file mode 100644 index 000000000000..f994855cdbe2 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl @@ -0,0 +1,4266 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Copyright (C) 2017-2018 Samuel Neves . All Rights Reserved. +# Copyright (C) 2017-2019 Jason A. Donenfeld . All Rights Reserved. +# Copyright (C) 2006-2017 CRYPTOGAMS by . All Rights Reserved. +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. 
+# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# This module implements Poly1305 hash for x86_64. +# +# March 2015 +# +# Initial release. +# +# December 2016 +# +# Add AVX512F+VL+BW code path. +# +# November 2017 +# +# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be +# executed even on Knights Landing. Trigger for modification was +# observation that AVX512 code paths can negatively affect overall +# Skylake-X system performance. Since we are likely to suppress +# AVX512F capability flag [at least on Skylake-X], conversion serves +# as kind of "investment protection". Note that next *lake processor, +# Cannonlake, has AVX512IFMA code path to execute... +# +# Numbers are cycles per processed byte with poly1305_blocks alone, +# measured with rdtsc at fixed clock frequency. +# +# IALU/gcc-4.8(*) AVX(**) AVX2 AVX-512 +# P4 4.46/+120% - +# Core 2 2.41/+90% - +# Westmere 1.88/+120% - +# Sandy Bridge 1.39/+140% 1.10 +# Haswell 1.14/+175% 1.11 0.65 +# Skylake[-X] 1.13/+120% 0.96 0.51 [0.35] +# Silvermont 2.83/+95% - +# Knights L 3.60/? 
1.65 1.10 0.41(***) +# Goldmont 1.70/+180% - +# VIA Nano 1.82/+150% - +# Sledgehammer 1.38/+160% - +# Bulldozer 2.30/+130% 0.97 +# Ryzen 1.15/+200% 1.08 1.18 +# +# (*) improvement coefficients relative to clang are more modest and +# are ~50% on most processors, in both cases we are comparing to +# __int128 code; +# (**) SSE2 implementation was attempted, but among non-AVX processors +# it was faster than integer-only code only on older Intel P4 and +# Core processors, 50-30%, less newer processor is, but slower on +# contemporary ones, for example almost 2x slower on Atom, and as +# former are naturally disappearing, SSE2 is deemed unnecessary; +# (***) strangely enough performance seems to vary from core to core, +# listed result is best case; + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); +$kernel=0; $kernel=1 if (!$flavour && !$output); + +if (!$kernel) { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or + die "can't locate x86_64-xlate.pl"; + + open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; + *STDOUT=*OUT; + + if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25); + } + + if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { + $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12); + $avx += 1 if ($1==2.11 && $2>=8); + } + + if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./) { + $avx = ($1>=10) + ($1>=11); + } + + if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { + $avx = ($2>=3.0) + ($2>3.0); + } +} else { + $avx = 4; # The 
kernel uses ifdefs for this. +} + +sub declare_function() { + my ($name, $align, $nargs) = @_; + if($kernel) { + $code .= ".align $align\n"; + $code .= "SYM_FUNC_START($name)\n"; + $code .= ".L$name:\n"; + } else { + $code .= ".globl $name\n"; + $code .= ".type $name,\@function,$nargs\n"; + $code .= ".align $align\n"; + $code .= "$name:\n"; + } +} + +sub end_function() { + my ($name) = @_; + if($kernel) { + $code .= "SYM_FUNC_END($name)\n"; + } else { + $code .= ".size $name,.-$name\n"; + } +} + +$code.=<<___ if $kernel; +#include +___ + +if ($avx) { +$code.=<<___ if $kernel; +.section .rodata +___ +$code.=<<___; +.align 64 +.Lconst: +.Lmask24: +.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0 +.L129: +.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0 +.Lmask26: +.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0 +.Lpermd_avx2: +.long 2,2,2,3,2,0,2,1 +.Lpermd_avx512: +.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7 + +.L2_44_inp_permd: +.long 0,1,1,2,2,3,7,7 +.L2_44_inp_shift: +.quad 0,12,24,64 +.L2_44_mask: +.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff +.L2_44_shift_rgt: +.quad 44,44,42,64 +.L2_44_shift_lft: +.quad 8,8,10,64 + +.align 64 +.Lx_mask44: +.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff +.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff +.Lx_mask42: +.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff +.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff +___ +} +$code.=<<___ if (!$kernel); +.asciz "Poly1305 for x86_64, CRYPTOGAMS by " +.align 16 +___ + +my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx"); +my ($mac,$nonce)=($inp,$len); # *_emit arguments +my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13"); +my ($h0,$h1,$h2)=("%r14","%rbx","%r10"); + +sub poly1305_iteration { +# input: copy of $r1 in %rax, $h0-$h2, $r0-$r1 +# output: $h0-$h2 *= $r0-$r1 +$code.=<<___; + mulq $h0 # h0*r1 + mov %rax,$d2 + mov $r0,%rax + mov %rdx,$d3 + + mulq $h0 # h0*r0 + mov 
%rax,$h0 # future $h0 + mov $r0,%rax + mov %rdx,$d1 + + mulq $h1 # h1*r0 + add %rax,$d2 + mov $s1,%rax + adc %rdx,$d3 + + mulq $h1 # h1*s1 + mov $h2,$h1 # borrow $h1 + add %rax,$h0 + adc %rdx,$d1 + + imulq $s1,$h1 # h2*s1 + add $h1,$d2 + mov $d1,$h1 + adc \$0,$d3 + + imulq $r0,$h2 # h2*r0 + add $d2,$h1 + mov \$-4,%rax # mask value + adc $h2,$d3 + + and $d3,%rax # last reduction step + mov $d3,$h2 + shr \$2,$d3 + and \$3,$h2 + add $d3,%rax + add %rax,$h0 + adc \$0,$h1 + adc \$0,$h2 +___ +} + +######################################################################## +# Layout of opaque area is following. +# +# unsigned __int64 h[3]; # current hash value base 2^64 +# unsigned __int64 r[2]; # key value base 2^64 + +$code.=<<___; +.text +___ +$code.=<<___ if (!$kernel); +.extern OPENSSL_ia32cap_P + +.globl poly1305_init_x86_64 +.hidden poly1305_init_x86_64 +.globl poly1305_blocks_x86_64 +.hidden poly1305_blocks_x86_64 +.globl poly1305_emit_x86_64 +.hidden poly1305_emit_x86_64 +___ +&declare_function("poly1305_init_x86_64", 32, 3); +$code.=<<___; + xor %rax,%rax + mov %rax,0($ctx) # initialize hash value + mov %rax,8($ctx) + mov %rax,16($ctx) + + cmp \$0,$inp + je .Lno_key +___ +$code.=<<___ if (!$kernel); + lea poly1305_blocks_x86_64(%rip),%r10 + lea poly1305_emit_x86_64(%rip),%r11 +___ +$code.=<<___ if (!$kernel && $avx); + mov OPENSSL_ia32cap_P+4(%rip),%r9 + lea poly1305_blocks_avx(%rip),%rax + lea poly1305_emit_avx(%rip),%rcx + bt \$`60-32`,%r9 # AVX? + cmovc %rax,%r10 + cmovc %rcx,%r11 +___ +$code.=<<___ if (!$kernel && $avx>1); + lea poly1305_blocks_avx2(%rip),%rax + bt \$`5+32`,%r9 # AVX2? 
+ cmovc %rax,%r10 +___ +$code.=<<___ if (!$kernel && $avx>3); + mov \$`(1<<31|1<<21|1<<16)`,%rax + shr \$32,%r9 + and %rax,%r9 + cmp %rax,%r9 + je .Linit_base2_44 +___ +$code.=<<___; + mov \$0x0ffffffc0fffffff,%rax + mov \$0x0ffffffc0ffffffc,%rcx + and 0($inp),%rax + and 8($inp),%rcx + mov %rax,24($ctx) + mov %rcx,32($ctx) +___ +$code.=<<___ if (!$kernel && $flavour !~ /elf32/); + mov %r10,0(%rdx) + mov %r11,8(%rdx) +___ +$code.=<<___ if (!$kernel && $flavour =~ /elf32/); + mov %r10d,0(%rdx) + mov %r11d,4(%rdx) +___ +$code.=<<___; + mov \$1,%eax +.Lno_key: + ret +___ +&end_function("poly1305_init_x86_64"); + +&declare_function("poly1305_blocks_x86_64", 32, 4); +$code.=<<___; +.cfi_startproc +.Lblocks: + shr \$4,$len + jz .Lno_data # too short + + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 + push $ctx +.cfi_push $ctx +.Lblocks_body: + + mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2 + + mov $s1,$r1 + shr \$2,$s1 + mov $r1,%rax + add $r1,$s1 # s1 = r1 + (r1 >> 2) + jmp .Loop + +.align 32 +.Loop: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 +___ + + &poly1305_iteration(); + +$code.=<<___; + mov $r1,%rax + dec %r15 # len-=16 + jnz .Loop + + mov 0(%rsp),$ctx +.cfi_restore $ctx + + mov $h0,0($ctx) # store hash value + mov $h1,8($ctx) + mov $h2,16($ctx) + + mov 8(%rsp),%r15 +.cfi_restore %r15 + mov 16(%rsp),%r14 +.cfi_restore %r14 + mov 24(%rsp),%r13 +.cfi_restore %r13 + mov 32(%rsp),%r12 +.cfi_restore %r12 + mov 40(%rsp),%rbx +.cfi_restore %rbx + lea 48(%rsp),%rsp +.cfi_adjust_cfa_offset -48 +.Lno_data: +.Lblocks_epilogue: + ret +.cfi_endproc +___ +&end_function("poly1305_blocks_x86_64"); + +&declare_function("poly1305_emit_x86_64", 32, 3); +$code.=<<___; +.Lemit: + mov 0($ctx),%r8 # load hash value + mov 8($ctx),%r9 + mov 
16($ctx),%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? + cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +___ +&end_function("poly1305_emit_x86_64"); +if ($avx) { + +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX\n"; +} + +######################################################################## +# Layout of opaque area is following. +# +# unsigned __int32 h[5]; # current hash value base 2^26 +# unsigned __int32 is_base2_26; +# unsigned __int64 r[2]; # key value base 2^64 +# unsigned __int64 pad; +# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9]; +# +# where r^n are base 2^26 digits of degrees of multiplier key. There are +# 5 digits, but last four are interleaved with multiples of 5, totalling +# in 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4. + +my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) = + map("%xmm$_",(0..15)); + +$code.=<<___; +.type __poly1305_block,\@abi-omnipotent +.align 32 +__poly1305_block: + push $ctx +___ + &poly1305_iteration(); +$code.=<<___; + pop $ctx + ret +.size __poly1305_block,.-__poly1305_block + +.type __poly1305_init_avx,\@abi-omnipotent +.align 32 +__poly1305_init_avx: + push %rbp + mov %rsp,%rbp + mov $r0,$h0 + mov $r1,$h1 + xor $h2,$h2 + + lea 48+64($ctx),$ctx # size optimization + + mov $r1,%rax + call __poly1305_block # r^2 + + mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26 + mov \$0x3ffffff,%edx + mov $h0,$d1 + and $h0#d,%eax + mov $r0,$d2 + and $r0#d,%edx + mov %eax,`16*0+0-64`($ctx) + shr \$26,$d1 + mov %edx,`16*0+4-64`($ctx) + shr \$26,$d2 + + mov \$0x3ffffff,%eax + mov \$0x3ffffff,%edx + and $d1#d,%eax + and $d2#d,%edx + mov %eax,`16*1+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*1+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*2+0-64`($ctx) + 
shr \$26,$d1 + mov %edx,`16*2+4-64`($ctx) + shr \$26,$d2 + + mov $h1,%rax + mov $r1,%rdx + shl \$12,%rax + shl \$12,%rdx + or $d1,%rax + or $d2,%rdx + and \$0x3ffffff,%eax + and \$0x3ffffff,%edx + mov %eax,`16*3+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*3+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*4+0-64`($ctx) + mov $h1,$d1 + mov %edx,`16*4+4-64`($ctx) + mov $r1,$d2 + + mov \$0x3ffffff,%eax + mov \$0x3ffffff,%edx + shr \$14,$d1 + shr \$14,$d2 + and $d1#d,%eax + and $d2#d,%edx + mov %eax,`16*5+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*5+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*6+0-64`($ctx) + shr \$26,$d1 + mov %edx,`16*6+4-64`($ctx) + shr \$26,$d2 + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+0-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d2#d,`16*7+4-64`($ctx) + lea ($d2,$d2,4),$d2 # *5 + mov $d1#d,`16*8+0-64`($ctx) + mov $d2#d,`16*8+4-64`($ctx) + + mov $r1,%rax + call __poly1305_block # r^3 + + mov \$0x3ffffff,%eax # save r^3 base 2^26 + mov $h0,$d1 + and $h0#d,%eax + shr \$26,$d1 + mov %eax,`16*0+12-64`($ctx) + + mov \$0x3ffffff,%edx + and $d1#d,%edx + mov %edx,`16*1+12-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*2+12-64`($ctx) + + mov $h1,%rax + shl \$12,%rax + or $d1,%rax + and \$0x3ffffff,%eax + mov %eax,`16*3+12-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov $h1,$d1 + mov %eax,`16*4+12-64`($ctx) + + mov \$0x3ffffff,%edx + shr \$14,$d1 + and $d1#d,%edx + mov %edx,`16*5+12-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*6+12-64`($ctx) + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+12-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d1#d,`16*8+12-64`($ctx) + + mov $r1,%rax + call __poly1305_block # r^4 + + mov \$0x3ffffff,%eax # save r^4 base 2^26 + mov $h0,$d1 + and $h0#d,%eax + shr \$26,$d1 + mov %eax,`16*0+8-64`($ctx) + + mov \$0x3ffffff,%edx + and $d1#d,%edx + mov %edx,`16*1+8-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + 
shr \$26,$d1 + mov %edx,`16*2+8-64`($ctx) + + mov $h1,%rax + shl \$12,%rax + or $d1,%rax + and \$0x3ffffff,%eax + mov %eax,`16*3+8-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov $h1,$d1 + mov %eax,`16*4+8-64`($ctx) + + mov \$0x3ffffff,%edx + shr \$14,$d1 + and $d1#d,%edx + mov %edx,`16*5+8-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*6+8-64`($ctx) + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+8-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d1#d,`16*8+8-64`($ctx) + + lea -48-64($ctx),$ctx # size [de-]optimization + pop %rbp + ret +.size __poly1305_init_avx,.-__poly1305_init_avx +___ + +&declare_function("poly1305_blocks_avx", 32, 4); +$code.=<<___; +.cfi_startproc + mov 20($ctx),%r8d # is_base2_26 + cmp \$128,$len + jae .Lblocks_avx + test %r8d,%r8d + jz .Lblocks + +.Lblocks_avx: + and \$-16,$len + jz .Lno_data_avx + + vzeroupper + + test %r8d,%r8d + jz .Lbase2_64_avx + + test \$31,$len + jz .Leven_avx + + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lblocks_avx_body: + + mov $len,%r15 # reassign $len + + mov 0($ctx),$d1 # load hash value + mov 8($ctx),$d2 + mov 16($ctx),$h2#d + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + ################################# base 2^26 -> base 2^64 + mov $d1#d,$h0#d + and \$`-1*(1<<31)`,$d1 + mov $d2,$r1 # borrow $r1 + mov $d2#d,$h1#d + and \$`-1*(1<<31)`,$d2 + + shr \$6,$d1 + shl \$52,$r1 + add $d1,$h0 + shr \$12,$h1 + shr \$18,$d2 + add $r1,$h0 + adc $d2,$h1 + + mov $h2,$d1 + shl \$40,$d1 + shr \$24,$h2 + add $d1,$h1 + adc \$0,$h2 # can be partially reduced... + + mov \$-4,$d2 # ... 
so reduce + mov $h2,$d1 + and $h2,$d2 + shr \$2,$d1 + and \$3,$h2 + add $d2,$d1 # =*5 + add $d1,$h0 + adc \$0,$h1 + adc \$0,$h2 + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + + call __poly1305_block + + test $padbit,$padbit # if $padbit is zero, + jz .Lstore_base2_64_avx # store hash in base 2^64 format + + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$r0 + mov $h1,$r1 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$r0 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $r0,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$r1 + and \$0x3ffffff,$h1 # h[3] + or $r1,$h2 # h[4] + + sub \$16,%r15 + jz .Lstore_base2_26_avx + + vmovd %rax#d,$H0 + vmovd %rdx#d,$H1 + vmovd $h0#d,$H2 + vmovd $h1#d,$H3 + vmovd $h2#d,$H4 + jmp .Lproceed_avx + +.align 32 +.Lstore_base2_64_avx: + mov $h0,0($ctx) + mov $h1,8($ctx) + mov $h2,16($ctx) # note that is_base2_26 is zeroed + jmp .Ldone_avx + +.align 16 +.Lstore_base2_26_avx: + mov %rax#d,0($ctx) # store hash value base 2^26 + mov %rdx#d,4($ctx) + mov $h0#d,8($ctx) + mov $h1#d,12($ctx) + mov $h2#d,16($ctx) +.align 16 +.Ldone_avx: + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lno_data_avx: +.Lblocks_avx_epilogue: + ret +.cfi_endproc + +.align 32 +.Lbase2_64_avx: +.cfi_startproc + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lbase2_64_avx_body: + + mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2#d + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add 
$r1,$s1 # s1 = r1 + (r1 >> 2) + + test \$31,$len + jz .Linit_avx + + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + +.Linit_avx: + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$d1 + mov $h1,$d2 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$d1 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $d1,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$d2 + and \$0x3ffffff,$h1 # h[3] + or $d2,$h2 # h[4] + + vmovd %rax#d,$H0 + vmovd %rdx#d,$H1 + vmovd $h0#d,$H2 + vmovd $h1#d,$H3 + vmovd $h2#d,$H4 + movl \$1,20($ctx) # set is_base2_26 + + call __poly1305_init_avx + +.Lproceed_avx: + mov %r15,$len + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lbase2_64_avx_epilogue: + jmp .Ldo_avx +.cfi_endproc + +.align 32 +.Leven_avx: +.cfi_startproc + vmovd 4*0($ctx),$H0 # load hash value + vmovd 4*1($ctx),$H1 + vmovd 4*2($ctx),$H2 + vmovd 4*3($ctx),$H3 + vmovd 4*4($ctx),$H4 + +.Ldo_avx: +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + and \$-32,%rsp + sub \$-8,%rsp + lea -0x58(%rsp),%r11 + sub \$0x178,%rsp + +___ +$code.=<<___ if ($win64); + lea -0xf8(%rsp),%r11 + sub \$0x218,%rsp + vmovdqa %xmm6,0x50(%r11) + vmovdqa %xmm7,0x60(%r11) + vmovdqa %xmm8,0x70(%r11) + vmovdqa %xmm9,0x80(%r11) + vmovdqa %xmm10,0x90(%r11) + vmovdqa %xmm11,0xa0(%r11) + vmovdqa %xmm12,0xb0(%r11) + vmovdqa %xmm13,0xc0(%r11) + vmovdqa %xmm14,0xd0(%r11) + vmovdqa %xmm15,0xe0(%r11) +.Ldo_avx_body: +___ +$code.=<<___; + sub \$64,$len + lea -32($inp),%rax + cmovc %rax,$inp + + vmovdqu `16*3`($ctx),$D4 # preload r0^2 + lea `16*3+64`($ctx),$ctx # size optimization + lea .Lconst(%rip),%rcx + + ################################################################ + # load input + vmovdqu 
16*2($inp),$T0 + vmovdqu 16*3($inp),$T1 + vmovdqa 64(%rcx),$MASK # .Lmask26 + + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + + vpsrlq \$40,$T4,$T4 # 4 + vpsrlq \$26,$T0,$T1 + vpand $MASK,$T0,$T0 # 0 + vpsrlq \$4,$T3,$T2 + vpand $MASK,$T1,$T1 # 1 + vpsrlq \$30,$T3,$T3 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + jbe .Lskip_loop_avx + + # expand and copy pre-calculated table to stack + vmovdqu `16*1-64`($ctx),$D1 + vmovdqu `16*2-64`($ctx),$D2 + vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434 + vpshufd \$0x44,$D4,$D0 # xx12 -> 1212 + vmovdqa $D3,-0x90(%r11) + vmovdqa $D0,0x00(%rsp) + vpshufd \$0xEE,$D1,$D4 + vmovdqu `16*3-64`($ctx),$D0 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D4,-0x80(%r11) + vmovdqa $D1,0x10(%rsp) + vpshufd \$0xEE,$D2,$D3 + vmovdqu `16*4-64`($ctx),$D1 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D3,-0x70(%r11) + vmovdqa $D2,0x20(%rsp) + vpshufd \$0xEE,$D0,$D4 + vmovdqu `16*5-64`($ctx),$D2 + vpshufd \$0x44,$D0,$D0 + vmovdqa $D4,-0x60(%r11) + vmovdqa $D0,0x30(%rsp) + vpshufd \$0xEE,$D1,$D3 + vmovdqu `16*6-64`($ctx),$D0 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D3,-0x50(%r11) + vmovdqa $D1,0x40(%rsp) + vpshufd \$0xEE,$D2,$D4 + vmovdqu `16*7-64`($ctx),$D1 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D4,-0x40(%r11) + vmovdqa $D2,0x50(%rsp) + vpshufd \$0xEE,$D0,$D3 + vmovdqu `16*8-64`($ctx),$D2 + vpshufd \$0x44,$D0,$D0 + vmovdqa $D3,-0x30(%r11) + vmovdqa $D0,0x60(%rsp) + vpshufd \$0xEE,$D1,$D4 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D4,-0x20(%r11) + vmovdqa $D1,0x70(%rsp) + vpshufd \$0xEE,$D2,$D3 + vmovdqa 0x00(%rsp),$D4 # preload r0^2 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D3,-0x10(%r11) + vmovdqa $D2,0x80(%rsp) + + jmp .Loop_avx + +.align 32 +.Loop_avx: + ################################################################ + # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 + # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + # 
\___________________/ + # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + # \___________________/ \____________________/ + # + # Note that we start with inp[2:3]*r^2. This is because it + # doesn't depend on reduction in previous iteration. + ################################################################ + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # though note that $Tx and $Hx are "reversed" in this section, + # and $D4 is preloaded with r0^2... + + vpmuludq $T0,$D4,$D0 # d0 = h0*r0 + vpmuludq $T1,$D4,$D1 # d1 = h1*r0 + vmovdqa $H2,0x20(%r11) # offload hash + vpmuludq $T2,$D4,$D2 # d3 = h2*r0 + vmovdqa 0x10(%rsp),$H2 # r1^2 + vpmuludq $T3,$D4,$D3 # d3 = h3*r0 + vpmuludq $T4,$D4,$D4 # d4 = h4*r0 + + vmovdqa $H0,0x00(%r11) # + vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1 + vmovdqa $H1,0x10(%r11) # + vpmuludq $T3,$H2,$H1 # h3*r1 + vpaddq $H0,$D0,$D0 # d0 += h4*s1 + vpaddq $H1,$D4,$D4 # d4 += h3*r1 + vmovdqa $H3,0x30(%r11) # + vpmuludq $T2,$H2,$H0 # h2*r1 + vpmuludq $T1,$H2,$H1 # h1*r1 + vpaddq $H0,$D3,$D3 # d3 += h2*r1 + vmovdqa 0x30(%rsp),$H3 # r2^2 + vpaddq $H1,$D2,$D2 # d2 += h1*r1 + vmovdqa $H4,0x40(%r11) # + vpmuludq $T0,$H2,$H2 # h0*r1 + vpmuludq $T2,$H3,$H0 # h2*r2 + vpaddq $H2,$D1,$D1 # d1 += h0*r1 + + vmovdqa 0x40(%rsp),$H4 # s2^2 + vpaddq $H0,$D4,$D4 # d4 += h2*r2 + vpmuludq $T1,$H3,$H1 # h1*r2 + vpmuludq $T0,$H3,$H3 # h0*r2 + vpaddq $H1,$D3,$D3 # d3 += h1*r2 + vmovdqa 0x50(%rsp),$H2 # r3^2 + vpaddq $H3,$D2,$D2 # d2 += h0*r2 + vpmuludq $T4,$H4,$H0 # h4*s2 + vpmuludq $T3,$H4,$H4 # h3*s2 + vpaddq $H0,$D1,$D1 # d1 += h4*s2 + vmovdqa 0x60(%rsp),$H3 # s3^2 + vpaddq $H4,$D0,$D0 # d0 += h3*s2 + + vmovdqa 0x80(%rsp),$H4 # s4^2 + vpmuludq $T1,$H2,$H1 # h1*r3 + vpmuludq $T0,$H2,$H2 # 
h0*r3 + vpaddq $H1,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $T4,$H3,$H0 # h4*s3 + vpmuludq $T3,$H3,$H1 # h3*s3 + vpaddq $H0,$D2,$D2 # d2 += h4*s3 + vmovdqu 16*0($inp),$H0 # load input + vpaddq $H1,$D1,$D1 # d1 += h3*s3 + vpmuludq $T2,$H3,$H3 # h2*s3 + vpmuludq $T2,$H4,$T2 # h2*s4 + vpaddq $H3,$D0,$D0 # d0 += h2*s3 + + vmovdqu 16*1($inp),$H1 # + vpaddq $T2,$D1,$D1 # d1 += h2*s4 + vpmuludq $T3,$H4,$T3 # h3*s4 + vpmuludq $T4,$H4,$T4 # h4*s4 + vpsrldq \$6,$H0,$H2 # splat input + vpaddq $T3,$D2,$D2 # d2 += h3*s4 + vpaddq $T4,$D3,$D3 # d3 += h4*s4 + vpsrldq \$6,$H1,$H3 # + vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4 + vpmuludq $T1,$H4,$T0 # h1*s4 + vpunpckhqdq $H1,$H0,$H4 # 4 + vpaddq $T4,$D4,$D4 # d4 += h0*r4 + vmovdqa -0x90(%r11),$T4 # r0^4 + vpaddq $T0,$D0,$D0 # d0 += h1*s4 + + vpunpcklqdq $H1,$H0,$H0 # 0:1 + vpunpcklqdq $H3,$H2,$H3 # 2:3 + + #vpsrlq \$40,$H4,$H4 # 4 + vpsrldq \$`40/8`,$H4,$H4 # 4 + vpsrlq \$26,$H0,$H1 + vpand $MASK,$H0,$H0 # 0 + vpsrlq \$4,$H3,$H2 + vpand $MASK,$H1,$H1 # 1 + vpand 0(%rcx),$H4,$H4 # .Lmask24 + vpsrlq \$30,$H3,$H3 + vpand $MASK,$H2,$H2 # 2 + vpand $MASK,$H3,$H3 # 3 + vpor 32(%rcx),$H4,$H4 # padbit, yes, always + + vpaddq 0x00(%r11),$H0,$H0 # add hash value + vpaddq 0x10(%r11),$H1,$H1 + vpaddq 0x20(%r11),$H2,$H2 + vpaddq 0x30(%r11),$H3,$H3 + vpaddq 0x40(%r11),$H4,$H4 + + lea 16*2($inp),%rax + lea 16*4($inp),$inp + sub \$64,$len + cmovc %rax,$inp + + ################################################################ + # Now we accumulate (inp[0:1]+hash)*r^4 + ################################################################ + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + vpmuludq $H0,$T4,$T0 # h0*r0 + vpmuludq $H1,$T4,$T1 # h1*r0 + vpaddq $T0,$D0,$D0 + vpaddq $T1,$D1,$D1 + vmovdqa -0x80(%r11),$T2 # r1^4 + vpmuludq 
$H2,$T4,$T0 # h2*r0 + vpmuludq $H3,$T4,$T1 # h3*r0 + vpaddq $T0,$D2,$D2 + vpaddq $T1,$D3,$D3 + vpmuludq $H4,$T4,$T4 # h4*r0 + vpmuludq -0x70(%r11),$H4,$T0 # h4*s1 + vpaddq $T4,$D4,$D4 + + vpaddq $T0,$D0,$D0 # d0 += h4*s1 + vpmuludq $H2,$T2,$T1 # h2*r1 + vpmuludq $H3,$T2,$T0 # h3*r1 + vpaddq $T1,$D3,$D3 # d3 += h2*r1 + vmovdqa -0x60(%r11),$T3 # r2^4 + vpaddq $T0,$D4,$D4 # d4 += h3*r1 + vpmuludq $H1,$T2,$T1 # h1*r1 + vpmuludq $H0,$T2,$T2 # h0*r1 + vpaddq $T1,$D2,$D2 # d2 += h1*r1 + vpaddq $T2,$D1,$D1 # d1 += h0*r1 + + vmovdqa -0x50(%r11),$T4 # s2^4 + vpmuludq $H2,$T3,$T0 # h2*r2 + vpmuludq $H1,$T3,$T1 # h1*r2 + vpaddq $T0,$D4,$D4 # d4 += h2*r2 + vpaddq $T1,$D3,$D3 # d3 += h1*r2 + vmovdqa -0x40(%r11),$T2 # r3^4 + vpmuludq $H0,$T3,$T3 # h0*r2 + vpmuludq $H4,$T4,$T0 # h4*s2 + vpaddq $T3,$D2,$D2 # d2 += h0*r2 + vpaddq $T0,$D1,$D1 # d1 += h4*s2 + vmovdqa -0x30(%r11),$T3 # s3^4 + vpmuludq $H3,$T4,$T4 # h3*s2 + vpmuludq $H1,$T2,$T1 # h1*r3 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + + vmovdqa -0x10(%r11),$T4 # s4^4 + vpaddq $T1,$D4,$D4 # d4 += h1*r3 + vpmuludq $H0,$T2,$T2 # h0*r3 + vpmuludq $H4,$T3,$T0 # h4*s3 + vpaddq $T2,$D3,$D3 # d3 += h0*r3 + vpaddq $T0,$D2,$D2 # d2 += h4*s3 + vmovdqu 16*2($inp),$T0 # load input + vpmuludq $H3,$T3,$T2 # h3*s3 + vpmuludq $H2,$T3,$T3 # h2*s3 + vpaddq $T2,$D1,$D1 # d1 += h3*s3 + vmovdqu 16*3($inp),$T1 # + vpaddq $T3,$D0,$D0 # d0 += h2*s3 + + vpmuludq $H2,$T4,$H2 # h2*s4 + vpmuludq $H3,$T4,$H3 # h3*s4 + vpsrldq \$6,$T0,$T2 # splat input + vpaddq $H2,$D1,$D1 # d1 += h2*s4 + vpmuludq $H4,$T4,$H4 # h4*s4 + vpsrldq \$6,$T1,$T3 # + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4 + vpmuludq -0x20(%r11),$H0,$H4 # h0*r4 + vpmuludq $H1,$T4,$H0 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + + #vpsrlq \$40,$T4,$T4 # 4 + vpsrldq \$`40/8`,$T4,$T4 # 4 + vpsrlq \$26,$T0,$T1 + vmovdqa 0x00(%rsp),$D4 # preload 
r0^2 + vpand $MASK,$T0,$T0 # 0 + vpsrlq \$4,$T3,$T2 + vpand $MASK,$T1,$T1 # 1 + vpand 0(%rcx),$T4,$T4 # .Lmask24 + vpsrlq \$30,$T3,$T3 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + ################################################################ + # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + # and P. Schwabe + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D0 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D0,$H0,$H0 + vpsllq \$2,$D0,$D0 + vpaddq $D0,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + ja .Loop_avx + +.Lskip_loop_avx: + ################################################################ + # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2 + add \$32,$len + jnz .Long_tail_avx + + vpaddq $H2,$T2,$T2 + vpaddq $H0,$T0,$T0 + vpaddq $H1,$T1,$T1 + vpaddq $H3,$T3,$T3 + vpaddq $H4,$T4,$T4 + +.Long_tail_avx: + vmovdqa $H2,0x20(%r11) + vmovdqa $H0,0x00(%r11) + vmovdqa $H1,0x10(%r11) + vmovdqa $H3,0x30(%r11) + vmovdqa $H4,0x40(%r11) + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + vpmuludq $T2,$D4,$D2 # d2 = h2*r0 + vpmuludq $T0,$D4,$D0 # d0 = h0*r0 + vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n + vpmuludq $T1,$D4,$D1 # d1 = h1*r0 + vpmuludq $T3,$D4,$D3 # d3 = h3*r0 + vpmuludq $T4,$D4,$D4 # d4 = h4*r0 + + vpmuludq $T3,$H2,$H0 # h3*r1 + vpaddq 
$H0,$D4,$D4 # d4 += h3*r1 + vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n + vpmuludq $T2,$H2,$H1 # h2*r1 + vpaddq $H1,$D3,$D3 # d3 += h2*r1 + vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n + vpmuludq $T1,$H2,$H0 # h1*r1 + vpaddq $H0,$D2,$D2 # d2 += h1*r1 + vpmuludq $T0,$H2,$H2 # h0*r1 + vpaddq $H2,$D1,$D1 # d1 += h0*r1 + vpmuludq $T4,$H3,$H3 # h4*s1 + vpaddq $H3,$D0,$D0 # d0 += h4*s1 + + vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n + vpmuludq $T2,$H4,$H1 # h2*r2 + vpaddq $H1,$D4,$D4 # d4 += h2*r2 + vpmuludq $T1,$H4,$H0 # h1*r2 + vpaddq $H0,$D3,$D3 # d3 += h1*r2 + vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n + vpmuludq $T0,$H4,$H4 # h0*r2 + vpaddq $H4,$D2,$D2 # d2 += h0*r2 + vpmuludq $T4,$H2,$H1 # h4*s2 + vpaddq $H1,$D1,$D1 # d1 += h4*s2 + vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n + vpmuludq $T3,$H2,$H2 # h3*s2 + vpaddq $H2,$D0,$D0 # d0 += h3*s2 + + vpmuludq $T1,$H3,$H0 # h1*r3 + vpaddq $H0,$D4,$D4 # d4 += h1*r3 + vpmuludq $T0,$H3,$H3 # h0*r3 + vpaddq $H3,$D3,$D3 # d3 += h0*r3 + vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n + vpmuludq $T4,$H4,$H1 # h4*s3 + vpaddq $H1,$D2,$D2 # d2 += h4*s3 + vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n + vpmuludq $T3,$H4,$H0 # h3*s3 + vpaddq $H0,$D1,$D1 # d1 += h3*s3 + vpmuludq $T2,$H4,$H4 # h2*s3 + vpaddq $H4,$D0,$D0 # d0 += h2*s3 + + vpmuludq $T0,$H2,$H2 # h0*r4 + vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4 + vpmuludq $T4,$H3,$H1 # h4*s4 + vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4 + vpmuludq $T3,$H3,$H0 # h3*s4 + vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4 + vpmuludq $T2,$H3,$H1 # h2*s4 + vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4 + vpmuludq $T1,$H3,$H3 # h1*s4 + vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4 + + jz .Lshort_tail_avx + + vmovdqu 16*0($inp),$H0 # load input + vmovdqu 16*1($inp),$H1 + + vpsrldq \$6,$H0,$H2 # splat input + vpsrldq \$6,$H1,$H3 + vpunpckhqdq $H1,$H0,$H4 # 4 + vpunpcklqdq $H1,$H0,$H0 # 0:1 + vpunpcklqdq $H3,$H2,$H3 # 2:3 + + vpsrlq \$40,$H4,$H4 # 4 + vpsrlq \$26,$H0,$H1 + vpand $MASK,$H0,$H0 # 0 + vpsrlq \$4,$H3,$H2 + vpand $MASK,$H1,$H1 # 1 + vpsrlq 
\$30,$H3,$H3 + vpand $MASK,$H2,$H2 # 2 + vpand $MASK,$H3,$H3 # 3 + vpor 32(%rcx),$H4,$H4 # padbit, yes, always + + vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4 + vpaddq 0x00(%r11),$H0,$H0 + vpaddq 0x10(%r11),$H1,$H1 + vpaddq 0x20(%r11),$H2,$H2 + vpaddq 0x30(%r11),$H3,$H3 + vpaddq 0x40(%r11),$H4,$H4 + + ################################################################ + # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate + + vpmuludq $H0,$T4,$T0 # h0*r0 + vpaddq $T0,$D0,$D0 # d0 += h0*r0 + vpmuludq $H1,$T4,$T1 # h1*r0 + vpaddq $T1,$D1,$D1 # d1 += h1*r0 + vpmuludq $H2,$T4,$T0 # h2*r0 + vpaddq $T0,$D2,$D2 # d2 += h2*r0 + vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n + vpmuludq $H3,$T4,$T1 # h3*r0 + vpaddq $T1,$D3,$D3 # d3 += h3*r0 + vpmuludq $H4,$T4,$T4 # h4*r0 + vpaddq $T4,$D4,$D4 # d4 += h4*r0 + + vpmuludq $H3,$T2,$T0 # h3*r1 + vpaddq $T0,$D4,$D4 # d4 += h3*r1 + vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1 + vpmuludq $H2,$T2,$T1 # h2*r1 + vpaddq $T1,$D3,$D3 # d3 += h2*r1 + vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2 + vpmuludq $H1,$T2,$T0 # h1*r1 + vpaddq $T0,$D2,$D2 # d2 += h1*r1 + vpmuludq $H0,$T2,$T2 # h0*r1 + vpaddq $T2,$D1,$D1 # d1 += h0*r1 + vpmuludq $H4,$T3,$T3 # h4*s1 + vpaddq $T3,$D0,$D0 # d0 += h4*s1 + + vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2 + vpmuludq $H2,$T4,$T1 # h2*r2 + vpaddq $T1,$D4,$D4 # d4 += h2*r2 + vpmuludq $H1,$T4,$T0 # h1*r2 + vpaddq $T0,$D3,$D3 # d3 += h1*r2 + vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3 + vpmuludq $H0,$T4,$T4 # h0*r2 + vpaddq $T4,$D2,$D2 # d2 += h0*r2 + vpmuludq $H4,$T2,$T1 # h4*s2 + vpaddq $T1,$D1,$D1 # d1 += h4*s2 + vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3 + vpmuludq $H3,$T2,$T2 # h3*s2 + vpaddq $T2,$D0,$D0 # d0 += h3*s2 + + vpmuludq $H1,$T3,$T0 # h1*r3 + vpaddq $T0,$D4,$D4 # d4 += h1*r3 + vpmuludq $H0,$T3,$T3 # h0*r3 + vpaddq $T3,$D3,$D3 # d3 += h0*r3 + vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4 + vpmuludq $H4,$T4,$T1 # h4*s3 + vpaddq $T1,$D2,$D2 # d2 += h4*s3 + vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4 + vpmuludq $H3,$T4,$T0 # h3*s3 
+ vpaddq $T0,$D1,$D1 # d1 += h3*s3 + vpmuludq $H2,$T4,$T4 # h2*s3 + vpaddq $T4,$D0,$D0 # d0 += h2*s3 + + vpmuludq $H0,$T2,$T2 # h0*r4 + vpaddq $T2,$D4,$D4 # d4 += h0*r4 + vpmuludq $H4,$T3,$T1 # h4*s4 + vpaddq $T1,$D3,$D3 # d3 += h4*s4 + vpmuludq $H3,$T3,$T0 # h3*s4 + vpaddq $T0,$D2,$D2 # d2 += h3*s4 + vpmuludq $H2,$T3,$T1 # h2*s4 + vpaddq $T1,$D1,$D1 # d1 += h2*s4 + vpmuludq $H1,$T3,$T3 # h1*s4 + vpaddq $T3,$D0,$D0 # d0 += h1*s4 + +.Lshort_tail_avx: + ################################################################ + # horizontal addition + + vpsrldq \$8,$D4,$T4 + vpsrldq \$8,$D3,$T3 + vpsrldq \$8,$D1,$T1 + vpsrldq \$8,$D0,$T0 + vpsrldq \$8,$D2,$T2 + vpaddq $T3,$D3,$D3 + vpaddq $T4,$D4,$D4 + vpaddq $T0,$D0,$D0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$D2,$D2 + + ################################################################ + # lazy reduction + + vpsrlq \$26,$D3,$H3 + vpand $MASK,$D3,$D3 + vpaddq $H3,$D4,$D4 # h3 -> h4 + + vpsrlq \$26,$D0,$H0 + vpand $MASK,$D0,$D0 + vpaddq $H0,$D1,$D1 # h0 -> h1 + + vpsrlq \$26,$D4,$H4 + vpand $MASK,$D4,$D4 + + vpsrlq \$26,$D1,$H1 + vpand $MASK,$D1,$D1 + vpaddq $H1,$D2,$D2 # h1 -> h2 + + vpaddq $H4,$D0,$D0 + vpsllq \$2,$H4,$H4 + vpaddq $H4,$D0,$D0 # h4 -> h0 + + vpsrlq \$26,$D2,$H2 + vpand $MASK,$D2,$D2 + vpaddq $H2,$D3,$D3 # h2 -> h3 + + vpsrlq \$26,$D0,$H0 + vpand $MASK,$D0,$D0 + vpaddq $H0,$D1,$D1 # h0 -> h1 + + vpsrlq \$26,$D3,$H3 + vpand $MASK,$D3,$D3 + vpaddq $H3,$D4,$D4 # h3 -> h4 + + vmovd $D0,`4*0-48-64`($ctx) # save partially reduced + vmovd $D1,`4*1-48-64`($ctx) + vmovd $D2,`4*2-48-64`($ctx) + vmovd $D3,`4*3-48-64`($ctx) + vmovd $D4,`4*4-48-64`($ctx) +___ +$code.=<<___ if ($win64); + vmovdqa 0x50(%r11),%xmm6 + vmovdqa 0x60(%r11),%xmm7 + vmovdqa 0x70(%r11),%xmm8 + vmovdqa 0x80(%r11),%xmm9 + vmovdqa 0x90(%r11),%xmm10 + vmovdqa 0xa0(%r11),%xmm11 + vmovdqa 0xb0(%r11),%xmm12 + vmovdqa 0xc0(%r11),%xmm13 + vmovdqa 0xd0(%r11),%xmm14 + vmovdqa 0xe0(%r11),%xmm15 + lea 0xf8(%r11),%rsp +.Ldo_avx_epilogue: +___ +$code.=<<___ if (!$win64); 
+ lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + vzeroupper + ret +.cfi_endproc +___ +&end_function("poly1305_blocks_avx"); + +&declare_function("poly1305_emit_avx", 32, 3); +$code.=<<___; + cmpl \$0,20($ctx) # is_base2_26? + je .Lemit + + mov 0($ctx),%eax # load hash value base 2^26 + mov 4($ctx),%ecx + mov 8($ctx),%r8d + mov 12($ctx),%r11d + mov 16($ctx),%r10d + + shl \$26,%rcx # base 2^26 -> base 2^64 + mov %r8,%r9 + shl \$52,%r8 + add %rcx,%rax + shr \$12,%r9 + add %rax,%r8 # h0 + adc \$0,%r9 + + shl \$14,%r11 + mov %r10,%rax + shr \$24,%r10 + add %r11,%r9 + shl \$40,%rax + add %rax,%r9 # h1 + adc \$0,%r10 # h2 + + mov %r10,%rax # could be partially reduced, so reduce + mov %r10,%rcx + and \$3,%r10 + shr \$2,%rax + and \$-4,%rcx + add %rcx,%rax + add %rax,%r8 + adc \$0,%r9 + adc \$0,%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? + cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +___ +&end_function("poly1305_emit_avx"); + +if ($kernel) { + $code .= "#endif\n"; +} + +if ($avx>1) { + +if ($kernel) { + $code .= "#ifdef CONFIG_AS_AVX2\n"; +} + +my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) = + map("%ymm$_",(0..15)); +my $S4=$MASK; + +sub poly1305_blocks_avxN { + my ($avx512) = @_; + my $suffix = $avx512 ? 
"_avx512" : ""; +$code.=<<___; +.cfi_startproc + mov 20($ctx),%r8d # is_base2_26 + cmp \$128,$len + jae .Lblocks_avx2$suffix + test %r8d,%r8d + jz .Lblocks + +.Lblocks_avx2$suffix: + and \$-16,$len + jz .Lno_data_avx2$suffix + + vzeroupper + + test %r8d,%r8d + jz .Lbase2_64_avx2$suffix + + test \$63,$len + jz .Leven_avx2$suffix + + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lblocks_avx2_body$suffix: + + mov $len,%r15 # reassign $len + + mov 0($ctx),$d1 # load hash value + mov 8($ctx),$d2 + mov 16($ctx),$h2#d + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + ################################# base 2^26 -> base 2^64 + mov $d1#d,$h0#d + and \$`-1*(1<<31)`,$d1 + mov $d2,$r1 # borrow $r1 + mov $d2#d,$h1#d + and \$`-1*(1<<31)`,$d2 + + shr \$6,$d1 + shl \$52,$r1 + add $d1,$h0 + shr \$12,$h1 + shr \$18,$d2 + add $r1,$h0 + adc $d2,$h1 + + mov $h2,$d1 + shl \$40,$d1 + shr \$24,$h2 + add $d1,$h1 + adc \$0,$h2 # can be partially reduced... + + mov \$-4,$d2 # ... 
so reduce + mov $h2,$d1 + and $h2,$d2 + shr \$2,$d1 + and \$3,$h2 + add $d2,$d1 # =*5 + add $d1,$h0 + adc \$0,$h1 + adc \$0,$h2 + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + +.Lbase2_26_pre_avx2$suffix: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + mov $r1,%rax + + test \$63,%r15 + jnz .Lbase2_26_pre_avx2$suffix + + test $padbit,$padbit # if $padbit is zero, + jz .Lstore_base2_64_avx2$suffix # store hash in base 2^64 format + + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$r0 + mov $h1,$r1 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$r0 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $r0,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$r1 + and \$0x3ffffff,$h1 # h[3] + or $r1,$h2 # h[4] + + test %r15,%r15 + jz .Lstore_base2_26_avx2$suffix + + vmovd %rax#d,%x#$H0 + vmovd %rdx#d,%x#$H1 + vmovd $h0#d,%x#$H2 + vmovd $h1#d,%x#$H3 + vmovd $h2#d,%x#$H4 + jmp .Lproceed_avx2$suffix + +.align 32 +.Lstore_base2_64_avx2$suffix: + mov $h0,0($ctx) + mov $h1,8($ctx) + mov $h2,16($ctx) # note that is_base2_26 is zeroed + jmp .Ldone_avx2$suffix + +.align 16 +.Lstore_base2_26_avx2$suffix: + mov %rax#d,0($ctx) # store hash value base 2^26 + mov %rdx#d,4($ctx) + mov $h0#d,8($ctx) + mov $h1#d,12($ctx) + mov $h2#d,16($ctx) +.align 16 +.Ldone_avx2$suffix: + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lno_data_avx2$suffix: +.Lblocks_avx2_epilogue$suffix: + ret +.cfi_endproc + +.align 32 +.Lbase2_64_avx2$suffix: +.cfi_startproc + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lbase2_64_avx2_body$suffix: + 
+ mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2#d + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + + test \$63,$len + jz .Linit_avx2$suffix + +.Lbase2_64_pre_avx2$suffix: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + mov $r1,%rax + + test \$63,%r15 + jnz .Lbase2_64_pre_avx2$suffix + +.Linit_avx2$suffix: + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$d1 + mov $h1,$d2 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$d1 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $d1,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$d2 + and \$0x3ffffff,$h1 # h[3] + or $d2,$h2 # h[4] + + vmovd %rax#d,%x#$H0 + vmovd %rdx#d,%x#$H1 + vmovd $h0#d,%x#$H2 + vmovd $h1#d,%x#$H3 + vmovd $h2#d,%x#$H4 + movl \$1,20($ctx) # set is_base2_26 + + call __poly1305_init_avx + +.Lproceed_avx2$suffix: + mov %r15,$len # restore $len +___ +$code.=<<___ if (!$kernel); + mov OPENSSL_ia32cap_P+8(%rip),%r9d + mov \$`(1<<31|1<<30|1<<16)`,%r11d +___ +$code.=<<___; + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lbase2_64_avx2_epilogue$suffix: + jmp .Ldo_avx2$suffix +.cfi_endproc + +.align 32 +.Leven_avx2$suffix: +.cfi_startproc +___ +$code.=<<___ if (!$kernel); + mov OPENSSL_ia32cap_P+8(%rip),%r9d +___ +$code.=<<___; + vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26 + vmovd 4*1($ctx),%x#$H1 + vmovd 4*2($ctx),%x#$H2 + vmovd 4*3($ctx),%x#$H3 + vmovd 4*4($ctx),%x#$H4 + +.Ldo_avx2$suffix: +___ +$code.=<<___ if (!$kernel && $avx>2); + cmp \$512,$len + jb .Lskip_avx512 + and %r11d,%r9d + test \$`1<<16`,%r9d # check for AVX512F + jnz .Lblocks_avx512 
+.Lskip_avx512$suffix: +___ +$code.=<<___ if ($avx > 2 && $avx512 && $kernel); + cmp \$512,$len + jae .Lblocks_avx512 +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + sub \$0x128,%rsp +___ +$code.=<<___ if ($win64); + lea 8(%rsp),%r10 + sub \$0x1c8,%rsp + vmovdqa %xmm6,-0xb0(%r10) + vmovdqa %xmm7,-0xa0(%r10) + vmovdqa %xmm8,-0x90(%r10) + vmovdqa %xmm9,-0x80(%r10) + vmovdqa %xmm10,-0x70(%r10) + vmovdqa %xmm11,-0x60(%r10) + vmovdqa %xmm12,-0x50(%r10) + vmovdqa %xmm13,-0x40(%r10) + vmovdqa %xmm14,-0x30(%r10) + vmovdqa %xmm15,-0x20(%r10) +.Ldo_avx2_body$suffix: +___ +$code.=<<___; + lea .Lconst(%rip),%rcx + lea 48+64($ctx),$ctx # size optimization + vmovdqa 96(%rcx),$T0 # .Lpermd_avx2 + + # expand and copy pre-calculated table to stack + vmovdqu `16*0-64`($ctx),%x#$T2 + and \$-512,%rsp + vmovdqu `16*1-64`($ctx),%x#$T3 + vmovdqu `16*2-64`($ctx),%x#$T4 + vmovdqu `16*3-64`($ctx),%x#$D0 + vmovdqu `16*4-64`($ctx),%x#$D1 + vmovdqu `16*5-64`($ctx),%x#$D2 + lea 0x90(%rsp),%rax # size optimization + vmovdqu `16*6-64`($ctx),%x#$D3 + vpermd $T2,$T0,$T2 # 00003412 -> 14243444 + vmovdqu `16*7-64`($ctx),%x#$D4 + vpermd $T3,$T0,$T3 + vmovdqu `16*8-64`($ctx),%x#$MASK + vpermd $T4,$T0,$T4 + vmovdqa $T2,0x00(%rsp) + vpermd $D0,$T0,$D0 + vmovdqa $T3,0x20-0x90(%rax) + vpermd $D1,$T0,$D1 + vmovdqa $T4,0x40-0x90(%rax) + vpermd $D2,$T0,$D2 + vmovdqa $D0,0x60-0x90(%rax) + vpermd $D3,$T0,$D3 + vmovdqa $D1,0x80-0x90(%rax) + vpermd $D4,$T0,$D4 + vmovdqa $D2,0xa0-0x90(%rax) + vpermd $MASK,$T0,$MASK + vmovdqa $D3,0xc0-0x90(%rax) + vmovdqa $D4,0xe0-0x90(%rax) + vmovdqa $MASK,0x100-0x90(%rax) + vmovdqa 64(%rcx),$MASK # .Lmask26 + + ################################################################ + # load input + vmovdqu 16*0($inp),%x#$T0 + vmovdqu 16*1($inp),%x#$T1 + vinserti128 \$1,16*2($inp),$T0,$T0 + vinserti128 \$1,16*3($inp),$T1,$T1 + lea 16*4($inp),$inp + + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpunpcklqdq 
$T3,$T2,$T2 # 2:3 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + + vpsrlq \$30,$T2,$T3 + vpsrlq \$4,$T2,$T2 + vpsrlq \$26,$T0,$T1 + vpsrlq \$40,$T4,$T4 # 4 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T0,$T0 # 0 + vpand $MASK,$T1,$T1 # 1 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + vpaddq $H2,$T2,$H2 # accumulate input + sub \$64,$len + jz .Ltail_avx2$suffix + jmp .Loop_avx2$suffix + +.align 32 +.Loop_avx2$suffix: + ################################################################ + # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4 + # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3 + # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2 + # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1 + # \________/\__________/ + ################################################################ + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + vmovdqa `32*0`(%rsp),$T0 # r0^4 + vpaddq $H1,$T1,$H1 + vmovdqa `32*1`(%rsp),$T1 # r1^4 + vpaddq $H3,$T3,$H3 + vmovdqa `32*3`(%rsp),$T2 # r2^4 + vpaddq $H4,$T4,$H4 + vmovdqa `32*6-0x90`(%rax),$T3 # s3^4 + vmovdqa `32*8-0x90`(%rax),$S4 # s4^4 + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # however, as h2 is "chronologically" first one available pull + # corresponding operations up, so it's + # + # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4 + # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4 + + vpmuludq $H2,$T0,$D2 # d2 = h2*r0 + vpmuludq $H2,$T1,$D3 # d3 = h2*r1 + vpmuludq $H2,$T2,$D4 # d4 = h2*r2 + vpmuludq $H2,$T3,$D0 # d0 = h2*s3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + + vpmuludq $H0,$T1,$T4 # h0*r1 + vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp + vpaddq $T4,$D1,$D1 # d1 += h0*r1 + 
vpaddq $H2,$D2,$D2 # d2 += h1*r1 + vpmuludq $H3,$T1,$T4 # h3*r1 + vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1 + vpaddq $T4,$D4,$D4 # d4 += h3*r1 + vpaddq $H2,$D0,$D0 # d0 += h4*s1 + vmovdqa `32*4-0x90`(%rax),$T1 # s2 + + vpmuludq $H0,$T0,$T4 # h0*r0 + vpmuludq $H1,$T0,$H2 # h1*r0 + vpaddq $T4,$D0,$D0 # d0 += h0*r0 + vpaddq $H2,$D1,$D1 # d1 += h1*r0 + vpmuludq $H3,$T0,$T4 # h3*r0 + vpmuludq $H4,$T0,$H2 # h4*r0 + vmovdqu 16*0($inp),%x#$T0 # load input + vpaddq $T4,$D3,$D3 # d3 += h3*r0 + vpaddq $H2,$D4,$D4 # d4 += h4*r0 + vinserti128 \$1,16*2($inp),$T0,$T0 + + vpmuludq $H3,$T1,$T4 # h3*s2 + vpmuludq $H4,$T1,$H2 # h4*s2 + vmovdqu 16*1($inp),%x#$T1 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + vpaddq $H2,$D1,$D1 # d1 += h4*s2 + vmovdqa `32*5-0x90`(%rax),$H2 # r3 + vpmuludq $H1,$T2,$T4 # h1*r2 + vpmuludq $H0,$T2,$T2 # h0*r2 + vpaddq $T4,$D3,$D3 # d3 += h1*r2 + vpaddq $T2,$D2,$D2 # d2 += h0*r2 + vinserti128 \$1,16*3($inp),$T1,$T1 + lea 16*4($inp),$inp + + vpmuludq $H1,$H2,$T4 # h1*r3 + vpmuludq $H0,$H2,$H2 # h0*r3 + vpsrldq \$6,$T0,$T2 # splat input + vpaddq $T4,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $H3,$T3,$T4 # h3*s3 + vpmuludq $H4,$T3,$H2 # h4*s3 + vpsrldq \$6,$T1,$T3 + vpaddq $T4,$D1,$D1 # d1 += h3*s3 + vpaddq $H2,$D2,$D2 # d2 += h4*s3 + vpunpckhqdq $T1,$T0,$T4 # 4 + + vpmuludq $H3,$S4,$H3 # h3*s4 + vpmuludq $H4,$S4,$H4 # h4*s4 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4 + vpmuludq $H1,$S4,$H0 # h1*s4 + vmovdqa 64(%rcx),$MASK # .Lmask26 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + ################################################################ + # lazy reduction (interleaved with tail of input splat) + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq 
\$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$4,$T3,$T2 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpand $MASK,$T2,$T2 # 2 + vpsrlq \$26,$T0,$T1 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpaddq $T2,$H2,$H2 # modulo-scheduled + vpsrlq \$30,$T3,$T3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$40,$T4,$T4 # 4 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpand $MASK,$T0,$T0 # 0 + vpand $MASK,$T1,$T1 # 1 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + sub \$64,$len + jnz .Loop_avx2$suffix + + .byte 0x66,0x90 +.Ltail_avx2$suffix: + ################################################################ + # while above multiplications were by r^4 in all lanes, in last + # iteration we multiply least significant lane by r^4 and most + # significant one by r, so copy of above except that references + # to the precomputed table are displaced by 4... 
+ + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + vmovdqu `32*0+4`(%rsp),$T0 # r0^4 + vpaddq $H1,$T1,$H1 + vmovdqu `32*1+4`(%rsp),$T1 # r1^4 + vpaddq $H3,$T3,$H3 + vmovdqu `32*3+4`(%rsp),$T2 # r2^4 + vpaddq $H4,$T4,$H4 + vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4 + vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4 + + vpmuludq $H2,$T0,$D2 # d2 = h2*r0 + vpmuludq $H2,$T1,$D3 # d3 = h2*r1 + vpmuludq $H2,$T2,$D4 # d4 = h2*r2 + vpmuludq $H2,$T3,$D0 # d0 = h2*s3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + + vpmuludq $H0,$T1,$T4 # h0*r1 + vpmuludq $H1,$T1,$H2 # h1*r1 + vpaddq $T4,$D1,$D1 # d1 += h0*r1 + vpaddq $H2,$D2,$D2 # d2 += h1*r1 + vpmuludq $H3,$T1,$T4 # h3*r1 + vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1 + vpaddq $T4,$D4,$D4 # d4 += h3*r1 + vpaddq $H2,$D0,$D0 # d0 += h4*s1 + + vpmuludq $H0,$T0,$T4 # h0*r0 + vpmuludq $H1,$T0,$H2 # h1*r0 + vpaddq $T4,$D0,$D0 # d0 += h0*r0 + vmovdqu `32*4+4-0x90`(%rax),$T1 # s2 + vpaddq $H2,$D1,$D1 # d1 += h1*r0 + vpmuludq $H3,$T0,$T4 # h3*r0 + vpmuludq $H4,$T0,$H2 # h4*r0 + vpaddq $T4,$D3,$D3 # d3 += h3*r0 + vpaddq $H2,$D4,$D4 # d4 += h4*r0 + + vpmuludq $H3,$T1,$T4 # h3*s2 + vpmuludq $H4,$T1,$H2 # h4*s2 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + vpaddq $H2,$D1,$D1 # d1 += h4*s2 + vmovdqu `32*5+4-0x90`(%rax),$H2 # r3 + vpmuludq $H1,$T2,$T4 # h1*r2 + vpmuludq $H0,$T2,$T2 # h0*r2 + vpaddq $T4,$D3,$D3 # d3 += h1*r2 + vpaddq $T2,$D2,$D2 # d2 += h0*r2 + + vpmuludq $H1,$H2,$T4 # h1*r3 + vpmuludq $H0,$H2,$H2 # h0*r3 + vpaddq $T4,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $H3,$T3,$T4 # h3*s3 + vpmuludq $H4,$T3,$H2 # h4*s3 + vpaddq $T4,$D1,$D1 # d1 += h3*s3 + vpaddq $H2,$D2,$D2 # d2 += h4*s3 + + vpmuludq $H3,$S4,$H3 # h3*s4 + vpmuludq $H4,$S4,$H4 # h4*s4 + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 + vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4 + vpmuludq $H1,$S4,$H0 # h1*s4 + vmovdqa 64(%rcx),$MASK # .Lmask26 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + 
################################################################ + # horizontal addition + + vpsrldq \$8,$D1,$T1 + vpsrldq \$8,$H2,$T2 + vpsrldq \$8,$H3,$T3 + vpsrldq \$8,$H4,$T4 + vpsrldq \$8,$H0,$T0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$H2,$H2 + vpaddq $T3,$H3,$H3 + vpaddq $T4,$H4,$H4 + vpaddq $T0,$H0,$H0 + + vpermq \$0x2,$H3,$T3 + vpermq \$0x2,$H4,$T4 + vpermq \$0x2,$H0,$T0 + vpermq \$0x2,$D1,$T1 + vpermq \$0x2,$H2,$T2 + vpaddq $T3,$H3,$H3 + vpaddq $T4,$H4,$H4 + vpaddq $T0,$H0,$H0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$H2,$H2 + + ################################################################ + # lazy reduction + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced + vmovd %x#$H1,`4*1-48-64`($ctx) + vmovd %x#$H2,`4*2-48-64`($ctx) + vmovd %x#$H3,`4*3-48-64`($ctx) + vmovd %x#$H4,`4*4-48-64`($ctx) +___ +$code.=<<___ if ($win64); + vmovdqa -0xb0(%r10),%xmm6 + vmovdqa -0xa0(%r10),%xmm7 + vmovdqa -0x90(%r10),%xmm8 + vmovdqa -0x80(%r10),%xmm9 + vmovdqa -0x70(%r10),%xmm10 + vmovdqa -0x60(%r10),%xmm11 + vmovdqa -0x50(%r10),%xmm12 + vmovdqa -0x40(%r10),%xmm13 + vmovdqa -0x30(%r10),%xmm14 + vmovdqa -0x20(%r10),%xmm15 + lea -8(%r10),%rsp +.Ldo_avx2_epilogue$suffix: +___ +$code.=<<___ if (!$win64); + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + vzeroupper + ret +.cfi_endproc +___ +if($avx > 2 && $avx512) { +my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24)); 
+my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29)); +my $PADBIT="%zmm30"; + +map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain +map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4)); +map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4)); +map(s/%y/%z/,($MASK)); + +$code.=<<___; +.cfi_startproc +.Lblocks_avx512: + mov \$15,%eax + kmovw %eax,%k2 +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + sub \$0x128,%rsp +___ +$code.=<<___ if ($win64); + lea 8(%rsp),%r10 + sub \$0x1c8,%rsp + vmovdqa %xmm6,-0xb0(%r10) + vmovdqa %xmm7,-0xa0(%r10) + vmovdqa %xmm8,-0x90(%r10) + vmovdqa %xmm9,-0x80(%r10) + vmovdqa %xmm10,-0x70(%r10) + vmovdqa %xmm11,-0x60(%r10) + vmovdqa %xmm12,-0x50(%r10) + vmovdqa %xmm13,-0x40(%r10) + vmovdqa %xmm14,-0x30(%r10) + vmovdqa %xmm15,-0x20(%r10) +.Ldo_avx512_body: +___ +$code.=<<___; + lea .Lconst(%rip),%rcx + lea 48+64($ctx),$ctx # size optimization + vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2 + + # expand pre-calculated table + vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0} + and \$-512,%rsp + vmovdqu `16*1-64`($ctx),%x#$D1 # will become ... ${R1} + mov \$0x20,%rax + vmovdqu `16*2-64`($ctx),%x#$T0 # ... ${S1} + vmovdqu `16*3-64`($ctx),%x#$D2 # ... ${R2} + vmovdqu `16*4-64`($ctx),%x#$T1 # ... ${S2} + vmovdqu `16*5-64`($ctx),%x#$D3 # ... ${R3} + vmovdqu `16*6-64`($ctx),%x#$T3 # ... ${S3} + vmovdqu `16*7-64`($ctx),%x#$D4 # ... ${R4} + vmovdqu `16*8-64`($ctx),%x#$T4 # ... 
${S4} + vpermd $D0,$T2,$R0 # 00003412 -> 14243444 + vpbroadcastq 64(%rcx),$MASK # .Lmask26 + vpermd $D1,$T2,$R1 + vpermd $T0,$T2,$S1 + vpermd $D2,$T2,$R2 + vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0 + vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304 + vpermd $T1,$T2,$S2 + vmovdqu64 $R1,0x00(%rsp,%rax){%k2} + vpsrlq \$32,$R1,$T1 + vpermd $D3,$T2,$R3 + vmovdqa64 $S1,0x40(%rsp){%k2} + vpermd $T3,$T2,$S3 + vpermd $D4,$T2,$R4 + vmovdqu64 $R2,0x40(%rsp,%rax){%k2} + vpermd $T4,$T2,$S4 + vmovdqa64 $S2,0x80(%rsp){%k2} + vmovdqu64 $R3,0x80(%rsp,%rax){%k2} + vmovdqa64 $S3,0xc0(%rsp){%k2} + vmovdqu64 $R4,0xc0(%rsp,%rax){%k2} + vmovdqa64 $S4,0x100(%rsp){%k2} + + ################################################################ + # calculate 5th through 8th powers of the key + # + # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1 + # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2 + # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3 + # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4 + # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0 + + vpmuludq $T0,$R0,$D0 # d0 = r0'*r0 + vpmuludq $T0,$R1,$D1 # d1 = r0'*r1 + vpmuludq $T0,$R2,$D2 # d2 = r0'*r2 + vpmuludq $T0,$R3,$D3 # d3 = r0'*r3 + vpmuludq $T0,$R4,$D4 # d4 = r0'*r4 + vpsrlq \$32,$R2,$T2 + + vpmuludq $T1,$S4,$M0 + vpmuludq $T1,$R0,$M1 + vpmuludq $T1,$R1,$M2 + vpmuludq $T1,$R2,$M3 + vpmuludq $T1,$R3,$M4 + vpsrlq \$32,$R3,$T3 + vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4 + vpaddq $M1,$D1,$D1 # d1 += r1'*r0 + vpaddq $M2,$D2,$D2 # d2 += r1'*r1 + vpaddq $M3,$D3,$D3 # d3 += r1'*r2 + vpaddq $M4,$D4,$D4 # d4 += r1'*r3 + + vpmuludq $T2,$S3,$M0 + vpmuludq $T2,$S4,$M1 + vpmuludq $T2,$R1,$M3 + vpmuludq $T2,$R2,$M4 + vpmuludq $T2,$R0,$M2 + vpsrlq \$32,$R4,$T4 + vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3 + vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4 + vpaddq $M3,$D3,$D3 # d3 += r2'*r1 + vpaddq $M4,$D4,$D4 # d4 += r2'*r2 + vpaddq $M2,$D2,$D2 # d2 += r2'*r0 + + vpmuludq $T3,$S2,$M0 + vpmuludq $T3,$R0,$M3 + vpmuludq $T3,$R1,$M4 + vpmuludq 
$T3,$S3,$M1 + vpmuludq $T3,$S4,$M2 + vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2 + vpaddq $M3,$D3,$D3 # d3 += r3'*r0 + vpaddq $M4,$D4,$D4 # d4 += r3'*r1 + vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3 + vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4 + + vpmuludq $T4,$S4,$M3 + vpmuludq $T4,$R0,$M4 + vpmuludq $T4,$S1,$M0 + vpmuludq $T4,$S2,$M1 + vpmuludq $T4,$S3,$M2 + vpaddq $M3,$D3,$D3 # d3 += r2'*5*r4 + vpaddq $M4,$D4,$D4 # d4 += r2'*r0 + vpaddq $M0,$D0,$D0 # d0 += r2'*5*r1 + vpaddq $M1,$D1,$D1 # d1 += r2'*5*r2 + vpaddq $M2,$D2,$D2 # d2 += r2'*5*r3 + + ################################################################ + # load input + vmovdqu64 16*0($inp),%z#$T3 + vmovdqu64 16*4($inp),%z#$T4 + lea 16*8($inp),$inp + + ################################################################ + # lazy reduction + + vpsrlq \$26,$D3,$M3 + vpandq $MASK,$D3,$D3 + vpaddq $M3,$D4,$D4 # d3 -> d4 + + vpsrlq \$26,$D0,$M0 + vpandq $MASK,$D0,$D0 + vpaddq $M0,$D1,$D1 # d0 -> d1 + + vpsrlq \$26,$D4,$M4 + vpandq $MASK,$D4,$D4 + + vpsrlq \$26,$D1,$M1 + vpandq $MASK,$D1,$D1 + vpaddq $M1,$D2,$D2 # d1 -> d2 + + vpaddq $M4,$D0,$D0 + vpsllq \$2,$M4,$M4 + vpaddq $M4,$D0,$D0 # d4 -> d0 + + vpsrlq \$26,$D2,$M2 + vpandq $MASK,$D2,$D2 + vpaddq $M2,$D3,$D3 # d2 -> d3 + + vpsrlq \$26,$D0,$M0 + vpandq $MASK,$D0,$D0 + vpaddq $M0,$D1,$D1 # d0 -> d1 + + vpsrlq \$26,$D3,$M3 + vpandq $MASK,$D3,$D3 + vpaddq $M3,$D4,$D4 # d3 -> d4 + + ################################################################ + # at this point we have 14243444 in $R0-$S4 and 05060708 in + # $D0-$D4, ... + + vpunpcklqdq $T4,$T3,$T0 # transpose input + vpunpckhqdq $T4,$T3,$T4 + + # ... since input 64-bit lanes are ordered as 73625140, we could + # "vperm" it to 76543210 (here and in each loop iteration), *or* + # we could just flow along, hence the goal for $R0-$S4 is + # 1858286838784888 ... 
+ + vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512: + mov \$0x7777,%eax + kmovw %eax,%k1 + + vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4--- + vpermd $R1,$M0,$R1 + vpermd $R2,$M0,$R2 + vpermd $R3,$M0,$R3 + vpermd $R4,$M0,$R4 + + vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888 + vpermd $D1,$M0,${R1}{%k1} + vpermd $D2,$M0,${R2}{%k1} + vpermd $D3,$M0,${R3}{%k1} + vpermd $D4,$M0,${R4}{%k1} + + vpslld \$2,$R1,$S1 # *5 + vpslld \$2,$R2,$S2 + vpslld \$2,$R3,$S3 + vpslld \$2,$R4,$S4 + vpaddd $R1,$S1,$S1 + vpaddd $R2,$S2,$S2 + vpaddd $R3,$S3,$S3 + vpaddd $R4,$S4,$S4 + + vpbroadcastq 32(%rcx),$PADBIT # .L129 + + vpsrlq \$52,$T0,$T2 # splat input + vpsllq \$12,$T4,$T3 + vporq $T3,$T2,$T2 + vpsrlq \$26,$T0,$T1 + vpsrlq \$14,$T4,$T3 + vpsrlq \$40,$T4,$T4 # 4 + vpandq $MASK,$T2,$T2 # 2 + vpandq $MASK,$T0,$T0 # 0 + #vpandq $MASK,$T1,$T1 # 1 + #vpandq $MASK,$T3,$T3 # 3 + #vporq $PADBIT,$T4,$T4 # padbit, yes, always + + vpaddq $H2,$T2,$H2 # accumulate input + sub \$192,$len + jbe .Ltail_avx512 + jmp .Loop_avx512 + +.align 32 +.Loop_avx512: + ################################################################ + # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8 + # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7 + # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6 + # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5 + # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4 + # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3 + # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2 + # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1 + # \________/\___________/ + ################################################################ + #vpaddq $H2,$T2,$H2 # accumulate input + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # however, as h2 is "chronologically" first one available pull + # corresponding operations up, so it's + # + # d3 = h2*r1 + h0*r3 + h1*r2 + 
h3*r0 + h4*5*r4 + # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0 + # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1 + # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2 + # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3 + + vpmuludq $H2,$R1,$D3 # d3 = h2*r1 + vpaddq $H0,$T0,$H0 + vpmuludq $H2,$R2,$D4 # d4 = h2*r2 + vpandq $MASK,$T1,$T1 # 1 + vpmuludq $H2,$S3,$D0 # d0 = h2*s3 + vpandq $MASK,$T3,$T3 # 3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + vporq $PADBIT,$T4,$T4 # padbit, yes, always + vpmuludq $H2,$R0,$D2 # d2 = h2*r0 + vpaddq $H1,$T1,$H1 # accumulate input + vpaddq $H3,$T3,$H3 + vpaddq $H4,$T4,$H4 + + vmovdqu64 16*0($inp),$T3 # load input + vmovdqu64 16*4($inp),$T4 + lea 16*8($inp),$inp + vpmuludq $H0,$R3,$M3 + vpmuludq $H0,$R4,$M4 + vpmuludq $H0,$R0,$M0 + vpmuludq $H0,$R1,$M1 + vpaddq $M3,$D3,$D3 # d3 += h0*r3 + vpaddq $M4,$D4,$D4 # d4 += h0*r4 + vpaddq $M0,$D0,$D0 # d0 += h0*r0 + vpaddq $M1,$D1,$D1 # d1 += h0*r1 + + vpmuludq $H1,$R2,$M3 + vpmuludq $H1,$R3,$M4 + vpmuludq $H1,$S4,$M0 + vpmuludq $H0,$R2,$M2 + vpaddq $M3,$D3,$D3 # d3 += h1*r2 + vpaddq $M4,$D4,$D4 # d4 += h1*r3 + vpaddq $M0,$D0,$D0 # d0 += h1*s4 + vpaddq $M2,$D2,$D2 # d2 += h0*r2 + + vpunpcklqdq $T4,$T3,$T0 # transpose input + vpunpckhqdq $T4,$T3,$T4 + + vpmuludq $H3,$R0,$M3 + vpmuludq $H3,$R1,$M4 + vpmuludq $H1,$R0,$M1 + vpmuludq $H1,$R1,$M2 + vpaddq $M3,$D3,$D3 # d3 += h3*r0 + vpaddq $M4,$D4,$D4 # d4 += h3*r1 + vpaddq $M1,$D1,$D1 # d1 += h1*r0 + vpaddq $M2,$D2,$D2 # d2 += h1*r1 + + vpmuludq $H4,$S4,$M3 + vpmuludq $H4,$R0,$M4 + vpmuludq $H3,$S2,$M0 + vpmuludq $H3,$S3,$M1 + vpaddq $M3,$D3,$D3 # d3 += h4*s4 + vpmuludq $H3,$S4,$M2 + vpaddq $M4,$D4,$D4 # d4 += h4*r0 + vpaddq $M0,$D0,$D0 # d0 += h3*s2 + vpaddq $M1,$D1,$D1 # d1 += h3*s3 + vpaddq $M2,$D2,$D2 # d2 += h3*s4 + + vpmuludq $H4,$S1,$M0 + vpmuludq $H4,$S2,$M1 + vpmuludq $H4,$S3,$M2 + vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 + vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 + vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 + + 
################################################################ + # lazy reduction (interleaved with input splat) + + vpsrlq \$52,$T0,$T2 # splat input + vpsllq \$12,$T4,$T3 + + vpsrlq \$26,$D3,$H3 + vpandq $MASK,$D3,$D3 + vpaddq $H3,$D4,$H4 # h3 -> h4 + + vporq $T3,$T2,$T2 + + vpsrlq \$26,$H0,$D0 + vpandq $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpandq $MASK,$T2,$T2 # 2 + + vpsrlq \$26,$H4,$D4 + vpandq $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpandq $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpaddq $T2,$H2,$H2 # modulo-scheduled + vpsrlq \$26,$T0,$T1 + + vpsrlq \$26,$H2,$D2 + vpandq $MASK,$H2,$H2 + vpaddq $D2,$D3,$H3 # h2 -> h3 + + vpsrlq \$14,$T4,$T3 + + vpsrlq \$26,$H0,$D0 + vpandq $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$40,$T4,$T4 # 4 + + vpsrlq \$26,$H3,$D3 + vpandq $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpandq $MASK,$T0,$T0 # 0 + #vpandq $MASK,$T1,$T1 # 1 + #vpandq $MASK,$T3,$T3 # 3 + #vporq $PADBIT,$T4,$T4 # padbit, yes, always + + sub \$128,$len + ja .Loop_avx512 + +.Ltail_avx512: + ################################################################ + # while above multiplications were by r^8 in all lanes, in last + # iteration we multiply least significant lane by r^8 and most + # significant one by r, that's why table gets shifted... 
+ + vpsrlq \$32,$R0,$R0 # 0105020603070408 + vpsrlq \$32,$R1,$R1 + vpsrlq \$32,$R2,$R2 + vpsrlq \$32,$S3,$S3 + vpsrlq \$32,$S4,$S4 + vpsrlq \$32,$R3,$R3 + vpsrlq \$32,$R4,$R4 + vpsrlq \$32,$S1,$S1 + vpsrlq \$32,$S2,$S2 + + ################################################################ + # load either next or last 64 byte of input + lea ($inp,$len),$inp + + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + + vpmuludq $H2,$R1,$D3 # d3 = h2*r1 + vpmuludq $H2,$R2,$D4 # d4 = h2*r2 + vpmuludq $H2,$S3,$D0 # d0 = h2*s3 + vpandq $MASK,$T1,$T1 # 1 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + vpandq $MASK,$T3,$T3 # 3 + vpmuludq $H2,$R0,$D2 # d2 = h2*r0 + vporq $PADBIT,$T4,$T4 # padbit, yes, always + vpaddq $H1,$T1,$H1 # accumulate input + vpaddq $H3,$T3,$H3 + vpaddq $H4,$T4,$H4 + + vmovdqu 16*0($inp),%x#$T0 + vpmuludq $H0,$R3,$M3 + vpmuludq $H0,$R4,$M4 + vpmuludq $H0,$R0,$M0 + vpmuludq $H0,$R1,$M1 + vpaddq $M3,$D3,$D3 # d3 += h0*r3 + vpaddq $M4,$D4,$D4 # d4 += h0*r4 + vpaddq $M0,$D0,$D0 # d0 += h0*r0 + vpaddq $M1,$D1,$D1 # d1 += h0*r1 + + vmovdqu 16*1($inp),%x#$T1 + vpmuludq $H1,$R2,$M3 + vpmuludq $H1,$R3,$M4 + vpmuludq $H1,$S4,$M0 + vpmuludq $H0,$R2,$M2 + vpaddq $M3,$D3,$D3 # d3 += h1*r2 + vpaddq $M4,$D4,$D4 # d4 += h1*r3 + vpaddq $M0,$D0,$D0 # d0 += h1*s4 + vpaddq $M2,$D2,$D2 # d2 += h0*r2 + + vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0 + vpmuludq $H3,$R0,$M3 + vpmuludq $H3,$R1,$M4 + vpmuludq $H1,$R0,$M1 + vpmuludq $H1,$R1,$M2 + vpaddq $M3,$D3,$D3 # d3 += h3*r0 + vpaddq $M4,$D4,$D4 # d4 += h3*r1 + vpaddq $M1,$D1,$D1 # d1 += h1*r0 + vpaddq $M2,$D2,$D2 # d2 += h1*r1 + + vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1 + vpmuludq $H4,$S4,$M3 + vpmuludq $H4,$R0,$M4 + vpmuludq $H3,$S2,$M0 + vpmuludq $H3,$S3,$M1 + vpmuludq $H3,$S4,$M2 + vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4 + vpaddq $M4,$D4,$D4 # d4 += h4*r0 + vpaddq $M0,$D0,$D0 # d0 += h3*s2 + vpaddq $M1,$D1,$D1 # d1 += h3*s3 + vpaddq $M2,$D2,$D2 # d2 += h3*s4 + + vpmuludq $H4,$S1,$M0 + vpmuludq $H4,$S2,$M1 + vpmuludq $H4,$S3,$M2 + 
vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 + vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 + vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 + + ################################################################ + # horizontal addition + + mov \$1,%eax + vpermq \$0xb1,$H3,$D3 + vpermq \$0xb1,$D4,$H4 + vpermq \$0xb1,$H0,$D0 + vpermq \$0xb1,$H1,$D1 + vpermq \$0xb1,$H2,$D2 + vpaddq $D3,$H3,$H3 + vpaddq $D4,$H4,$H4 + vpaddq $D0,$H0,$H0 + vpaddq $D1,$H1,$H1 + vpaddq $D2,$H2,$H2 + + kmovw %eax,%k3 + vpermq \$0x2,$H3,$D3 + vpermq \$0x2,$H4,$D4 + vpermq \$0x2,$H0,$D0 + vpermq \$0x2,$H1,$D1 + vpermq \$0x2,$H2,$D2 + vpaddq $D3,$H3,$H3 + vpaddq $D4,$H4,$H4 + vpaddq $D0,$H0,$H0 + vpaddq $D1,$H1,$H1 + vpaddq $D2,$H2,$H2 + + vextracti64x4 \$0x1,$H3,%y#$D3 + vextracti64x4 \$0x1,$H4,%y#$D4 + vextracti64x4 \$0x1,$H0,%y#$D0 + vextracti64x4 \$0x1,$H1,%y#$D1 + vextracti64x4 \$0x1,$H2,%y#$D2 + vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case + vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2 + vpaddq $D0,$H0,${H0}{%k3}{z} + vpaddq $D1,$H1,${H1}{%k3}{z} + vpaddq $D2,$H2,${H2}{%k3}{z} +___ +map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT)); +map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK)); +$code.=<<___; + ################################################################ + # lazy reduction (interleaved with input splat) + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpunpcklqdq $T3,$T2,$T2 # 2:3 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpsrlq \$30,$T2,$T3 + vpsrlq \$4,$T2,$T2 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpsrlq \$26,$T0,$T1 + vpsrlq \$40,$T4,$T4 # 4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpand $MASK,$T2,$T2 # 2 + vpand 
$MASK,$T0,$T0 # 0 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2 + vpand $MASK,$T1,$T1 # 1 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + vpaddq $D3,$H4,$H4 # h3 -> h4 + + lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2 + add \$64,$len + jnz .Ltail_avx2$suffix + + vpsubq $T2,$H2,$H2 # undo input accumulation + vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced + vmovd %x#$H1,`4*1-48-64`($ctx) + vmovd %x#$H2,`4*2-48-64`($ctx) + vmovd %x#$H3,`4*3-48-64`($ctx) + vmovd %x#$H4,`4*4-48-64`($ctx) + vzeroall +___ +$code.=<<___ if ($win64); + movdqa -0xb0(%r10),%xmm6 + movdqa -0xa0(%r10),%xmm7 + movdqa -0x90(%r10),%xmm8 + movdqa -0x80(%r10),%xmm9 + movdqa -0x70(%r10),%xmm10 + movdqa -0x60(%r10),%xmm11 + movdqa -0x50(%r10),%xmm12 + movdqa -0x40(%r10),%xmm13 + movdqa -0x30(%r10),%xmm14 + movdqa -0x20(%r10),%xmm15 + lea -8(%r10),%rsp +.Ldo_avx512_epilogue: +___ +$code.=<<___ if (!$win64); + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + ret +.cfi_endproc +___ + +} + +} + +&declare_function("poly1305_blocks_avx2", 32, 4); +poly1305_blocks_avxN(0); +&end_function("poly1305_blocks_avx2"); + +if($kernel) { + $code .= "#endif\n"; +} + +####################################################################### +if ($avx>2) { +# On entry we have input length divisible by 64. But since inner loop +# processes 128 bytes per iteration, cases when length is not divisible +# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this +# reason stack layout is kept identical to poly1305_blocks_avx2. If not +# for this tail, we wouldn't have to even allocate stack frame... 
+ +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX512\n"; +} + +&declare_function("poly1305_blocks_avx512", 32, 4); +poly1305_blocks_avxN(1); +&end_function("poly1305_blocks_avx512"); + +if ($kernel) { + $code .= "#endif\n"; +} + +if (!$kernel && $avx>3) { +######################################################################## +# VPMADD52 version using 2^44 radix. +# +# One can argue that base 2^52 would be more natural. Well, even though +# some operations would be more natural, one has to recognize couple of +# things. Base 2^52 doesn't provide advantage over base 2^44 if you look +# at amount of multiply-n-accumulate operations. Secondly, it makes it +# impossible to pre-compute multiples of 5 [referred to as s[]/sN in +# reference implementations], which means that more such operations +# would have to be performed in inner loop, which in turn makes critical +# path longer. In other words, even though base 2^44 reduction might +# look less elegant, overall critical path is actually shorter... + +######################################################################## +# Layout of opaque area is following. 
+# +# unsigned __int64 h[3]; # current hash value base 2^44 +# unsigned __int64 s[2]; # key value*20 base 2^44 +# unsigned __int64 r[3]; # key value base 2^44 +# struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4]; +# # r^n positions reflect +# # placement in register, not +# # memory, R[3] is R[1]*20 + +$code.=<<___; +.type poly1305_init_base2_44,\@function,3 +.align 32 +poly1305_init_base2_44: + xor %rax,%rax + mov %rax,0($ctx) # initialize hash value + mov %rax,8($ctx) + mov %rax,16($ctx) + +.Linit_base2_44: + lea poly1305_blocks_vpmadd52(%rip),%r10 + lea poly1305_emit_base2_44(%rip),%r11 + + mov \$0x0ffffffc0fffffff,%rax + mov \$0x0ffffffc0ffffffc,%rcx + and 0($inp),%rax + mov \$0x00000fffffffffff,%r8 + and 8($inp),%rcx + mov \$0x00000fffffffffff,%r9 + and %rax,%r8 + shrd \$44,%rcx,%rax + mov %r8,40($ctx) # r0 + and %r9,%rax + shr \$24,%rcx + mov %rax,48($ctx) # r1 + lea (%rax,%rax,4),%rax # *5 + mov %rcx,56($ctx) # r2 + shl \$2,%rax # magic <<2 + lea (%rcx,%rcx,4),%rcx # *5 + shl \$2,%rcx # magic <<2 + mov %rax,24($ctx) # s1 + mov %rcx,32($ctx) # s2 + movq \$-1,64($ctx) # write impossible value +___ +$code.=<<___ if ($flavour !~ /elf32/); + mov %r10,0(%rdx) + mov %r11,8(%rdx) +___ +$code.=<<___ if ($flavour =~ /elf32/); + mov %r10d,0(%rdx) + mov %r11d,4(%rdx) +___ +$code.=<<___; + mov \$1,%eax + ret +.size poly1305_init_base2_44,.-poly1305_init_base2_44 +___ +{ +my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17)); +my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21)); +my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52,\@function,4 +.align 32 +poly1305_blocks_vpmadd52: + shr \$4,$len + jz .Lno_data_vpmadd52 # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + + # if powers of the key are not calculated yet, process up to 3 + # blocks with this single-block subroutine, otherwise ensure that + # length is divisible by 2 blocks and 
pass the rest down to next + # subroutine... + + mov \$3,%rax + mov \$1,%r10 + cmp \$4,$len # is input long + cmovae %r10,%rax + test %r8,%r8 # is power value impossible? + cmovns %r10,%rax + + and $len,%rax # is input of favourable length? + jz .Lblocks_vpmadd52_4x + + sub %rax,$len + mov \$7,%r10d + mov \$1,%r11d + kmovw %r10d,%k7 + lea .L2_44_inp_permd(%rip),%r10 + kmovw %r11d,%k1 + + vmovq $padbit,%x#$PAD + vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd + vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift + vpermq \$0xcf,$PAD,$PAD + vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask + + vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value + vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys + vmovdqu64 32($ctx),${r1r0s2}{%k7}{z} + vmovdqu64 24($ctx),${r0s2s1}{%k7}{z} + + vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt + vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft + + jmp .Loop_vpmadd52 + +.align 32 +.Loop_vpmadd52: + vmovdqu32 0($inp),%x#$T0 # load input as ----3210 + lea 16($inp),$inp + + vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110 + vpsrlvq $inp_shift,$T0,$T0 + vpandq $reduc_mask,$T0,$T0 + vporq $PAD,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo # accumulate input + + vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value + vpermq \$0b01010101,$Dlo,${H1}{%k7}{z} + vpermq \$0b10101010,$Dlo,${H2}{%k7}{z} + + vpxord $Dlo,$Dlo,$Dlo + vpxord $Dhi,$Dhi,$Dhi + + vpmadd52luq $r2r1r0,$H0,$Dlo + vpmadd52huq $r2r1r0,$H0,$Dhi + + vpmadd52luq $r1r0s2,$H1,$Dlo + vpmadd52huq $r1r0s2,$H1,$Dhi + + vpmadd52luq $r0s2s1,$H2,$Dlo + vpmadd52huq $r0s2s1,$H2,$Dhi + + vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword + vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword + vpandq $reduc_mask,$Dlo,$Dlo + + vpaddq $T0,$Dhi,$Dhi + + vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword + + vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-) + + vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost word + vpandq $reduc_mask,$Dlo,$Dlo + + vpermq \$0b10010011,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo + + vpermq 
\$0b10010011,$Dlo,${T0}{%k1}{z} + + vpaddq $T0,$Dlo,$Dlo + vpsllq \$2,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo + + dec %rax # len-=16 + jnz .Loop_vpmadd52 + + vmovdqu64 $Dlo,0($ctx){%k7} # store hash value + + test $len,$len + jnz .Lblocks_vpmadd52_4x + +.Lno_data_vpmadd52: + ret +.size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 +___ +} +{ +######################################################################## +# As implied by its name 4x subroutine processes 4 blocks in parallel +# (but handles even 4*n+2 blocks lengths). It takes up to 4th key power +# and is handled in 256-bit %ymm registers. + +my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); +my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); +my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52_4x,\@function,4 +.align 32 +poly1305_blocks_vpmadd52_4x: + shr \$4,$len + jz .Lno_data_vpmadd52_4x # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + +.Lblocks_vpmadd52_4x: + vpbroadcastq $padbit,$PAD + + vmovdqa64 .Lx_mask44(%rip),$mask44 + mov \$5,%eax + vmovdqa64 .Lx_mask42(%rip),$mask42 + kmovw %eax,%k1 # used in 2x path + + test %r8,%r8 # is power value impossible? + js .Linit_vpmadd52 # if it is, then init R[4] + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + + test \$3,$len # is length 4*n+2? + jnz .Lblocks_vpmadd52_2x_do + +.Lblocks_vpmadd52_4x_do: + vpbroadcastq 64($ctx),$R0 # load 4th power of the key + vpbroadcastq 96($ctx),$R1 + vpbroadcastq 128($ctx),$R2 + vpbroadcastq 160($ctx),$S1 + +.Lblocks_vpmadd52_4x_key_loaded: + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + test \$7,$len # is len 8*n? 
+ jz .Lblocks_vpmadd52_8x + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*2($inp),$T3 + lea 16*4($inp),$inp + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at this point 64-bit lanes are ordered as 3-1-2-0 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + sub \$4,$len + jz .Ltail_vpmadd52_4x + jmp .Loop_vpmadd52_4x + ud2 + +.align 32 +.Linit_vpmadd52: + vmovq 24($ctx),%x#$S1 # load key + vmovq 56($ctx),%x#$H2 + vmovq 32($ctx),%x#$S2 + vmovq 40($ctx),%x#$R0 + vmovq 48($ctx),%x#$R1 + + vmovdqa $R0,$H0 + vmovdqa $R1,$H1 + vmovdqa $H2,$R2 + + mov \$2,%eax + +.Lmul_init_vpmadd52: + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + 
+ vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + dec %eax + jz .Ldone_init_vpmadd52 + + vpunpcklqdq $R1,$H1,$R1 # 1,2 + vpbroadcastq %x#$H1,%x#$H1 # 2,2 + vpunpcklqdq $R2,$H2,$R2 + vpbroadcastq %x#$H2,%x#$H2 + vpunpcklqdq $R0,$H0,$R0 + vpbroadcastq %x#$H0,%x#$H0 + + vpsllq \$2,$R1,$S1 # S1 = R1*5*4 + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R1,$S1,$S1 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S1,$S1 + vpsllq \$2,$S2,$S2 + + jmp .Lmul_init_vpmadd52 + ud2 + +.align 32 +.Ldone_init_vpmadd52: + vinserti128 \$1,%x#$R1,$H1,$R1 # 1,2,3,4 + vinserti128 \$1,%x#$R2,$H2,$R2 + vinserti128 \$1,%x#$R0,$H0,$R0 + + vpermq \$0b11011000,$R1,$R1 # 1,3,2,4 + vpermq \$0b11011000,$R2,$R2 + vpermq \$0b11011000,$R0,$R0 + + vpsllq \$2,$R1,$S1 # S1 = R1*5*4 + vpaddq $R1,$S1,$S1 + vpsllq \$2,$S1,$S1 + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + + test \$3,$len # is length 4*n+2? + jnz .Ldone_init_vpmadd52_2x + + vmovdqu64 $R0,64($ctx) # save key powers + vpbroadcastq %x#$R0,$R0 # broadcast 4th power + vmovdqu64 $R1,96($ctx) + vpbroadcastq %x#$R1,$R1 + vmovdqu64 $R2,128($ctx) + vpbroadcastq %x#$R2,$R2 + vmovdqu64 $S1,160($ctx) + vpbroadcastq %x#$S1,$S1 + + jmp .Lblocks_vpmadd52_4x_key_loaded + ud2 + +.align 32 +.Ldone_init_vpmadd52_2x: + vmovdqu64 $R0,64($ctx) # save key powers + vpsrldq \$8,$R0,$R0 # 0-1-0-2 + vmovdqu64 $R1,96($ctx) + vpsrldq \$8,$R1,$R1 + vmovdqu64 $R2,128($ctx) + vpsrldq \$8,$R2,$R2 + vmovdqu64 $S1,160($ctx) + vpsrldq \$8,$S1,$S1 + jmp .Lblocks_vpmadd52_2x_key_loaded + ud2 + +.align 32 +.Lblocks_vpmadd52_2x_do: + vmovdqu64 128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers + vmovdqu64 160+8($ctx),${S1}{%k1}{z} + vmovdqu64 64+8($ctx),${R0}{%k1}{z} + vmovdqu64 96+8($ctx),${R1}{%k1}{z} + +.Lblocks_vpmadd52_2x_key_loaded: + vmovdqu64 16*0($inp),$T2 # load data + vpxorq $T3,$T3,$T3 + lea 16*2($inp),$inp + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at 
this point 64-bit lanes are ordered as x-1-x-0 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + jmp .Ltail_vpmadd52_2x + ud2 + +.align 32 +.Loop_vpmadd52_4x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*2($inp),$T3 + lea 16*4($inp),$inp + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction (interleaved with data splat) + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpsrlq \$24,$T3,$T2 + vporq $PAD,$T2,$T2 + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vporq $T3,$T1,$T1 + vpandq 
$mask44,$T1,$T1 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + sub \$4,$len # len-=64 + jnz .Loop_vpmadd52_4x + +.Ltail_vpmadd52_4x: + vmovdqu64 128($ctx),$R2 # load all key powers + vmovdqu64 160($ctx),$S1 + vmovdqu64 64($ctx),$R0 + vmovdqu64 96($ctx),$R1 + +.Ltail_vpmadd52_2x: + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # horizontal addition + + mov \$1,%eax + kmovw %eax,%k1 + vpsrldq \$8,$D0lo,$T0 + vpsrldq \$8,$D0hi,$H0 + vpsrldq \$8,$D1lo,$T1 + vpsrldq \$8,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpsrldq \$8,$D2lo,$T2 + vpsrldq \$8,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vpermq \$0x2,$D0lo,$T0 + vpermq \$0x2,$D0hi,$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vpermq \$0x2,$D1lo,$T1 + vpermq \$0x2,$D1hi,$H1 + vpaddq $T0,$D0lo,${D0lo}{%k1}{z} + vpaddq $H0,$D0hi,${D0hi}{%k1}{z} + vpermq \$0x2,$D2lo,$T2 + vpermq \$0x2,$D2hi,$H2 + vpaddq $T1,$D1lo,${D1lo}{%k1}{z} + vpaddq $H1,$D1hi,${D1hi}{%k1}{z} + vpaddq $T2,$D2lo,${D2lo}{%k1}{z} + vpaddq $H2,$D2hi,${D2hi}{%k1}{z} + + 
################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + # at this point $len is + # either 4*n+2 or 0... + sub \$2,$len # len-=32 + ja .Lblocks_vpmadd52_4x_do + + vmovq %x#$H0,0($ctx) + vmovq %x#$H1,8($ctx) + vmovq %x#$H2,16($ctx) + vzeroall + +.Lno_data_vpmadd52_4x: + ret +.size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x +___ +} +{ +######################################################################## +# As implied by its name 8x subroutine processes 8 blocks in parallel... +# This is intermediate version, as it's used only in cases when input +# length is either 8*n, 8*n+1 or 8*n+2... + +my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); +my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); +my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); +my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52_8x,\@function,4 +.align 32 +poly1305_blocks_vpmadd52_8x: + shr \$4,$len + jz .Lno_data_vpmadd52_8x # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + + vmovdqa64 .Lx_mask44(%rip),$mask44 + vmovdqa64 .Lx_mask42(%rip),$mask42 + + test %r8,%r8 # is power value impossible? 
+ js .Linit_vpmadd52 # if it is, then init R[4] + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + +.Lblocks_vpmadd52_8x: + ################################################################ + # fist we calculate more key powers + + vmovdqu64 128($ctx),$R2 # load 1-3-2-4 powers + vmovdqu64 160($ctx),$S1 + vmovdqu64 64($ctx),$R0 + vmovdqu64 96($ctx),$R1 + + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + vpbroadcastq %x#$R2,$RR2 # broadcast 4th power + vpbroadcastq %x#$R0,$RR0 + vpbroadcastq %x#$R1,$RR1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $RR2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $RR2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $RR2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $RR2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $RR2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $RR2,$R0,$D2hi + + vpmadd52luq $RR0,$R0,$D0lo + vpmadd52huq $RR0,$R0,$D0hi + vpmadd52luq $RR0,$R1,$D1lo + vpmadd52huq $RR0,$R1,$D1hi + vpmadd52luq $RR0,$R2,$D2lo + vpmadd52huq $RR0,$R2,$D2hi + + vpmadd52luq $RR1,$S2,$D0lo + vpmadd52huq $RR1,$S2,$D0hi + vpmadd52luq $RR1,$R0,$D1lo + vpmadd52huq $RR1,$R0,$D1hi + vpmadd52luq $RR1,$R1,$D2lo + vpmadd52huq $RR1,$R1,$D2hi + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$RR0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$RR1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$RR2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$RR0,$RR0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$RR0,$RR0 + + vpsrlq \$44,$RR0,$tmp # additional step + vpandq $mask44,$RR0,$RR0 + + vpaddq $tmp,$RR1,$RR1 + + ################################################################ + # At this 
point Rx holds 1324 powers, RRx - 5768, and the goal + # is 15263748, which reflects how data is loaded... + + vpunpcklqdq $R2,$RR2,$T2 # 3748 + vpunpckhqdq $R2,$RR2,$R2 # 1526 + vpunpcklqdq $R0,$RR0,$T0 + vpunpckhqdq $R0,$RR0,$R0 + vpunpcklqdq $R1,$RR1,$T1 + vpunpckhqdq $R1,$RR1,$R1 +___ +######## switch to %zmm +map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); +map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); +map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); +map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2); + +$code.=<<___; + vshufi64x2 \$0x44,$R2,$T2,$RR2 # 15263748 + vshufi64x2 \$0x44,$R0,$T0,$RR0 + vshufi64x2 \$0x44,$R1,$T1,$RR1 + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*4($inp),$T3 + lea 16*8($inp),$inp + + vpsllq \$2,$RR2,$SS2 # S2 = R2*5*4 + vpsllq \$2,$RR1,$SS1 # S1 = R1*5*4 + vpaddq $RR2,$SS2,$SS2 + vpaddq $RR1,$SS1,$SS1 + vpsllq \$2,$SS2,$SS2 + vpsllq \$2,$SS1,$SS1 + + vpbroadcastq $padbit,$PAD + vpbroadcastq %x#$mask44,$mask44 + vpbroadcastq %x#$mask42,$mask42 + + vpbroadcastq %x#$SS1,$S1 # broadcast 8th power + vpbroadcastq %x#$SS2,$S2 + vpbroadcastq %x#$RR0,$R0 + vpbroadcastq %x#$RR1,$R1 + vpbroadcastq %x#$RR2,$R2 + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at this point 64-bit lanes are ordered as 73625140 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + sub \$8,$len + jz .Ltail_vpmadd52_8x + jmp .Loop_vpmadd52_8x + +.align 32 +.Loop_vpmadd52_8x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + 
vpmadd52huq $H2,$R0,$D2hi + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*4($inp),$T3 + lea 16*8($inp),$inp + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction (interleaved with data splat) + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpsrlq \$24,$T3,$T2 + vporq $PAD,$T2,$T2 + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + sub \$8,$len # len-=128 + jnz .Loop_vpmadd52_8x + +.Ltail_vpmadd52_8x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$SS1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$SS1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$SS2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$SS2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$RR0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$RR0,$D2hi + + vpmadd52luq $H0,$RR0,$D0lo + vpmadd52huq $H0,$RR0,$D0hi + vpmadd52luq $H0,$RR1,$D1lo + vpmadd52huq $H0,$RR1,$D1hi + 
vpmadd52luq $H0,$RR2,$D2lo + vpmadd52huq $H0,$RR2,$D2hi + + vpmadd52luq $H1,$SS2,$D0lo + vpmadd52huq $H1,$SS2,$D0hi + vpmadd52luq $H1,$RR0,$D1lo + vpmadd52huq $H1,$RR0,$D1hi + vpmadd52luq $H1,$RR1,$D2lo + vpmadd52huq $H1,$RR1,$D2hi + + ################################################################ + # horizontal addition + + mov \$1,%eax + kmovw %eax,%k1 + vpsrldq \$8,$D0lo,$T0 + vpsrldq \$8,$D0hi,$H0 + vpsrldq \$8,$D1lo,$T1 + vpsrldq \$8,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpsrldq \$8,$D2lo,$T2 + vpsrldq \$8,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vpermq \$0x2,$D0lo,$T0 + vpermq \$0x2,$D0hi,$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vpermq \$0x2,$D1lo,$T1 + vpermq \$0x2,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpermq \$0x2,$D2lo,$T2 + vpermq \$0x2,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vextracti64x4 \$1,$D0lo,%y#$T0 + vextracti64x4 \$1,$D0hi,%y#$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vextracti64x4 \$1,$D1lo,%y#$T1 + vextracti64x4 \$1,$D1hi,%y#$H1 + vextracti64x4 \$1,$D2lo,%y#$T2 + vextracti64x4 \$1,$D2hi,%y#$H2 +___ +######## switch back to %ymm +map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); +map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); +map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); + +$code.=<<___; + vpaddq $T0,$D0lo,${D0lo}{%k1}{z} + vpaddq $H0,$D0hi,${D0hi}{%k1}{z} + vpaddq $T1,$D1lo,${D1lo}{%k1}{z} + vpaddq $H1,$D1hi,${D1hi}{%k1}{z} + vpaddq $T2,$D2lo,${D2lo}{%k1}{z} + vpaddq $H2,$D2hi,${D2hi}{%k1}{z} + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq 
$mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + ################################################################ + + vmovq %x#$H0,0($ctx) + vmovq %x#$H1,8($ctx) + vmovq %x#$H2,16($ctx) + vzeroall + +.Lno_data_vpmadd52_8x: + ret +.size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x +___ +} +$code.=<<___; +.type poly1305_emit_base2_44,\@function,3 +.align 32 +poly1305_emit_base2_44: + mov 0($ctx),%r8 # load hash value + mov 8($ctx),%r9 + mov 16($ctx),%r10 + + mov %r9,%rax + shr \$20,%r9 + shl \$44,%rax + mov %r10,%rcx + shr \$40,%r10 + shl \$24,%rcx + + add %rax,%r8 + adc %rcx,%r9 + adc \$0,%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? + cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +.size poly1305_emit_base2_44,.-poly1305_emit_base2_44 +___ +} } } +} + +if (!$kernel) +{ # chacha20-poly1305 helpers +my ($out,$inp,$otp,$len)=$win64 ? 
("%rcx","%rdx","%r8", "%r9") : # Win64 order + ("%rdi","%rsi","%rdx","%rcx"); # Unix order +$code.=<<___; +.globl xor128_encrypt_n_pad +.type xor128_encrypt_n_pad,\@abi-omnipotent +.align 16 +xor128_encrypt_n_pad: + sub $otp,$inp + sub $otp,$out + mov $len,%r10 # put len aside + shr \$4,$len # len / 16 + jz .Ltail_enc + nop +.Loop_enc_xmm: + movdqu ($inp,$otp),%xmm0 + pxor ($otp),%xmm0 + movdqu %xmm0,($out,$otp) + movdqa %xmm0,($otp) + lea 16($otp),$otp + dec $len + jnz .Loop_enc_xmm + + and \$15,%r10 # len % 16 + jz .Ldone_enc + +.Ltail_enc: + mov \$16,$len + sub %r10,$len + xor %eax,%eax +.Loop_enc_byte: + mov ($inp,$otp),%al + xor ($otp),%al + mov %al,($out,$otp) + mov %al,($otp) + lea 1($otp),$otp + dec %r10 + jnz .Loop_enc_byte + + xor %eax,%eax +.Loop_enc_pad: + mov %al,($otp) + lea 1($otp),$otp + dec $len + jnz .Loop_enc_pad + +.Ldone_enc: + mov $otp,%rax + ret +.size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad + +.globl xor128_decrypt_n_pad +.type xor128_decrypt_n_pad,\@abi-omnipotent +.align 16 +xor128_decrypt_n_pad: + sub $otp,$inp + sub $otp,$out + mov $len,%r10 # put len aside + shr \$4,$len # len / 16 + jz .Ltail_dec + nop +.Loop_dec_xmm: + movdqu ($inp,$otp),%xmm0 + movdqa ($otp),%xmm1 + pxor %xmm0,%xmm1 + movdqu %xmm1,($out,$otp) + movdqa %xmm0,($otp) + lea 16($otp),$otp + dec $len + jnz .Loop_dec_xmm + + pxor %xmm1,%xmm1 + and \$15,%r10 # len % 16 + jz .Ldone_dec + +.Ltail_dec: + mov \$16,$len + sub %r10,$len + xor %eax,%eax + xor %r11,%r11 +.Loop_dec_byte: + mov ($inp,$otp),%r11b + mov ($otp),%al + xor %r11b,%al + mov %al,($out,$otp) + mov %r11b,($otp) + lea 1($otp),$otp + dec %r10 + jnz .Loop_dec_byte + + xor %eax,%eax +.Loop_dec_pad: + mov %al,($otp) + lea 1($otp),$otp + dec $len + jnz .Loop_dec_pad + +.Ldone_dec: + mov $otp,%rax + ret +.size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad +___ +} + +# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, +# CONTEXT *context,DISPATCHER_CONTEXT *disp) +if ($win64) { +$rec="%rcx"; 
+$frame="%rdx"; +$context="%r8"; +$disp="%r9"; + +$code.=<<___; +.extern __imp_RtlVirtualUnwind +.type se_handler,\@abi-omnipotent +.align 16 +se_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->Rip<.Lprologue + jb .Lcommon_seh_tail + + mov 152($context),%rax # pull context->Rsp + + mov 4(%r11),%r10d # HandlerData[1] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=.Lepilogue + jae .Lcommon_seh_tail + + lea 48(%rax),%rax + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov -32(%rax),%r13 + mov -40(%rax),%r14 + mov -48(%rax),%r15 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore context->Rbp + mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + mov %r14,232($context) # restore context->R14 + mov %r15,240($context) # restore context->R14 + + jmp .Lcommon_seh_tail +.size se_handler,.-se_handler + +.type avx_handler,\@abi-omnipotent +.align 16 +avx_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->RipRsp + + mov 4(%r11),%r10d # HandlerData[1] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=epilogue label + jae .Lcommon_seh_tail + + mov 208($context),%rax # pull context->R11 + + lea 0x50(%rax),%rsi + lea 0xf8(%rax),%rax + lea 
512($context),%rdi # &context.Xmm6 + mov \$20,%ecx + .long 0xa548f3fc # cld; rep movsq + +.Lcommon_seh_tail: + mov 8(%rax),%rdi + mov 16(%rax),%rsi + mov %rax,152($context) # restore context->Rsp + mov %rsi,168($context) # restore context->Rsi + mov %rdi,176($context) # restore context->Rdi + + mov 40($disp),%rdi # disp->ContextRecord + mov $context,%rsi # context + mov \$154,%ecx # sizeof(CONTEXT) + .long 0xa548f3fc # cld; rep movsq + + mov $disp,%rsi + xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + mov 8(%rsi),%rdx # arg2, disp->ImageBase + mov 0(%rsi),%r8 # arg3, disp->ControlPc + mov 16(%rsi),%r9 # arg4, disp->FunctionEntry + mov 40(%rsi),%r10 # disp->ContextRecord + lea 56(%rsi),%r11 # &disp->HandlerData + lea 24(%rsi),%r12 # &disp->EstablisherFrame + mov %r10,32(%rsp) # arg5 + mov %r11,40(%rsp) # arg6 + mov %r12,48(%rsp) # arg7 + mov %rcx,56(%rsp) # arg8, (NULL) + call *__imp_RtlVirtualUnwind(%rip) + + mov \$1,%eax # ExceptionContinueSearch + add \$64,%rsp + popfq + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %rdi + pop %rsi + ret +.size avx_handler,.-avx_handler + +.section .pdata +.align 4 + .rva .LSEH_begin_poly1305_init_x86_64 + .rva .LSEH_end_poly1305_init_x86_64 + .rva .LSEH_info_poly1305_init_x86_64 + + .rva .LSEH_begin_poly1305_blocks_x86_64 + .rva .LSEH_end_poly1305_blocks_x86_64 + .rva .LSEH_info_poly1305_blocks_x86_64 + + .rva .LSEH_begin_poly1305_emit_x86_64 + .rva .LSEH_end_poly1305_emit_x86_64 + .rva .LSEH_info_poly1305_emit_x86_64 +___ +$code.=<<___ if ($avx); + .rva .LSEH_begin_poly1305_blocks_avx + .rva .Lbase2_64_avx + .rva .LSEH_info_poly1305_blocks_avx_1 + + .rva .Lbase2_64_avx + .rva .Leven_avx + .rva .LSEH_info_poly1305_blocks_avx_2 + + .rva .Leven_avx + .rva .LSEH_end_poly1305_blocks_avx + .rva .LSEH_info_poly1305_blocks_avx_3 + + .rva .LSEH_begin_poly1305_emit_avx + .rva .LSEH_end_poly1305_emit_avx + .rva .LSEH_info_poly1305_emit_avx +___ +$code.=<<___ if ($avx>1); + .rva .LSEH_begin_poly1305_blocks_avx2 + .rva 
.Lbase2_64_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_1 + + .rva .Lbase2_64_avx2 + .rva .Leven_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_2 + + .rva .Leven_avx2 + .rva .LSEH_end_poly1305_blocks_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_3 +___ +$code.=<<___ if ($avx>2); + .rva .LSEH_begin_poly1305_blocks_avx512 + .rva .LSEH_end_poly1305_blocks_avx512 + .rva .LSEH_info_poly1305_blocks_avx512 +___ +$code.=<<___; +.section .xdata +.align 8 +.LSEH_info_poly1305_init_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64 + +.LSEH_info_poly1305_blocks_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_body,.Lblocks_epilogue + +.LSEH_info_poly1305_emit_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64 +___ +$code.=<<___ if ($avx); +.LSEH_info_poly1305_blocks_avx_1: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx_2: + .byte 9,0,0,0 + .rva se_handler + .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx_3: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_emit_avx: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx +___ +$code.=<<___ if ($avx>1); +.LSEH_info_poly1305_blocks_avx2_1: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx2_2: + .byte 9,0,0,0 + .rva se_handler + .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx2_3: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[] +___ +$code.=<<___ if ($avx>2); +.LSEH_info_poly1305_blocks_avx512: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # 
HandlerData[] +___ +} + +open SELF,$0; +while() { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split('\n',$code)) { + s/\`([^\`]*)\`/eval($1)/ge; + s/%r([a-z]+)#d/%e$1/g; + s/%r([0-9]+)#d/%r$1d/g; + s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g; + + if ($kernel) { + s/(^\.type.*),[0-9]+$/\1/; + s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/; + next if /^\.cfi.*/; + } + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305.c b/net/wireguard/crypto/zinc/poly1305/poly1305.c new file mode 100644 index 000000000000..a54bc3309cf2 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * Implementation of the Poly1305 message authenticator. + * + * Information: https://cr.yp.to/mac.html + */ + +#include +#include "../selftest/run.h" + +#include +#include +#include +#include +#include + +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "poly1305-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64) +#include "poly1305-arm-glue.c" +#elif defined(CONFIG_ZINC_ARCH_MIPS) || defined(CONFIG_ZINC_ARCH_MIPS64) +#include "poly1305-mips-glue.c" +#else +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + return false; +} +static inline bool poly1305_blocks_arch(void *ctx, const u8 *input, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + return false; +} +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + return false; +} +static bool *const poly1305_nobs[] __initconst = { }; +static void __init poly1305_fpu_init(void) +{ +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +#include "poly1305-donna64.c" +#else +#include "poly1305-donna32.c" 
+#endif + +void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE]) +{ + ctx->nonce[0] = get_unaligned_le32(&key[16]); + ctx->nonce[1] = get_unaligned_le32(&key[20]); + ctx->nonce[2] = get_unaligned_le32(&key[24]); + ctx->nonce[3] = get_unaligned_le32(&key[28]); + + if (!poly1305_init_arch(ctx->opaque, key)) + poly1305_init_generic(ctx->opaque, key); + + ctx->num = 0; +} + +static inline void poly1305_blocks(void *ctx, const u8 *input, const size_t len, + const u32 padbit, + simd_context_t *simd_context) +{ + if (!poly1305_blocks_arch(ctx, input, len, padbit, simd_context)) + poly1305_blocks_generic(ctx, input, len, padbit); +} + +static inline void poly1305_emit(void *ctx, u8 mac[POLY1305_KEY_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + if (!poly1305_emit_arch(ctx, mac, nonce, simd_context)) + poly1305_emit_generic(ctx, mac, nonce); +} + +void poly1305_update(struct poly1305_ctx *ctx, const u8 *input, size_t len, + simd_context_t *simd_context) +{ + const size_t num = ctx->num; + size_t rem; + + if (num) { + rem = POLY1305_BLOCK_SIZE - num; + if (len < rem) { + memcpy(ctx->data + num, input, len); + ctx->num = num + len; + return; + } + memcpy(ctx->data + num, input, rem); + poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1, + simd_context); + input += rem; + len -= rem; + } + + rem = len % POLY1305_BLOCK_SIZE; + len -= rem; + + if (len >= POLY1305_BLOCK_SIZE) { + poly1305_blocks(ctx->opaque, input, len, 1, simd_context); + input += len; + } + + if (rem) + memcpy(ctx->data, input, rem); + + ctx->num = rem; +} + +void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], + simd_context_t *simd_context) +{ + size_t num = ctx->num; + + if (num) { + ctx->data[num++] = 1; + while (num < POLY1305_BLOCK_SIZE) + ctx->data[num++] = 0; + poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0, + simd_context); + } + + poly1305_emit(ctx->opaque, mac, ctx->nonce, simd_context); + + 
memzero_explicit(ctx, sizeof(*ctx)); +} + +#include "../selftest/poly1305.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init poly1305_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + poly1305_fpu_init(); + if (!selftest_run("poly1305", poly1305_selftest, poly1305_nobs, + ARRAY_SIZE(poly1305_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Poly1305 one-time authenticator"); +MODULE_AUTHOR("Jason A. Donenfeld "); +#endif diff --git a/net/wireguard/crypto/zinc/selftest/blake2s.c b/net/wireguard/crypto/zinc/selftest/blake2s.c new file mode 100644 index 000000000000..1b5c210dc7a8 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/blake2s.c @@ -0,0 +1,2090 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { + { 0x69, 0x21, 0x7a, 0x30, 0x79, 0x90, 0x80, 0x94, + 0xe1, 0x11, 0x21, 0xd0, 0x42, 0x35, 0x4a, 0x7c, + 0x1f, 0x55, 0xb6, 0x48, 0x2c, 0xa1, 0xa5, 0x1e, + 0x1b, 0x25, 0x0d, 0xfd, 0x1e, 0xd0, 0xee, 0xf9 }, + { 0xe3, 0x4d, 0x74, 0xdb, 0xaf, 0x4f, 0xf4, 0xc6, + 0xab, 0xd8, 0x71, 0xcc, 0x22, 0x04, 0x51, 0xd2, + 0xea, 0x26, 0x48, 0x84, 0x6c, 0x77, 0x57, 0xfb, + 0xaa, 0xc8, 0x2f, 0xe5, 0x1a, 0xd6, 0x4b, 0xea }, + { 0xdd, 0xad, 0x9a, 0xb1, 0x5d, 0xac, 0x45, 0x49, + 0xba, 0x42, 0xf4, 0x9d, 0x26, 0x24, 0x96, 0xbe, + 0xf6, 0xc0, 0xba, 0xe1, 0xdd, 0x34, 0x2a, 0x88, + 0x08, 0xf8, 0xea, 0x26, 0x7c, 0x6e, 0x21, 0x0c }, + { 0xe8, 0xf9, 0x1c, 0x6e, 0xf2, 0x32, 0xa0, 0x41, + 0x45, 0x2a, 0xb0, 0xe1, 0x49, 0x07, 0x0c, 0xdd, + 0x7d, 0xd1, 0x76, 0x9e, 0x75, 0xb3, 0xa5, 0x92, + 0x1b, 0xe3, 0x78, 0x76, 0xc4, 0x5c, 0x99, 0x00 }, + { 0x0c, 0xc7, 0x0e, 0x00, 0x34, 0x8b, 0x86, 0xba, + 0x29, 0x44, 0xd0, 0xc3, 0x20, 0x38, 0xb2, 0x5c, + 0x55, 0x58, 0x4f, 0x90, 0xdf, 0x23, 0x04, 0xf5, + 0x5f, 0xa3, 0x32, 0xaf, 0x5f, 0xb0, 0x1e, 0x20 }, + { 0xec, 0x19, 0x64, 0x19, 0x10, 0x87, 0xa4, 0xfe, + 0x9d, 0xf1, 0xc7, 0x95, 0x34, 0x2a, 0x02, 0xff, + 0xc1, 0x91, 0xa5, 0xb2, 0x51, 0x76, 0x48, 0x56, + 0xae, 0x5b, 0x8b, 0x57, 0x69, 0xf0, 0xc6, 0xcd }, + { 0xe1, 0xfa, 0x51, 0x61, 0x8d, 0x7d, 0xf4, 0xeb, + 0x70, 0xcf, 0x0d, 0x5a, 0x9e, 0x90, 0x6f, 0x80, + 0x6e, 0x9d, 0x19, 0xf7, 0xf4, 0xf0, 0x1e, 0x3b, + 0x62, 0x12, 0x88, 0xe4, 0x12, 0x04, 0x05, 0xd6 }, + { 0x59, 0x80, 0x01, 0xfa, 0xfb, 0xe8, 0xf9, 0x4e, + 0xc6, 0x6d, 0xc8, 0x27, 0xd0, 0x12, 0xcf, 0xcb, + 0xba, 0x22, 0x28, 0x56, 0x9f, 0x44, 0x8e, 0x89, + 0xea, 0x22, 0x08, 0xc8, 0xbf, 0x76, 0x92, 0x93 }, + { 0xc7, 0xe8, 0x87, 0xb5, 0x46, 0x62, 0x36, 0x35, + 0xe9, 0x3e, 0x04, 0x95, 0x59, 0x8f, 0x17, 0x26, + 0x82, 0x19, 0x96, 0xc2, 0x37, 0x77, 0x05, 0xb9, + 0x3a, 0x1f, 0x63, 0x6f, 0x87, 0x2b, 0xfa, 0x2d }, + { 0xc3, 0x15, 0xa4, 0x37, 0xdd, 0x28, 0x06, 0x2a, + 0x77, 0x0d, 0x48, 0x19, 0x67, 
0x13, 0x6b, 0x1b, + 0x5e, 0xb8, 0x8b, 0x21, 0xee, 0x53, 0xd0, 0x32, + 0x9c, 0x58, 0x97, 0x12, 0x6e, 0x9d, 0xb0, 0x2c }, + { 0xbb, 0x47, 0x3d, 0xed, 0xdc, 0x05, 0x5f, 0xea, + 0x62, 0x28, 0xf2, 0x07, 0xda, 0x57, 0x53, 0x47, + 0xbb, 0x00, 0x40, 0x4c, 0xd3, 0x49, 0xd3, 0x8c, + 0x18, 0x02, 0x63, 0x07, 0xa2, 0x24, 0xcb, 0xff }, + { 0x68, 0x7e, 0x18, 0x73, 0xa8, 0x27, 0x75, 0x91, + 0xbb, 0x33, 0xd9, 0xad, 0xf9, 0xa1, 0x39, 0x12, + 0xef, 0xef, 0xe5, 0x57, 0xca, 0xfc, 0x39, 0xa7, + 0x95, 0x26, 0x23, 0xe4, 0x72, 0x55, 0xf1, 0x6d }, + { 0x1a, 0xc7, 0xba, 0x75, 0x4d, 0x6e, 0x2f, 0x94, + 0xe0, 0xe8, 0x6c, 0x46, 0xbf, 0xb2, 0x62, 0xab, + 0xbb, 0x74, 0xf4, 0x50, 0xef, 0x45, 0x6d, 0x6b, + 0x4d, 0x97, 0xaa, 0x80, 0xce, 0x6d, 0xa7, 0x67 }, + { 0x01, 0x2c, 0x97, 0x80, 0x96, 0x14, 0x81, 0x6b, + 0x5d, 0x94, 0x94, 0x47, 0x7d, 0x4b, 0x68, 0x7d, + 0x15, 0xb9, 0x6e, 0xb6, 0x9c, 0x0e, 0x80, 0x74, + 0xa8, 0x51, 0x6f, 0x31, 0x22, 0x4b, 0x5c, 0x98 }, + { 0x91, 0xff, 0xd2, 0x6c, 0xfa, 0x4d, 0xa5, 0x13, + 0x4c, 0x7e, 0xa2, 0x62, 0xf7, 0x88, 0x9c, 0x32, + 0x9f, 0x61, 0xf6, 0xa6, 0x57, 0x22, 0x5c, 0xc2, + 0x12, 0xf4, 0x00, 0x56, 0xd9, 0x86, 0xb3, 0xf4 }, + { 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, + 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, + 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, + 0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d }, + { 0xef, 0xc0, 0x4c, 0xdc, 0x39, 0x1c, 0x7e, 0x91, + 0x19, 0xbd, 0x38, 0x66, 0x8a, 0x53, 0x4e, 0x65, + 0xfe, 0x31, 0x03, 0x6d, 0x6a, 0x62, 0x11, 0x2e, + 0x44, 0xeb, 0xeb, 0x11, 0xf9, 0xc5, 0x70, 0x80 }, + { 0x99, 0x2c, 0xf5, 0xc0, 0x53, 0x44, 0x2a, 0x5f, + 0xbc, 0x4f, 0xaf, 0x58, 0x3e, 0x04, 0xe5, 0x0b, + 0xb7, 0x0d, 0x2f, 0x39, 0xfb, 0xb6, 0xa5, 0x03, + 0xf8, 0x9e, 0x56, 0xa6, 0x3e, 0x18, 0x57, 0x8a }, + { 0x38, 0x64, 0x0e, 0x9f, 0x21, 0x98, 0x3e, 0x67, + 0xb5, 0x39, 0xca, 0xcc, 0xae, 0x5e, 0xcf, 0x61, + 0x5a, 0xe2, 0x76, 0x4f, 0x75, 0xa0, 0x9c, 0x9c, + 0x59, 0xb7, 0x64, 0x83, 0xc1, 0xfb, 0xc7, 0x35 }, + { 0x21, 0x3d, 0xd3, 0x4c, 0x7e, 0xfe, 
0x4f, 0xb2, + 0x7a, 0x6b, 0x35, 0xf6, 0xb4, 0x00, 0x0d, 0x1f, + 0xe0, 0x32, 0x81, 0xaf, 0x3c, 0x72, 0x3e, 0x5c, + 0x9f, 0x94, 0x74, 0x7a, 0x5f, 0x31, 0xcd, 0x3b }, + { 0xec, 0x24, 0x6e, 0xee, 0xb9, 0xce, 0xd3, 0xf7, + 0xad, 0x33, 0xed, 0x28, 0x66, 0x0d, 0xd9, 0xbb, + 0x07, 0x32, 0x51, 0x3d, 0xb4, 0xe2, 0xfa, 0x27, + 0x8b, 0x60, 0xcd, 0xe3, 0x68, 0x2a, 0x4c, 0xcd }, + { 0xac, 0x9b, 0x61, 0xd4, 0x46, 0x64, 0x8c, 0x30, + 0x05, 0xd7, 0x89, 0x2b, 0xf3, 0xa8, 0x71, 0x9f, + 0x4c, 0x81, 0x81, 0xcf, 0xdc, 0xbc, 0x2b, 0x79, + 0xfe, 0xf1, 0x0a, 0x27, 0x9b, 0x91, 0x10, 0x95 }, + { 0x7b, 0xf8, 0xb2, 0x29, 0x59, 0xe3, 0x4e, 0x3a, + 0x43, 0xf7, 0x07, 0x92, 0x23, 0xe8, 0x3a, 0x97, + 0x54, 0x61, 0x7d, 0x39, 0x1e, 0x21, 0x3d, 0xfd, + 0x80, 0x8e, 0x41, 0xb9, 0xbe, 0xad, 0x4c, 0xe7 }, + { 0x68, 0xd4, 0xb5, 0xd4, 0xfa, 0x0e, 0x30, 0x2b, + 0x64, 0xcc, 0xc5, 0xaf, 0x79, 0x29, 0x13, 0xac, + 0x4c, 0x88, 0xec, 0x95, 0xc0, 0x7d, 0xdf, 0x40, + 0x69, 0x42, 0x56, 0xeb, 0x88, 0xce, 0x9f, 0x3d }, + { 0xb2, 0xc2, 0x42, 0x0f, 0x05, 0xf9, 0xab, 0xe3, + 0x63, 0x15, 0x91, 0x93, 0x36, 0xb3, 0x7e, 0x4e, + 0x0f, 0xa3, 0x3f, 0xf7, 0xe7, 0x6a, 0x49, 0x27, + 0x67, 0x00, 0x6f, 0xdb, 0x5d, 0x93, 0x54, 0x62 }, + { 0x13, 0x4f, 0x61, 0xbb, 0xd0, 0xbb, 0xb6, 0x9a, + 0xed, 0x53, 0x43, 0x90, 0x45, 0x51, 0xa3, 0xe6, + 0xc1, 0xaa, 0x7d, 0xcd, 0xd7, 0x7e, 0x90, 0x3e, + 0x70, 0x23, 0xeb, 0x7c, 0x60, 0x32, 0x0a, 0xa7 }, + { 0x46, 0x93, 0xf9, 0xbf, 0xf7, 0xd4, 0xf3, 0x98, + 0x6a, 0x7d, 0x17, 0x6e, 0x6e, 0x06, 0xf7, 0x2a, + 0xd1, 0x49, 0x0d, 0x80, 0x5c, 0x99, 0xe2, 0x53, + 0x47, 0xb8, 0xde, 0x77, 0xb4, 0xdb, 0x6d, 0x9b }, + { 0x85, 0x3e, 0x26, 0xf7, 0x41, 0x95, 0x3b, 0x0f, + 0xd5, 0xbd, 0xb4, 0x24, 0xe8, 0xab, 0x9e, 0x8b, + 0x37, 0x50, 0xea, 0xa8, 0xef, 0x61, 0xe4, 0x79, + 0x02, 0xc9, 0x1e, 0x55, 0x4e, 0x9c, 0x73, 0xb9 }, + { 0xf7, 0xde, 0x53, 0x63, 0x61, 0xab, 0xaa, 0x0e, + 0x15, 0x81, 0x56, 0xcf, 0x0e, 0xa4, 0xf6, 0x3a, + 0x99, 0xb5, 0xe4, 0x05, 0x4f, 0x8f, 0xa4, 0xc9, + 0xd4, 0x5f, 0x62, 0x85, 0xca, 0xd5, 0x56, 0x94 }, 
+ { 0x4c, 0x23, 0x06, 0x08, 0x86, 0x0a, 0x99, 0xae, + 0x8d, 0x7b, 0xd5, 0xc2, 0xcc, 0x17, 0xfa, 0x52, + 0x09, 0x6b, 0x9a, 0x61, 0xbe, 0xdb, 0x17, 0xcb, + 0x76, 0x17, 0x86, 0x4a, 0xd2, 0x9c, 0xa7, 0xa6 }, + { 0xae, 0xb9, 0x20, 0xea, 0x87, 0x95, 0x2d, 0xad, + 0xb1, 0xfb, 0x75, 0x92, 0x91, 0xe3, 0x38, 0x81, + 0x39, 0xa8, 0x72, 0x86, 0x50, 0x01, 0x88, 0x6e, + 0xd8, 0x47, 0x52, 0xe9, 0x3c, 0x25, 0x0c, 0x2a }, + { 0xab, 0xa4, 0xad, 0x9b, 0x48, 0x0b, 0x9d, 0xf3, + 0xd0, 0x8c, 0xa5, 0xe8, 0x7b, 0x0c, 0x24, 0x40, + 0xd4, 0xe4, 0xea, 0x21, 0x22, 0x4c, 0x2e, 0xb4, + 0x2c, 0xba, 0xe4, 0x69, 0xd0, 0x89, 0xb9, 0x31 }, + { 0x05, 0x82, 0x56, 0x07, 0xd7, 0xfd, 0xf2, 0xd8, + 0x2e, 0xf4, 0xc3, 0xc8, 0xc2, 0xae, 0xa9, 0x61, + 0xad, 0x98, 0xd6, 0x0e, 0xdf, 0xf7, 0xd0, 0x18, + 0x98, 0x3e, 0x21, 0x20, 0x4c, 0x0d, 0x93, 0xd1 }, + { 0xa7, 0x42, 0xf8, 0xb6, 0xaf, 0x82, 0xd8, 0xa6, + 0xca, 0x23, 0x57, 0xc5, 0xf1, 0xcf, 0x91, 0xde, + 0xfb, 0xd0, 0x66, 0x26, 0x7d, 0x75, 0xc0, 0x48, + 0xb3, 0x52, 0x36, 0x65, 0x85, 0x02, 0x59, 0x62 }, + { 0x2b, 0xca, 0xc8, 0x95, 0x99, 0x00, 0x0b, 0x42, + 0xc9, 0x5a, 0xe2, 0x38, 0x35, 0xa7, 0x13, 0x70, + 0x4e, 0xd7, 0x97, 0x89, 0xc8, 0x4f, 0xef, 0x14, + 0x9a, 0x87, 0x4f, 0xf7, 0x33, 0xf0, 0x17, 0xa2 }, + { 0xac, 0x1e, 0xd0, 0x7d, 0x04, 0x8f, 0x10, 0x5a, + 0x9e, 0x5b, 0x7a, 0xb8, 0x5b, 0x09, 0xa4, 0x92, + 0xd5, 0xba, 0xff, 0x14, 0xb8, 0xbf, 0xb0, 0xe9, + 0xfd, 0x78, 0x94, 0x86, 0xee, 0xa2, 0xb9, 0x74 }, + { 0xe4, 0x8d, 0x0e, 0xcf, 0xaf, 0x49, 0x7d, 0x5b, + 0x27, 0xc2, 0x5d, 0x99, 0xe1, 0x56, 0xcb, 0x05, + 0x79, 0xd4, 0x40, 0xd6, 0xe3, 0x1f, 0xb6, 0x24, + 0x73, 0x69, 0x6d, 0xbf, 0x95, 0xe0, 0x10, 0xe4 }, + { 0x12, 0xa9, 0x1f, 0xad, 0xf8, 0xb2, 0x16, 0x44, + 0xfd, 0x0f, 0x93, 0x4f, 0x3c, 0x4a, 0x8f, 0x62, + 0xba, 0x86, 0x2f, 0xfd, 0x20, 0xe8, 0xe9, 0x61, + 0x15, 0x4c, 0x15, 0xc1, 0x38, 0x84, 0xed, 0x3d }, + { 0x7c, 0xbe, 0xe9, 0x6e, 0x13, 0x98, 0x97, 0xdc, + 0x98, 0xfb, 0xef, 0x3b, 0xe8, 0x1a, 0xd4, 0xd9, + 0x64, 0xd2, 0x35, 0xcb, 0x12, 0x14, 0x1f, 0xb6, + 0x67, 
0x27, 0xe6, 0xe5, 0xdf, 0x73, 0xa8, 0x78 }, + { 0xeb, 0xf6, 0x6a, 0xbb, 0x59, 0x7a, 0xe5, 0x72, + 0xa7, 0x29, 0x7c, 0xb0, 0x87, 0x1e, 0x35, 0x5a, + 0xcc, 0xaf, 0xad, 0x83, 0x77, 0xb8, 0xe7, 0x8b, + 0xf1, 0x64, 0xce, 0x2a, 0x18, 0xde, 0x4b, 0xaf }, + { 0x71, 0xb9, 0x33, 0xb0, 0x7e, 0x4f, 0xf7, 0x81, + 0x8c, 0xe0, 0x59, 0xd0, 0x08, 0x82, 0x9e, 0x45, + 0x3c, 0x6f, 0xf0, 0x2e, 0xc0, 0xa7, 0xdb, 0x39, + 0x3f, 0xc2, 0xd8, 0x70, 0xf3, 0x7a, 0x72, 0x86 }, + { 0x7c, 0xf7, 0xc5, 0x13, 0x31, 0x22, 0x0b, 0x8d, + 0x3e, 0xba, 0xed, 0x9c, 0x29, 0x39, 0x8a, 0x16, + 0xd9, 0x81, 0x56, 0xe2, 0x61, 0x3c, 0xb0, 0x88, + 0xf2, 0xb0, 0xe0, 0x8a, 0x1b, 0xe4, 0xcf, 0x4f }, + { 0x3e, 0x41, 0xa1, 0x08, 0xe0, 0xf6, 0x4a, 0xd2, + 0x76, 0xb9, 0x79, 0xe1, 0xce, 0x06, 0x82, 0x79, + 0xe1, 0x6f, 0x7b, 0xc7, 0xe4, 0xaa, 0x1d, 0x21, + 0x1e, 0x17, 0xb8, 0x11, 0x61, 0xdf, 0x16, 0x02 }, + { 0x88, 0x65, 0x02, 0xa8, 0x2a, 0xb4, 0x7b, 0xa8, + 0xd8, 0x67, 0x10, 0xaa, 0x9d, 0xe3, 0xd4, 0x6e, + 0xa6, 0x5c, 0x47, 0xaf, 0x6e, 0xe8, 0xde, 0x45, + 0x0c, 0xce, 0xb8, 0xb1, 0x1b, 0x04, 0x5f, 0x50 }, + { 0xc0, 0x21, 0xbc, 0x5f, 0x09, 0x54, 0xfe, 0xe9, + 0x4f, 0x46, 0xea, 0x09, 0x48, 0x7e, 0x10, 0xa8, + 0x48, 0x40, 0xd0, 0x2f, 0x64, 0x81, 0x0b, 0xc0, + 0x8d, 0x9e, 0x55, 0x1f, 0x7d, 0x41, 0x68, 0x14 }, + { 0x20, 0x30, 0x51, 0x6e, 0x8a, 0x5f, 0xe1, 0x9a, + 0xe7, 0x9c, 0x33, 0x6f, 0xce, 0x26, 0x38, 0x2a, + 0x74, 0x9d, 0x3f, 0xd0, 0xec, 0x91, 0xe5, 0x37, + 0xd4, 0xbd, 0x23, 0x58, 0xc1, 0x2d, 0xfb, 0x22 }, + { 0x55, 0x66, 0x98, 0xda, 0xc8, 0x31, 0x7f, 0xd3, + 0x6d, 0xfb, 0xdf, 0x25, 0xa7, 0x9c, 0xb1, 0x12, + 0xd5, 0x42, 0x58, 0x60, 0x60, 0x5c, 0xba, 0xf5, + 0x07, 0xf2, 0x3b, 0xf7, 0xe9, 0xf4, 0x2a, 0xfe }, + { 0x2f, 0x86, 0x7b, 0xa6, 0x77, 0x73, 0xfd, 0xc3, + 0xe9, 0x2f, 0xce, 0xd9, 0x9a, 0x64, 0x09, 0xad, + 0x39, 0xd0, 0xb8, 0x80, 0xfd, 0xe8, 0xf1, 0x09, + 0xa8, 0x17, 0x30, 0xc4, 0x45, 0x1d, 0x01, 0x78 }, + { 0x17, 0x2e, 0xc2, 0x18, 0xf1, 0x19, 0xdf, 0xae, + 0x98, 0x89, 0x6d, 0xff, 0x29, 0xdd, 0x98, 0x76, + 0xc9, 0x4a, 
0xf8, 0x74, 0x17, 0xf9, 0xae, 0x4c, + 0x70, 0x14, 0xbb, 0x4e, 0x4b, 0x96, 0xaf, 0xc7 }, + { 0x3f, 0x85, 0x81, 0x4a, 0x18, 0x19, 0x5f, 0x87, + 0x9a, 0xa9, 0x62, 0xf9, 0x5d, 0x26, 0xbd, 0x82, + 0xa2, 0x78, 0xf2, 0xb8, 0x23, 0x20, 0x21, 0x8f, + 0x6b, 0x3b, 0xd6, 0xf7, 0xf6, 0x67, 0xa6, 0xd9 }, + { 0x1b, 0x61, 0x8f, 0xba, 0xa5, 0x66, 0xb3, 0xd4, + 0x98, 0xc1, 0x2e, 0x98, 0x2c, 0x9e, 0xc5, 0x2e, + 0x4d, 0xa8, 0x5a, 0x8c, 0x54, 0xf3, 0x8f, 0x34, + 0xc0, 0x90, 0x39, 0x4f, 0x23, 0xc1, 0x84, 0xc1 }, + { 0x0c, 0x75, 0x8f, 0xb5, 0x69, 0x2f, 0xfd, 0x41, + 0xa3, 0x57, 0x5d, 0x0a, 0xf0, 0x0c, 0xc7, 0xfb, + 0xf2, 0xcb, 0xe5, 0x90, 0x5a, 0x58, 0x32, 0x3a, + 0x88, 0xae, 0x42, 0x44, 0xf6, 0xe4, 0xc9, 0x93 }, + { 0xa9, 0x31, 0x36, 0x0c, 0xad, 0x62, 0x8c, 0x7f, + 0x12, 0xa6, 0xc1, 0xc4, 0xb7, 0x53, 0xb0, 0xf4, + 0x06, 0x2a, 0xef, 0x3c, 0xe6, 0x5a, 0x1a, 0xe3, + 0xf1, 0x93, 0x69, 0xda, 0xdf, 0x3a, 0xe2, 0x3d }, + { 0xcb, 0xac, 0x7d, 0x77, 0x3b, 0x1e, 0x3b, 0x3c, + 0x66, 0x91, 0xd7, 0xab, 0xb7, 0xe9, 0xdf, 0x04, + 0x5c, 0x8b, 0xa1, 0x92, 0x68, 0xde, 0xd1, 0x53, + 0x20, 0x7f, 0x5e, 0x80, 0x43, 0x52, 0xec, 0x5d }, + { 0x23, 0xa1, 0x96, 0xd3, 0x80, 0x2e, 0xd3, 0xc1, + 0xb3, 0x84, 0x01, 0x9a, 0x82, 0x32, 0x58, 0x40, + 0xd3, 0x2f, 0x71, 0x95, 0x0c, 0x45, 0x80, 0xb0, + 0x34, 0x45, 0xe0, 0x89, 0x8e, 0x14, 0x05, 0x3c }, + { 0xf4, 0x49, 0x54, 0x70, 0xf2, 0x26, 0xc8, 0xc2, + 0x14, 0xbe, 0x08, 0xfd, 0xfa, 0xd4, 0xbc, 0x4a, + 0x2a, 0x9d, 0xbe, 0xa9, 0x13, 0x6a, 0x21, 0x0d, + 0xf0, 0xd4, 0xb6, 0x49, 0x29, 0xe6, 0xfc, 0x14 }, + { 0xe2, 0x90, 0xdd, 0x27, 0x0b, 0x46, 0x7f, 0x34, + 0xab, 0x1c, 0x00, 0x2d, 0x34, 0x0f, 0xa0, 0x16, + 0x25, 0x7f, 0xf1, 0x9e, 0x58, 0x33, 0xfd, 0xbb, + 0xf2, 0xcb, 0x40, 0x1c, 0x3b, 0x28, 0x17, 0xde }, + { 0x9f, 0xc7, 0xb5, 0xde, 0xd3, 0xc1, 0x50, 0x42, + 0xb2, 0xa6, 0x58, 0x2d, 0xc3, 0x9b, 0xe0, 0x16, + 0xd2, 0x4a, 0x68, 0x2d, 0x5e, 0x61, 0xad, 0x1e, + 0xff, 0x9c, 0x63, 0x30, 0x98, 0x48, 0xf7, 0x06 }, + { 0x8c, 0xca, 0x67, 0xa3, 0x6d, 0x17, 0xd5, 0xe6, + 0x34, 0x1c, 0xb5, 
0x92, 0xfd, 0x7b, 0xef, 0x99, + 0x26, 0xc9, 0xe3, 0xaa, 0x10, 0x27, 0xea, 0x11, + 0xa7, 0xd8, 0xbd, 0x26, 0x0b, 0x57, 0x6e, 0x04 }, + { 0x40, 0x93, 0x92, 0xf5, 0x60, 0xf8, 0x68, 0x31, + 0xda, 0x43, 0x73, 0xee, 0x5e, 0x00, 0x74, 0x26, + 0x05, 0x95, 0xd7, 0xbc, 0x24, 0x18, 0x3b, 0x60, + 0xed, 0x70, 0x0d, 0x45, 0x83, 0xd3, 0xf6, 0xf0 }, + { 0x28, 0x02, 0x16, 0x5d, 0xe0, 0x90, 0x91, 0x55, + 0x46, 0xf3, 0x39, 0x8c, 0xd8, 0x49, 0x16, 0x4a, + 0x19, 0xf9, 0x2a, 0xdb, 0xc3, 0x61, 0xad, 0xc9, + 0x9b, 0x0f, 0x20, 0xc8, 0xea, 0x07, 0x10, 0x54 }, + { 0xad, 0x83, 0x91, 0x68, 0xd9, 0xf8, 0xa4, 0xbe, + 0x95, 0xba, 0x9e, 0xf9, 0xa6, 0x92, 0xf0, 0x72, + 0x56, 0xae, 0x43, 0xfe, 0x6f, 0x98, 0x64, 0xe2, + 0x90, 0x69, 0x1b, 0x02, 0x56, 0xce, 0x50, 0xa9 }, + { 0x75, 0xfd, 0xaa, 0x50, 0x38, 0xc2, 0x84, 0xb8, + 0x6d, 0x6e, 0x8a, 0xff, 0xe8, 0xb2, 0x80, 0x7e, + 0x46, 0x7b, 0x86, 0x60, 0x0e, 0x79, 0xaf, 0x36, + 0x89, 0xfb, 0xc0, 0x63, 0x28, 0xcb, 0xf8, 0x94 }, + { 0xe5, 0x7c, 0xb7, 0x94, 0x87, 0xdd, 0x57, 0x90, + 0x24, 0x32, 0xb2, 0x50, 0x73, 0x38, 0x13, 0xbd, + 0x96, 0xa8, 0x4e, 0xfc, 0xe5, 0x9f, 0x65, 0x0f, + 0xac, 0x26, 0xe6, 0x69, 0x6a, 0xef, 0xaf, 0xc3 }, + { 0x56, 0xf3, 0x4e, 0x8b, 0x96, 0x55, 0x7e, 0x90, + 0xc1, 0xf2, 0x4b, 0x52, 0xd0, 0xc8, 0x9d, 0x51, + 0x08, 0x6a, 0xcf, 0x1b, 0x00, 0xf6, 0x34, 0xcf, + 0x1d, 0xde, 0x92, 0x33, 0xb8, 0xea, 0xaa, 0x3e }, + { 0x1b, 0x53, 0xee, 0x94, 0xaa, 0xf3, 0x4e, 0x4b, + 0x15, 0x9d, 0x48, 0xde, 0x35, 0x2c, 0x7f, 0x06, + 0x61, 0xd0, 0xa4, 0x0e, 0xdf, 0xf9, 0x5a, 0x0b, + 0x16, 0x39, 0xb4, 0x09, 0x0e, 0x97, 0x44, 0x72 }, + { 0x05, 0x70, 0x5e, 0x2a, 0x81, 0x75, 0x7c, 0x14, + 0xbd, 0x38, 0x3e, 0xa9, 0x8d, 0xda, 0x54, 0x4e, + 0xb1, 0x0e, 0x6b, 0xc0, 0x7b, 0xae, 0x43, 0x5e, + 0x25, 0x18, 0xdb, 0xe1, 0x33, 0x52, 0x53, 0x75 }, + { 0xd8, 0xb2, 0x86, 0x6e, 0x8a, 0x30, 0x9d, 0xb5, + 0x3e, 0x52, 0x9e, 0xc3, 0x29, 0x11, 0xd8, 0x2f, + 0x5c, 0xa1, 0x6c, 0xff, 0x76, 0x21, 0x68, 0x91, + 0xa9, 0x67, 0x6a, 0xa3, 0x1a, 0xaa, 0x6c, 0x42 }, + { 0xf5, 0x04, 0x1c, 0x24, 
0x12, 0x70, 0xeb, 0x04, + 0xc7, 0x1e, 0xc2, 0xc9, 0x5d, 0x4c, 0x38, 0xd8, + 0x03, 0xb1, 0x23, 0x7b, 0x0f, 0x29, 0xfd, 0x4d, + 0xb3, 0xeb, 0x39, 0x76, 0x69, 0xe8, 0x86, 0x99 }, + { 0x9a, 0x4c, 0xe0, 0x77, 0xc3, 0x49, 0x32, 0x2f, + 0x59, 0x5e, 0x0e, 0xe7, 0x9e, 0xd0, 0xda, 0x5f, + 0xab, 0x66, 0x75, 0x2c, 0xbf, 0xef, 0x8f, 0x87, + 0xd0, 0xe9, 0xd0, 0x72, 0x3c, 0x75, 0x30, 0xdd }, + { 0x65, 0x7b, 0x09, 0xf3, 0xd0, 0xf5, 0x2b, 0x5b, + 0x8f, 0x2f, 0x97, 0x16, 0x3a, 0x0e, 0xdf, 0x0c, + 0x04, 0xf0, 0x75, 0x40, 0x8a, 0x07, 0xbb, 0xeb, + 0x3a, 0x41, 0x01, 0xa8, 0x91, 0x99, 0x0d, 0x62 }, + { 0x1e, 0x3f, 0x7b, 0xd5, 0xa5, 0x8f, 0xa5, 0x33, + 0x34, 0x4a, 0xa8, 0xed, 0x3a, 0xc1, 0x22, 0xbb, + 0x9e, 0x70, 0xd4, 0xef, 0x50, 0xd0, 0x04, 0x53, + 0x08, 0x21, 0x94, 0x8f, 0x5f, 0xe6, 0x31, 0x5a }, + { 0x80, 0xdc, 0xcf, 0x3f, 0xd8, 0x3d, 0xfd, 0x0d, + 0x35, 0xaa, 0x28, 0x58, 0x59, 0x22, 0xab, 0x89, + 0xd5, 0x31, 0x39, 0x97, 0x67, 0x3e, 0xaf, 0x90, + 0x5c, 0xea, 0x9c, 0x0b, 0x22, 0x5c, 0x7b, 0x5f }, + { 0x8a, 0x0d, 0x0f, 0xbf, 0x63, 0x77, 0xd8, 0x3b, + 0xb0, 0x8b, 0x51, 0x4b, 0x4b, 0x1c, 0x43, 0xac, + 0xc9, 0x5d, 0x75, 0x17, 0x14, 0xf8, 0x92, 0x56, + 0x45, 0xcb, 0x6b, 0xc8, 0x56, 0xca, 0x15, 0x0a }, + { 0x9f, 0xa5, 0xb4, 0x87, 0x73, 0x8a, 0xd2, 0x84, + 0x4c, 0xc6, 0x34, 0x8a, 0x90, 0x19, 0x18, 0xf6, + 0x59, 0xa3, 0xb8, 0x9e, 0x9c, 0x0d, 0xfe, 0xea, + 0xd3, 0x0d, 0xd9, 0x4b, 0xcf, 0x42, 0xef, 0x8e }, + { 0x80, 0x83, 0x2c, 0x4a, 0x16, 0x77, 0xf5, 0xea, + 0x25, 0x60, 0xf6, 0x68, 0xe9, 0x35, 0x4d, 0xd3, + 0x69, 0x97, 0xf0, 0x37, 0x28, 0xcf, 0xa5, 0x5e, + 0x1b, 0x38, 0x33, 0x7c, 0x0c, 0x9e, 0xf8, 0x18 }, + { 0xab, 0x37, 0xdd, 0xb6, 0x83, 0x13, 0x7e, 0x74, + 0x08, 0x0d, 0x02, 0x6b, 0x59, 0x0b, 0x96, 0xae, + 0x9b, 0xb4, 0x47, 0x72, 0x2f, 0x30, 0x5a, 0x5a, + 0xc5, 0x70, 0xec, 0x1d, 0xf9, 0xb1, 0x74, 0x3c }, + { 0x3e, 0xe7, 0x35, 0xa6, 0x94, 0xc2, 0x55, 0x9b, + 0x69, 0x3a, 0xa6, 0x86, 0x29, 0x36, 0x1e, 0x15, + 0xd1, 0x22, 0x65, 0xad, 0x6a, 0x3d, 0xed, 0xf4, + 0x88, 0xb0, 0xb0, 0x0f, 0xac, 0x97, 
0x54, 0xba }, + { 0xd6, 0xfc, 0xd2, 0x32, 0x19, 0xb6, 0x47, 0xe4, + 0xcb, 0xd5, 0xeb, 0x2d, 0x0a, 0xd0, 0x1e, 0xc8, + 0x83, 0x8a, 0x4b, 0x29, 0x01, 0xfc, 0x32, 0x5c, + 0xc3, 0x70, 0x19, 0x81, 0xca, 0x6c, 0x88, 0x8b }, + { 0x05, 0x20, 0xec, 0x2f, 0x5b, 0xf7, 0xa7, 0x55, + 0xda, 0xcb, 0x50, 0xc6, 0xbf, 0x23, 0x3e, 0x35, + 0x15, 0x43, 0x47, 0x63, 0xdb, 0x01, 0x39, 0xcc, + 0xd9, 0xfa, 0xef, 0xbb, 0x82, 0x07, 0x61, 0x2d }, + { 0xaf, 0xf3, 0xb7, 0x5f, 0x3f, 0x58, 0x12, 0x64, + 0xd7, 0x66, 0x16, 0x62, 0xb9, 0x2f, 0x5a, 0xd3, + 0x7c, 0x1d, 0x32, 0xbd, 0x45, 0xff, 0x81, 0xa4, + 0xed, 0x8a, 0xdc, 0x9e, 0xf3, 0x0d, 0xd9, 0x89 }, + { 0xd0, 0xdd, 0x65, 0x0b, 0xef, 0xd3, 0xba, 0x63, + 0xdc, 0x25, 0x10, 0x2c, 0x62, 0x7c, 0x92, 0x1b, + 0x9c, 0xbe, 0xb0, 0xb1, 0x30, 0x68, 0x69, 0x35, + 0xb5, 0xc9, 0x27, 0xcb, 0x7c, 0xcd, 0x5e, 0x3b }, + { 0xe1, 0x14, 0x98, 0x16, 0xb1, 0x0a, 0x85, 0x14, + 0xfb, 0x3e, 0x2c, 0xab, 0x2c, 0x08, 0xbe, 0xe9, + 0xf7, 0x3c, 0xe7, 0x62, 0x21, 0x70, 0x12, 0x46, + 0xa5, 0x89, 0xbb, 0xb6, 0x73, 0x02, 0xd8, 0xa9 }, + { 0x7d, 0xa3, 0xf4, 0x41, 0xde, 0x90, 0x54, 0x31, + 0x7e, 0x72, 0xb5, 0xdb, 0xf9, 0x79, 0xda, 0x01, + 0xe6, 0xbc, 0xee, 0xbb, 0x84, 0x78, 0xea, 0xe6, + 0xa2, 0x28, 0x49, 0xd9, 0x02, 0x92, 0x63, 0x5c }, + { 0x12, 0x30, 0xb1, 0xfc, 0x8a, 0x7d, 0x92, 0x15, + 0xed, 0xc2, 0xd4, 0xa2, 0xde, 0xcb, 0xdd, 0x0a, + 0x6e, 0x21, 0x6c, 0x92, 0x42, 0x78, 0xc9, 0x1f, + 0xc5, 0xd1, 0x0e, 0x7d, 0x60, 0x19, 0x2d, 0x94 }, + { 0x57, 0x50, 0xd7, 0x16, 0xb4, 0x80, 0x8f, 0x75, + 0x1f, 0xeb, 0xc3, 0x88, 0x06, 0xba, 0x17, 0x0b, + 0xf6, 0xd5, 0x19, 0x9a, 0x78, 0x16, 0xbe, 0x51, + 0x4e, 0x3f, 0x93, 0x2f, 0xbe, 0x0c, 0xb8, 0x71 }, + { 0x6f, 0xc5, 0x9b, 0x2f, 0x10, 0xfe, 0xba, 0x95, + 0x4a, 0xa6, 0x82, 0x0b, 0x3c, 0xa9, 0x87, 0xee, + 0x81, 0xd5, 0xcc, 0x1d, 0xa3, 0xc6, 0x3c, 0xe8, + 0x27, 0x30, 0x1c, 0x56, 0x9d, 0xfb, 0x39, 0xce }, + { 0xc7, 0xc3, 0xfe, 0x1e, 0xeb, 0xdc, 0x7b, 0x5a, + 0x93, 0x93, 0x26, 0xe8, 0xdd, 0xb8, 0x3e, 0x8b, + 0xf2, 0xb7, 0x80, 0xb6, 0x56, 0x78, 0xcb, 
0x62, + 0xf2, 0x08, 0xb0, 0x40, 0xab, 0xdd, 0x35, 0xe2 }, + { 0x0c, 0x75, 0xc1, 0xa1, 0x5c, 0xf3, 0x4a, 0x31, + 0x4e, 0xe4, 0x78, 0xf4, 0xa5, 0xce, 0x0b, 0x8a, + 0x6b, 0x36, 0x52, 0x8e, 0xf7, 0xa8, 0x20, 0x69, + 0x6c, 0x3e, 0x42, 0x46, 0xc5, 0xa1, 0x58, 0x64 }, + { 0x21, 0x6d, 0xc1, 0x2a, 0x10, 0x85, 0x69, 0xa3, + 0xc7, 0xcd, 0xde, 0x4a, 0xed, 0x43, 0xa6, 0xc3, + 0x30, 0x13, 0x9d, 0xda, 0x3c, 0xcc, 0x4a, 0x10, + 0x89, 0x05, 0xdb, 0x38, 0x61, 0x89, 0x90, 0x50 }, + { 0xa5, 0x7b, 0xe6, 0xae, 0x67, 0x56, 0xf2, 0x8b, + 0x02, 0xf5, 0x9d, 0xad, 0xf7, 0xe0, 0xd7, 0xd8, + 0x80, 0x7f, 0x10, 0xfa, 0x15, 0xce, 0xd1, 0xad, + 0x35, 0x85, 0x52, 0x1a, 0x1d, 0x99, 0x5a, 0x89 }, + { 0x81, 0x6a, 0xef, 0x87, 0x59, 0x53, 0x71, 0x6c, + 0xd7, 0xa5, 0x81, 0xf7, 0x32, 0xf5, 0x3d, 0xd4, + 0x35, 0xda, 0xb6, 0x6d, 0x09, 0xc3, 0x61, 0xd2, + 0xd6, 0x59, 0x2d, 0xe1, 0x77, 0x55, 0xd8, 0xa8 }, + { 0x9a, 0x76, 0x89, 0x32, 0x26, 0x69, 0x3b, 0x6e, + 0xa9, 0x7e, 0x6a, 0x73, 0x8f, 0x9d, 0x10, 0xfb, + 0x3d, 0x0b, 0x43, 0xae, 0x0e, 0x8b, 0x7d, 0x81, + 0x23, 0xea, 0x76, 0xce, 0x97, 0x98, 0x9c, 0x7e }, + { 0x8d, 0xae, 0xdb, 0x9a, 0x27, 0x15, 0x29, 0xdb, + 0xb7, 0xdc, 0x3b, 0x60, 0x7f, 0xe5, 0xeb, 0x2d, + 0x32, 0x11, 0x77, 0x07, 0x58, 0xdd, 0x3b, 0x0a, + 0x35, 0x93, 0xd2, 0xd7, 0x95, 0x4e, 0x2d, 0x5b }, + { 0x16, 0xdb, 0xc0, 0xaa, 0x5d, 0xd2, 0xc7, 0x74, + 0xf5, 0x05, 0x10, 0x0f, 0x73, 0x37, 0x86, 0xd8, + 0xa1, 0x75, 0xfc, 0xbb, 0xb5, 0x9c, 0x43, 0xe1, + 0xfb, 0xff, 0x3e, 0x1e, 0xaf, 0x31, 0xcb, 0x4a }, + { 0x86, 0x06, 0xcb, 0x89, 0x9c, 0x6a, 0xea, 0xf5, + 0x1b, 0x9d, 0xb0, 0xfe, 0x49, 0x24, 0xa9, 0xfd, + 0x5d, 0xab, 0xc1, 0x9f, 0x88, 0x26, 0xf2, 0xbc, + 0x1c, 0x1d, 0x7d, 0xa1, 0x4d, 0x2c, 0x2c, 0x99 }, + { 0x84, 0x79, 0x73, 0x1a, 0xed, 0xa5, 0x7b, 0xd3, + 0x7e, 0xad, 0xb5, 0x1a, 0x50, 0x7e, 0x30, 0x7f, + 0x3b, 0xd9, 0x5e, 0x69, 0xdb, 0xca, 0x94, 0xf3, + 0xbc, 0x21, 0x72, 0x60, 0x66, 0xad, 0x6d, 0xfd }, + { 0x58, 0x47, 0x3a, 0x9e, 0xa8, 0x2e, 0xfa, 0x3f, + 0x3b, 0x3d, 0x8f, 0xc8, 0x3e, 0xd8, 0x86, 0x31, + 
0x27, 0xb3, 0x3a, 0xe8, 0xde, 0xae, 0x63, 0x07, + 0x20, 0x1e, 0xdb, 0x6d, 0xde, 0x61, 0xde, 0x29 }, + { 0x9a, 0x92, 0x55, 0xd5, 0x3a, 0xf1, 0x16, 0xde, + 0x8b, 0xa2, 0x7c, 0xe3, 0x5b, 0x4c, 0x7e, 0x15, + 0x64, 0x06, 0x57, 0xa0, 0xfc, 0xb8, 0x88, 0xc7, + 0x0d, 0x95, 0x43, 0x1d, 0xac, 0xd8, 0xf8, 0x30 }, + { 0x9e, 0xb0, 0x5f, 0xfb, 0xa3, 0x9f, 0xd8, 0x59, + 0x6a, 0x45, 0x49, 0x3e, 0x18, 0xd2, 0x51, 0x0b, + 0xf3, 0xef, 0x06, 0x5c, 0x51, 0xd6, 0xe1, 0x3a, + 0xbe, 0x66, 0xaa, 0x57, 0xe0, 0x5c, 0xfd, 0xb7 }, + { 0x81, 0xdc, 0xc3, 0xa5, 0x05, 0xea, 0xce, 0x3f, + 0x87, 0x9d, 0x8f, 0x70, 0x27, 0x76, 0x77, 0x0f, + 0x9d, 0xf5, 0x0e, 0x52, 0x1d, 0x14, 0x28, 0xa8, + 0x5d, 0xaf, 0x04, 0xf9, 0xad, 0x21, 0x50, 0xe0 }, + { 0xe3, 0xe3, 0xc4, 0xaa, 0x3a, 0xcb, 0xbc, 0x85, + 0x33, 0x2a, 0xf9, 0xd5, 0x64, 0xbc, 0x24, 0x16, + 0x5e, 0x16, 0x87, 0xf6, 0xb1, 0xad, 0xcb, 0xfa, + 0xe7, 0x7a, 0x8f, 0x03, 0xc7, 0x2a, 0xc2, 0x8c }, + { 0x67, 0x46, 0xc8, 0x0b, 0x4e, 0xb5, 0x6a, 0xea, + 0x45, 0xe6, 0x4e, 0x72, 0x89, 0xbb, 0xa3, 0xed, + 0xbf, 0x45, 0xec, 0xf8, 0x20, 0x64, 0x81, 0xff, + 0x63, 0x02, 0x12, 0x29, 0x84, 0xcd, 0x52, 0x6a }, + { 0x2b, 0x62, 0x8e, 0x52, 0x76, 0x4d, 0x7d, 0x62, + 0xc0, 0x86, 0x8b, 0x21, 0x23, 0x57, 0xcd, 0xd1, + 0x2d, 0x91, 0x49, 0x82, 0x2f, 0x4e, 0x98, 0x45, + 0xd9, 0x18, 0xa0, 0x8d, 0x1a, 0xe9, 0x90, 0xc0 }, + { 0xe4, 0xbf, 0xe8, 0x0d, 0x58, 0xc9, 0x19, 0x94, + 0x61, 0x39, 0x09, 0xdc, 0x4b, 0x1a, 0x12, 0x49, + 0x68, 0x96, 0xc0, 0x04, 0xaf, 0x7b, 0x57, 0x01, + 0x48, 0x3d, 0xe4, 0x5d, 0x28, 0x23, 0xd7, 0x8e }, + { 0xeb, 0xb4, 0xba, 0x15, 0x0c, 0xef, 0x27, 0x34, + 0x34, 0x5b, 0x5d, 0x64, 0x1b, 0xbe, 0xd0, 0x3a, + 0x21, 0xea, 0xfa, 0xe9, 0x33, 0xc9, 0x9e, 0x00, + 0x92, 0x12, 0xef, 0x04, 0x57, 0x4a, 0x85, 0x30 }, + { 0x39, 0x66, 0xec, 0x73, 0xb1, 0x54, 0xac, 0xc6, + 0x97, 0xac, 0x5c, 0xf5, 0xb2, 0x4b, 0x40, 0xbd, + 0xb0, 0xdb, 0x9e, 0x39, 0x88, 0x36, 0xd7, 0x6d, + 0x4b, 0x88, 0x0e, 0x3b, 0x2a, 0xf1, 0xaa, 0x27 }, + { 0xef, 0x7e, 0x48, 0x31, 0xb3, 0xa8, 0x46, 0x36, + 0x51, 
0x8d, 0x6e, 0x4b, 0xfc, 0xe6, 0x4a, 0x43, + 0xdb, 0x2a, 0x5d, 0xda, 0x9c, 0xca, 0x2b, 0x44, + 0xf3, 0x90, 0x33, 0xbd, 0xc4, 0x0d, 0x62, 0x43 }, + { 0x7a, 0xbf, 0x6a, 0xcf, 0x5c, 0x8e, 0x54, 0x9d, + 0xdb, 0xb1, 0x5a, 0xe8, 0xd8, 0xb3, 0x88, 0xc1, + 0xc1, 0x97, 0xe6, 0x98, 0x73, 0x7c, 0x97, 0x85, + 0x50, 0x1e, 0xd1, 0xf9, 0x49, 0x30, 0xb7, 0xd9 }, + { 0x88, 0x01, 0x8d, 0xed, 0x66, 0x81, 0x3f, 0x0c, + 0xa9, 0x5d, 0xef, 0x47, 0x4c, 0x63, 0x06, 0x92, + 0x01, 0x99, 0x67, 0xb9, 0xe3, 0x68, 0x88, 0xda, + 0xdd, 0x94, 0x12, 0x47, 0x19, 0xb6, 0x82, 0xf6 }, + { 0x39, 0x30, 0x87, 0x6b, 0x9f, 0xc7, 0x52, 0x90, + 0x36, 0xb0, 0x08, 0xb1, 0xb8, 0xbb, 0x99, 0x75, + 0x22, 0xa4, 0x41, 0x63, 0x5a, 0x0c, 0x25, 0xec, + 0x02, 0xfb, 0x6d, 0x90, 0x26, 0xe5, 0x5a, 0x97 }, + { 0x0a, 0x40, 0x49, 0xd5, 0x7e, 0x83, 0x3b, 0x56, + 0x95, 0xfa, 0xc9, 0x3d, 0xd1, 0xfb, 0xef, 0x31, + 0x66, 0xb4, 0x4b, 0x12, 0xad, 0x11, 0x24, 0x86, + 0x62, 0x38, 0x3a, 0xe0, 0x51, 0xe1, 0x58, 0x27 }, + { 0x81, 0xdc, 0xc0, 0x67, 0x8b, 0xb6, 0xa7, 0x65, + 0xe4, 0x8c, 0x32, 0x09, 0x65, 0x4f, 0xe9, 0x00, + 0x89, 0xce, 0x44, 0xff, 0x56, 0x18, 0x47, 0x7e, + 0x39, 0xab, 0x28, 0x64, 0x76, 0xdf, 0x05, 0x2b }, + { 0xe6, 0x9b, 0x3a, 0x36, 0xa4, 0x46, 0x19, 0x12, + 0xdc, 0x08, 0x34, 0x6b, 0x11, 0xdd, 0xcb, 0x9d, + 0xb7, 0x96, 0xf8, 0x85, 0xfd, 0x01, 0x93, 0x6e, + 0x66, 0x2f, 0xe2, 0x92, 0x97, 0xb0, 0x99, 0xa4 }, + { 0x5a, 0xc6, 0x50, 0x3b, 0x0d, 0x8d, 0xa6, 0x91, + 0x76, 0x46, 0xe6, 0xdc, 0xc8, 0x7e, 0xdc, 0x58, + 0xe9, 0x42, 0x45, 0x32, 0x4c, 0xc2, 0x04, 0xf4, + 0xdd, 0x4a, 0xf0, 0x15, 0x63, 0xac, 0xd4, 0x27 }, + { 0xdf, 0x6d, 0xda, 0x21, 0x35, 0x9a, 0x30, 0xbc, + 0x27, 0x17, 0x80, 0x97, 0x1c, 0x1a, 0xbd, 0x56, + 0xa6, 0xef, 0x16, 0x7e, 0x48, 0x08, 0x87, 0x88, + 0x8e, 0x73, 0xa8, 0x6d, 0x3b, 0xf6, 0x05, 0xe9 }, + { 0xe8, 0xe6, 0xe4, 0x70, 0x71, 0xe7, 0xb7, 0xdf, + 0x25, 0x80, 0xf2, 0x25, 0xcf, 0xbb, 0xed, 0xf8, + 0x4c, 0xe6, 0x77, 0x46, 0x62, 0x66, 0x28, 0xd3, + 0x30, 0x97, 0xe4, 0xb7, 0xdc, 0x57, 0x11, 0x07 }, + { 0x53, 0xe4, 
0x0e, 0xad, 0x62, 0x05, 0x1e, 0x19, + 0xcb, 0x9b, 0xa8, 0x13, 0x3e, 0x3e, 0x5c, 0x1c, + 0xe0, 0x0d, 0xdc, 0xad, 0x8a, 0xcf, 0x34, 0x2a, + 0x22, 0x43, 0x60, 0xb0, 0xac, 0xc1, 0x47, 0x77 }, + { 0x9c, 0xcd, 0x53, 0xfe, 0x80, 0xbe, 0x78, 0x6a, + 0xa9, 0x84, 0x63, 0x84, 0x62, 0xfb, 0x28, 0xaf, + 0xdf, 0x12, 0x2b, 0x34, 0xd7, 0x8f, 0x46, 0x87, + 0xec, 0x63, 0x2b, 0xb1, 0x9d, 0xe2, 0x37, 0x1a }, + { 0xcb, 0xd4, 0x80, 0x52, 0xc4, 0x8d, 0x78, 0x84, + 0x66, 0xa3, 0xe8, 0x11, 0x8c, 0x56, 0xc9, 0x7f, + 0xe1, 0x46, 0xe5, 0x54, 0x6f, 0xaa, 0xf9, 0x3e, + 0x2b, 0xc3, 0xc4, 0x7e, 0x45, 0x93, 0x97, 0x53 }, + { 0x25, 0x68, 0x83, 0xb1, 0x4e, 0x2a, 0xf4, 0x4d, + 0xad, 0xb2, 0x8e, 0x1b, 0x34, 0xb2, 0xac, 0x0f, + 0x0f, 0x4c, 0x91, 0xc3, 0x4e, 0xc9, 0x16, 0x9e, + 0x29, 0x03, 0x61, 0x58, 0xac, 0xaa, 0x95, 0xb9 }, + { 0x44, 0x71, 0xb9, 0x1a, 0xb4, 0x2d, 0xb7, 0xc4, + 0xdd, 0x84, 0x90, 0xab, 0x95, 0xa2, 0xee, 0x8d, + 0x04, 0xe3, 0xef, 0x5c, 0x3d, 0x6f, 0xc7, 0x1a, + 0xc7, 0x4b, 0x2b, 0x26, 0x91, 0x4d, 0x16, 0x41 }, + { 0xa5, 0xeb, 0x08, 0x03, 0x8f, 0x8f, 0x11, 0x55, + 0xed, 0x86, 0xe6, 0x31, 0x90, 0x6f, 0xc1, 0x30, + 0x95, 0xf6, 0xbb, 0xa4, 0x1d, 0xe5, 0xd4, 0xe7, + 0x95, 0x75, 0x8e, 0xc8, 0xc8, 0xdf, 0x8a, 0xf1 }, + { 0xdc, 0x1d, 0xb6, 0x4e, 0xd8, 0xb4, 0x8a, 0x91, + 0x0e, 0x06, 0x0a, 0x6b, 0x86, 0x63, 0x74, 0xc5, + 0x78, 0x78, 0x4e, 0x9a, 0xc4, 0x9a, 0xb2, 0x77, + 0x40, 0x92, 0xac, 0x71, 0x50, 0x19, 0x34, 0xac }, + { 0x28, 0x54, 0x13, 0xb2, 0xf2, 0xee, 0x87, 0x3d, + 0x34, 0x31, 0x9e, 0xe0, 0xbb, 0xfb, 0xb9, 0x0f, + 0x32, 0xda, 0x43, 0x4c, 0xc8, 0x7e, 0x3d, 0xb5, + 0xed, 0x12, 0x1b, 0xb3, 0x98, 0xed, 0x96, 0x4b }, + { 0x02, 0x16, 0xe0, 0xf8, 0x1f, 0x75, 0x0f, 0x26, + 0xf1, 0x99, 0x8b, 0xc3, 0x93, 0x4e, 0x3e, 0x12, + 0x4c, 0x99, 0x45, 0xe6, 0x85, 0xa6, 0x0b, 0x25, + 0xe8, 0xfb, 0xd9, 0x62, 0x5a, 0xb6, 0xb5, 0x99 }, + { 0x38, 0xc4, 0x10, 0xf5, 0xb9, 0xd4, 0x07, 0x20, + 0x50, 0x75, 0x5b, 0x31, 0xdc, 0xa8, 0x9f, 0xd5, + 0x39, 0x5c, 0x67, 0x85, 0xee, 0xb3, 0xd7, 0x90, + 0xf3, 0x20, 0xff, 0x94, 
0x1c, 0x5a, 0x93, 0xbf }, + { 0xf1, 0x84, 0x17, 0xb3, 0x9d, 0x61, 0x7a, 0xb1, + 0xc1, 0x8f, 0xdf, 0x91, 0xeb, 0xd0, 0xfc, 0x6d, + 0x55, 0x16, 0xbb, 0x34, 0xcf, 0x39, 0x36, 0x40, + 0x37, 0xbc, 0xe8, 0x1f, 0xa0, 0x4c, 0xec, 0xb1 }, + { 0x1f, 0xa8, 0x77, 0xde, 0x67, 0x25, 0x9d, 0x19, + 0x86, 0x3a, 0x2a, 0x34, 0xbc, 0xc6, 0x96, 0x2a, + 0x2b, 0x25, 0xfc, 0xbf, 0x5c, 0xbe, 0xcd, 0x7e, + 0xde, 0x8f, 0x1f, 0xa3, 0x66, 0x88, 0xa7, 0x96 }, + { 0x5b, 0xd1, 0x69, 0xe6, 0x7c, 0x82, 0xc2, 0xc2, + 0xe9, 0x8e, 0xf7, 0x00, 0x8b, 0xdf, 0x26, 0x1f, + 0x2d, 0xdf, 0x30, 0xb1, 0xc0, 0x0f, 0x9e, 0x7f, + 0x27, 0x5b, 0xb3, 0xe8, 0xa2, 0x8d, 0xc9, 0xa2 }, + { 0xc8, 0x0a, 0xbe, 0xeb, 0xb6, 0x69, 0xad, 0x5d, + 0xee, 0xb5, 0xf5, 0xec, 0x8e, 0xa6, 0xb7, 0xa0, + 0x5d, 0xdf, 0x7d, 0x31, 0xec, 0x4c, 0x0a, 0x2e, + 0xe2, 0x0b, 0x0b, 0x98, 0xca, 0xec, 0x67, 0x46 }, + { 0xe7, 0x6d, 0x3f, 0xbd, 0xa5, 0xba, 0x37, 0x4e, + 0x6b, 0xf8, 0xe5, 0x0f, 0xad, 0xc3, 0xbb, 0xb9, + 0xba, 0x5c, 0x20, 0x6e, 0xbd, 0xec, 0x89, 0xa3, + 0xa5, 0x4c, 0xf3, 0xdd, 0x84, 0xa0, 0x70, 0x16 }, + { 0x7b, 0xba, 0x9d, 0xc5, 0xb5, 0xdb, 0x20, 0x71, + 0xd1, 0x77, 0x52, 0xb1, 0x04, 0x4c, 0x1e, 0xce, + 0xd9, 0x6a, 0xaf, 0x2d, 0xd4, 0x6e, 0x9b, 0x43, + 0x37, 0x50, 0xe8, 0xea, 0x0d, 0xcc, 0x18, 0x70 }, + { 0xf2, 0x9b, 0x1b, 0x1a, 0xb9, 0xba, 0xb1, 0x63, + 0x01, 0x8e, 0xe3, 0xda, 0x15, 0x23, 0x2c, 0xca, + 0x78, 0xec, 0x52, 0xdb, 0xc3, 0x4e, 0xda, 0x5b, + 0x82, 0x2e, 0xc1, 0xd8, 0x0f, 0xc2, 0x1b, 0xd0 }, + { 0x9e, 0xe3, 0xe3, 0xe7, 0xe9, 0x00, 0xf1, 0xe1, + 0x1d, 0x30, 0x8c, 0x4b, 0x2b, 0x30, 0x76, 0xd2, + 0x72, 0xcf, 0x70, 0x12, 0x4f, 0x9f, 0x51, 0xe1, + 0xda, 0x60, 0xf3, 0x78, 0x46, 0xcd, 0xd2, 0xf4 }, + { 0x70, 0xea, 0x3b, 0x01, 0x76, 0x92, 0x7d, 0x90, + 0x96, 0xa1, 0x85, 0x08, 0xcd, 0x12, 0x3a, 0x29, + 0x03, 0x25, 0x92, 0x0a, 0x9d, 0x00, 0xa8, 0x9b, + 0x5d, 0xe0, 0x42, 0x73, 0xfb, 0xc7, 0x6b, 0x85 }, + { 0x67, 0xde, 0x25, 0xc0, 0x2a, 0x4a, 0xab, 0xa2, + 0x3b, 0xdc, 0x97, 0x3c, 0x8b, 0xb0, 0xb5, 0x79, + 0x6d, 0x47, 0xcc, 0x06, 0x59, 
0xd4, 0x3d, 0xff, + 0x1f, 0x97, 0xde, 0x17, 0x49, 0x63, 0xb6, 0x8e }, + { 0xb2, 0x16, 0x8e, 0x4e, 0x0f, 0x18, 0xb0, 0xe6, + 0x41, 0x00, 0xb5, 0x17, 0xed, 0x95, 0x25, 0x7d, + 0x73, 0xf0, 0x62, 0x0d, 0xf8, 0x85, 0xc1, 0x3d, + 0x2e, 0xcf, 0x79, 0x36, 0x7b, 0x38, 0x4c, 0xee }, + { 0x2e, 0x7d, 0xec, 0x24, 0x28, 0x85, 0x3b, 0x2c, + 0x71, 0x76, 0x07, 0x45, 0x54, 0x1f, 0x7a, 0xfe, + 0x98, 0x25, 0xb5, 0xdd, 0x77, 0xdf, 0x06, 0x51, + 0x1d, 0x84, 0x41, 0xa9, 0x4b, 0xac, 0xc9, 0x27 }, + { 0xca, 0x9f, 0xfa, 0xc4, 0xc4, 0x3f, 0x0b, 0x48, + 0x46, 0x1d, 0xc5, 0xc2, 0x63, 0xbe, 0xa3, 0xf6, + 0xf0, 0x06, 0x11, 0xce, 0xac, 0xab, 0xf6, 0xf8, + 0x95, 0xba, 0x2b, 0x01, 0x01, 0xdb, 0xb6, 0x8d }, + { 0x74, 0x10, 0xd4, 0x2d, 0x8f, 0xd1, 0xd5, 0xe9, + 0xd2, 0xf5, 0x81, 0x5c, 0xb9, 0x34, 0x17, 0x99, + 0x88, 0x28, 0xef, 0x3c, 0x42, 0x30, 0xbf, 0xbd, + 0x41, 0x2d, 0xf0, 0xa4, 0xa7, 0xa2, 0x50, 0x7a }, + { 0x50, 0x10, 0xf6, 0x84, 0x51, 0x6d, 0xcc, 0xd0, + 0xb6, 0xee, 0x08, 0x52, 0xc2, 0x51, 0x2b, 0x4d, + 0xc0, 0x06, 0x6c, 0xf0, 0xd5, 0x6f, 0x35, 0x30, + 0x29, 0x78, 0xdb, 0x8a, 0xe3, 0x2c, 0x6a, 0x81 }, + { 0xac, 0xaa, 0xb5, 0x85, 0xf7, 0xb7, 0x9b, 0x71, + 0x99, 0x35, 0xce, 0xb8, 0x95, 0x23, 0xdd, 0xc5, + 0x48, 0x27, 0xf7, 0x5c, 0x56, 0x88, 0x38, 0x56, + 0x15, 0x4a, 0x56, 0xcd, 0xcd, 0x5e, 0xe9, 0x88 }, + { 0x66, 0x6d, 0xe5, 0xd1, 0x44, 0x0f, 0xee, 0x73, + 0x31, 0xaa, 0xf0, 0x12, 0x3a, 0x62, 0xef, 0x2d, + 0x8b, 0xa5, 0x74, 0x53, 0xa0, 0x76, 0x96, 0x35, + 0xac, 0x6c, 0xd0, 0x1e, 0x63, 0x3f, 0x77, 0x12 }, + { 0xa6, 0xf9, 0x86, 0x58, 0xf6, 0xea, 0xba, 0xf9, + 0x02, 0xd8, 0xb3, 0x87, 0x1a, 0x4b, 0x10, 0x1d, + 0x16, 0x19, 0x6e, 0x8a, 0x4b, 0x24, 0x1e, 0x15, + 0x58, 0xfe, 0x29, 0x96, 0x6e, 0x10, 0x3e, 0x8d }, + { 0x89, 0x15, 0x46, 0xa8, 0xb2, 0x9f, 0x30, 0x47, + 0xdd, 0xcf, 0xe5, 0xb0, 0x0e, 0x45, 0xfd, 0x55, + 0x75, 0x63, 0x73, 0x10, 0x5e, 0xa8, 0x63, 0x7d, + 0xfc, 0xff, 0x54, 0x7b, 0x6e, 0xa9, 0x53, 0x5f }, + { 0x18, 0xdf, 0xbc, 0x1a, 0xc5, 0xd2, 0x5b, 0x07, + 0x61, 0x13, 0x7d, 0xbd, 0x22, 0xc1, 
0x7c, 0x82, + 0x9d, 0x0f, 0x0e, 0xf1, 0xd8, 0x23, 0x44, 0xe9, + 0xc8, 0x9c, 0x28, 0x66, 0x94, 0xda, 0x24, 0xe8 }, + { 0xb5, 0x4b, 0x9b, 0x67, 0xf8, 0xfe, 0xd5, 0x4b, + 0xbf, 0x5a, 0x26, 0x66, 0xdb, 0xdf, 0x4b, 0x23, + 0xcf, 0xf1, 0xd1, 0xb6, 0xf4, 0xaf, 0xc9, 0x85, + 0xb2, 0xe6, 0xd3, 0x30, 0x5a, 0x9f, 0xf8, 0x0f }, + { 0x7d, 0xb4, 0x42, 0xe1, 0x32, 0xba, 0x59, 0xbc, + 0x12, 0x89, 0xaa, 0x98, 0xb0, 0xd3, 0xe8, 0x06, + 0x00, 0x4f, 0x8e, 0xc1, 0x28, 0x11, 0xaf, 0x1e, + 0x2e, 0x33, 0xc6, 0x9b, 0xfd, 0xe7, 0x29, 0xe1 }, + { 0x25, 0x0f, 0x37, 0xcd, 0xc1, 0x5e, 0x81, 0x7d, + 0x2f, 0x16, 0x0d, 0x99, 0x56, 0xc7, 0x1f, 0xe3, + 0xeb, 0x5d, 0xb7, 0x45, 0x56, 0xe4, 0xad, 0xf9, + 0xa4, 0xff, 0xaf, 0xba, 0x74, 0x01, 0x03, 0x96 }, + { 0x4a, 0xb8, 0xa3, 0xdd, 0x1d, 0xdf, 0x8a, 0xd4, + 0x3d, 0xab, 0x13, 0xa2, 0x7f, 0x66, 0xa6, 0x54, + 0x4f, 0x29, 0x05, 0x97, 0xfa, 0x96, 0x04, 0x0e, + 0x0e, 0x1d, 0xb9, 0x26, 0x3a, 0xa4, 0x79, 0xf8 }, + { 0xee, 0x61, 0x72, 0x7a, 0x07, 0x66, 0xdf, 0x93, + 0x9c, 0xcd, 0xc8, 0x60, 0x33, 0x40, 0x44, 0xc7, + 0x9a, 0x3c, 0x9b, 0x15, 0x62, 0x00, 0xbc, 0x3a, + 0xa3, 0x29, 0x73, 0x48, 0x3d, 0x83, 0x41, 0xae }, + { 0x3f, 0x68, 0xc7, 0xec, 0x63, 0xac, 0x11, 0xeb, + 0xb9, 0x8f, 0x94, 0xb3, 0x39, 0xb0, 0x5c, 0x10, + 0x49, 0x84, 0xfd, 0xa5, 0x01, 0x03, 0x06, 0x01, + 0x44, 0xe5, 0xa2, 0xbf, 0xcc, 0xc9, 0xda, 0x95 }, + { 0x05, 0x6f, 0x29, 0x81, 0x6b, 0x8a, 0xf8, 0xf5, + 0x66, 0x82, 0xbc, 0x4d, 0x7c, 0xf0, 0x94, 0x11, + 0x1d, 0xa7, 0x73, 0x3e, 0x72, 0x6c, 0xd1, 0x3d, + 0x6b, 0x3e, 0x8e, 0xa0, 0x3e, 0x92, 0xa0, 0xd5 }, + { 0xf5, 0xec, 0x43, 0xa2, 0x8a, 0xcb, 0xef, 0xf1, + 0xf3, 0x31, 0x8a, 0x5b, 0xca, 0xc7, 0xc6, 0x6d, + 0xdb, 0x52, 0x30, 0xb7, 0x9d, 0xb2, 0xd1, 0x05, + 0xbc, 0xbe, 0x15, 0xf3, 0xc1, 0x14, 0x8d, 0x69 }, + { 0x2a, 0x69, 0x60, 0xad, 0x1d, 0x8d, 0xd5, 0x47, + 0x55, 0x5c, 0xfb, 0xd5, 0xe4, 0x60, 0x0f, 0x1e, + 0xaa, 0x1c, 0x8e, 0xda, 0x34, 0xde, 0x03, 0x74, + 0xec, 0x4a, 0x26, 0xea, 0xaa, 0xa3, 0x3b, 0x4e }, + { 0xdc, 0xc1, 0xea, 0x7b, 0xaa, 0xb9, 0x33, 
0x84, + 0xf7, 0x6b, 0x79, 0x68, 0x66, 0x19, 0x97, 0x54, + 0x74, 0x2f, 0x7b, 0x96, 0xd6, 0xb4, 0xc1, 0x20, + 0x16, 0x5c, 0x04, 0xa6, 0xc4, 0xf5, 0xce, 0x10 }, + { 0x13, 0xd5, 0xdf, 0x17, 0x92, 0x21, 0x37, 0x9c, + 0x6a, 0x78, 0xc0, 0x7c, 0x79, 0x3f, 0xf5, 0x34, + 0x87, 0xca, 0xe6, 0xbf, 0x9f, 0xe8, 0x82, 0x54, + 0x1a, 0xb0, 0xe7, 0x35, 0xe3, 0xea, 0xda, 0x3b }, + { 0x8c, 0x59, 0xe4, 0x40, 0x76, 0x41, 0xa0, 0x1e, + 0x8f, 0xf9, 0x1f, 0x99, 0x80, 0xdc, 0x23, 0x6f, + 0x4e, 0xcd, 0x6f, 0xcf, 0x52, 0x58, 0x9a, 0x09, + 0x9a, 0x96, 0x16, 0x33, 0x96, 0x77, 0x14, 0xe1 }, + { 0x83, 0x3b, 0x1a, 0xc6, 0xa2, 0x51, 0xfd, 0x08, + 0xfd, 0x6d, 0x90, 0x8f, 0xea, 0x2a, 0x4e, 0xe1, + 0xe0, 0x40, 0xbc, 0xa9, 0x3f, 0xc1, 0xa3, 0x8e, + 0xc3, 0x82, 0x0e, 0x0c, 0x10, 0xbd, 0x82, 0xea }, + { 0xa2, 0x44, 0xf9, 0x27, 0xf3, 0xb4, 0x0b, 0x8f, + 0x6c, 0x39, 0x15, 0x70, 0xc7, 0x65, 0x41, 0x8f, + 0x2f, 0x6e, 0x70, 0x8e, 0xac, 0x90, 0x06, 0xc5, + 0x1a, 0x7f, 0xef, 0xf4, 0xaf, 0x3b, 0x2b, 0x9e }, + { 0x3d, 0x99, 0xed, 0x95, 0x50, 0xcf, 0x11, 0x96, + 0xe6, 0xc4, 0xd2, 0x0c, 0x25, 0x96, 0x20, 0xf8, + 0x58, 0xc3, 0xd7, 0x03, 0x37, 0x4c, 0x12, 0x8c, + 0xe7, 0xb5, 0x90, 0x31, 0x0c, 0x83, 0x04, 0x6d }, + { 0x2b, 0x35, 0xc4, 0x7d, 0x7b, 0x87, 0x76, 0x1f, + 0x0a, 0xe4, 0x3a, 0xc5, 0x6a, 0xc2, 0x7b, 0x9f, + 0x25, 0x83, 0x03, 0x67, 0xb5, 0x95, 0xbe, 0x8c, + 0x24, 0x0e, 0x94, 0x60, 0x0c, 0x6e, 0x33, 0x12 }, + { 0x5d, 0x11, 0xed, 0x37, 0xd2, 0x4d, 0xc7, 0x67, + 0x30, 0x5c, 0xb7, 0xe1, 0x46, 0x7d, 0x87, 0xc0, + 0x65, 0xac, 0x4b, 0xc8, 0xa4, 0x26, 0xde, 0x38, + 0x99, 0x1f, 0xf5, 0x9a, 0xa8, 0x73, 0x5d, 0x02 }, + { 0xb8, 0x36, 0x47, 0x8e, 0x1c, 0xa0, 0x64, 0x0d, + 0xce, 0x6f, 0xd9, 0x10, 0xa5, 0x09, 0x62, 0x72, + 0xc8, 0x33, 0x09, 0x90, 0xcd, 0x97, 0x86, 0x4a, + 0xc2, 0xbf, 0x14, 0xef, 0x6b, 0x23, 0x91, 0x4a }, + { 0x91, 0x00, 0xf9, 0x46, 0xd6, 0xcc, 0xde, 0x3a, + 0x59, 0x7f, 0x90, 0xd3, 0x9f, 0xc1, 0x21, 0x5b, + 0xad, 0xdc, 0x74, 0x13, 0x64, 0x3d, 0x85, 0xc2, + 0x1c, 0x3e, 0xee, 0x5d, 0x2d, 0xd3, 0x28, 0x94 }, + { 
0xda, 0x70, 0xee, 0xdd, 0x23, 0xe6, 0x63, 0xaa, + 0x1a, 0x74, 0xb9, 0x76, 0x69, 0x35, 0xb4, 0x79, + 0x22, 0x2a, 0x72, 0xaf, 0xba, 0x5c, 0x79, 0x51, + 0x58, 0xda, 0xd4, 0x1a, 0x3b, 0xd7, 0x7e, 0x40 }, + { 0xf0, 0x67, 0xed, 0x6a, 0x0d, 0xbd, 0x43, 0xaa, + 0x0a, 0x92, 0x54, 0xe6, 0x9f, 0xd6, 0x6b, 0xdd, + 0x8a, 0xcb, 0x87, 0xde, 0x93, 0x6c, 0x25, 0x8c, + 0xfb, 0x02, 0x28, 0x5f, 0x2c, 0x11, 0xfa, 0x79 }, + { 0x71, 0x5c, 0x99, 0xc7, 0xd5, 0x75, 0x80, 0xcf, + 0x97, 0x53, 0xb4, 0xc1, 0xd7, 0x95, 0xe4, 0x5a, + 0x83, 0xfb, 0xb2, 0x28, 0xc0, 0xd3, 0x6f, 0xbe, + 0x20, 0xfa, 0xf3, 0x9b, 0xdd, 0x6d, 0x4e, 0x85 }, + { 0xe4, 0x57, 0xd6, 0xad, 0x1e, 0x67, 0xcb, 0x9b, + 0xbd, 0x17, 0xcb, 0xd6, 0x98, 0xfa, 0x6d, 0x7d, + 0xae, 0x0c, 0x9b, 0x7a, 0xd6, 0xcb, 0xd6, 0x53, + 0x96, 0x34, 0xe3, 0x2a, 0x71, 0x9c, 0x84, 0x92 }, + { 0xec, 0xe3, 0xea, 0x81, 0x03, 0xe0, 0x24, 0x83, + 0xc6, 0x4a, 0x70, 0xa4, 0xbd, 0xce, 0xe8, 0xce, + 0xb6, 0x27, 0x8f, 0x25, 0x33, 0xf3, 0xf4, 0x8d, + 0xbe, 0xed, 0xfb, 0xa9, 0x45, 0x31, 0xd4, 0xae }, + { 0x38, 0x8a, 0xa5, 0xd3, 0x66, 0x7a, 0x97, 0xc6, + 0x8d, 0x3d, 0x56, 0xf8, 0xf3, 0xee, 0x8d, 0x3d, + 0x36, 0x09, 0x1f, 0x17, 0xfe, 0x5d, 0x1b, 0x0d, + 0x5d, 0x84, 0xc9, 0x3b, 0x2f, 0xfe, 0x40, 0xbd }, + { 0x8b, 0x6b, 0x31, 0xb9, 0xad, 0x7c, 0x3d, 0x5c, + 0xd8, 0x4b, 0xf9, 0x89, 0x47, 0xb9, 0xcd, 0xb5, + 0x9d, 0xf8, 0xa2, 0x5f, 0xf7, 0x38, 0x10, 0x10, + 0x13, 0xbe, 0x4f, 0xd6, 0x5e, 0x1d, 0xd1, 0xa3 }, + { 0x06, 0x62, 0x91, 0xf6, 0xbb, 0xd2, 0x5f, 0x3c, + 0x85, 0x3d, 0xb7, 0xd8, 0xb9, 0x5c, 0x9a, 0x1c, + 0xfb, 0x9b, 0xf1, 0xc1, 0xc9, 0x9f, 0xb9, 0x5a, + 0x9b, 0x78, 0x69, 0xd9, 0x0f, 0x1c, 0x29, 0x03 }, + { 0xa7, 0x07, 0xef, 0xbc, 0xcd, 0xce, 0xed, 0x42, + 0x96, 0x7a, 0x66, 0xf5, 0x53, 0x9b, 0x93, 0xed, + 0x75, 0x60, 0xd4, 0x67, 0x30, 0x40, 0x16, 0xc4, + 0x78, 0x0d, 0x77, 0x55, 0xa5, 0x65, 0xd4, 0xc4 }, + { 0x38, 0xc5, 0x3d, 0xfb, 0x70, 0xbe, 0x7e, 0x79, + 0x2b, 0x07, 0xa6, 0xa3, 0x5b, 0x8a, 0x6a, 0x0a, + 0xba, 0x02, 0xc5, 0xc5, 0xf3, 0x8b, 0xaf, 0x5c, + 0x82, 0x3f, 
0xdf, 0xd9, 0xe4, 0x2d, 0x65, 0x7e }, + { 0xf2, 0x91, 0x13, 0x86, 0x50, 0x1d, 0x9a, 0xb9, + 0xd7, 0x20, 0xcf, 0x8a, 0xd1, 0x05, 0x03, 0xd5, + 0x63, 0x4b, 0xf4, 0xb7, 0xd1, 0x2b, 0x56, 0xdf, + 0xb7, 0x4f, 0xec, 0xc6, 0xe4, 0x09, 0x3f, 0x68 }, + { 0xc6, 0xf2, 0xbd, 0xd5, 0x2b, 0x81, 0xe6, 0xe4, + 0xf6, 0x59, 0x5a, 0xbd, 0x4d, 0x7f, 0xb3, 0x1f, + 0x65, 0x11, 0x69, 0xd0, 0x0f, 0xf3, 0x26, 0x92, + 0x6b, 0x34, 0x94, 0x7b, 0x28, 0xa8, 0x39, 0x59 }, + { 0x29, 0x3d, 0x94, 0xb1, 0x8c, 0x98, 0xbb, 0x32, + 0x23, 0x36, 0x6b, 0x8c, 0xe7, 0x4c, 0x28, 0xfb, + 0xdf, 0x28, 0xe1, 0xf8, 0x4a, 0x33, 0x50, 0xb0, + 0xeb, 0x2d, 0x18, 0x04, 0xa5, 0x77, 0x57, 0x9b }, + { 0x2c, 0x2f, 0xa5, 0xc0, 0xb5, 0x15, 0x33, 0x16, + 0x5b, 0xc3, 0x75, 0xc2, 0x2e, 0x27, 0x81, 0x76, + 0x82, 0x70, 0xa3, 0x83, 0x98, 0x5d, 0x13, 0xbd, + 0x6b, 0x67, 0xb6, 0xfd, 0x67, 0xf8, 0x89, 0xeb }, + { 0xca, 0xa0, 0x9b, 0x82, 0xb7, 0x25, 0x62, 0xe4, + 0x3f, 0x4b, 0x22, 0x75, 0xc0, 0x91, 0x91, 0x8e, + 0x62, 0x4d, 0x91, 0x16, 0x61, 0xcc, 0x81, 0x1b, + 0xb5, 0xfa, 0xec, 0x51, 0xf6, 0x08, 0x8e, 0xf7 }, + { 0x24, 0x76, 0x1e, 0x45, 0xe6, 0x74, 0x39, 0x53, + 0x79, 0xfb, 0x17, 0x72, 0x9c, 0x78, 0xcb, 0x93, + 0x9e, 0x6f, 0x74, 0xc5, 0xdf, 0xfb, 0x9c, 0x96, + 0x1f, 0x49, 0x59, 0x82, 0xc3, 0xed, 0x1f, 0xe3 }, + { 0x55, 0xb7, 0x0a, 0x82, 0x13, 0x1e, 0xc9, 0x48, + 0x88, 0xd7, 0xab, 0x54, 0xa7, 0xc5, 0x15, 0x25, + 0x5c, 0x39, 0x38, 0xbb, 0x10, 0xbc, 0x78, 0x4d, + 0xc9, 0xb6, 0x7f, 0x07, 0x6e, 0x34, 0x1a, 0x73 }, + { 0x6a, 0xb9, 0x05, 0x7b, 0x97, 0x7e, 0xbc, 0x3c, + 0xa4, 0xd4, 0xce, 0x74, 0x50, 0x6c, 0x25, 0xcc, + 0xcd, 0xc5, 0x66, 0x49, 0x7c, 0x45, 0x0b, 0x54, + 0x15, 0xa3, 0x94, 0x86, 0xf8, 0x65, 0x7a, 0x03 }, + { 0x24, 0x06, 0x6d, 0xee, 0xe0, 0xec, 0xee, 0x15, + 0xa4, 0x5f, 0x0a, 0x32, 0x6d, 0x0f, 0x8d, 0xbc, + 0x79, 0x76, 0x1e, 0xbb, 0x93, 0xcf, 0x8c, 0x03, + 0x77, 0xaf, 0x44, 0x09, 0x78, 0xfc, 0xf9, 0x94 }, + { 0x20, 0x00, 0x0d, 0x3f, 0x66, 0xba, 0x76, 0x86, + 0x0d, 0x5a, 0x95, 0x06, 0x88, 0xb9, 0xaa, 0x0d, + 0x76, 0xcf, 0xea, 
0x59, 0xb0, 0x05, 0xd8, 0x59, + 0x91, 0x4b, 0x1a, 0x46, 0x65, 0x3a, 0x93, 0x9b }, + { 0xb9, 0x2d, 0xaa, 0x79, 0x60, 0x3e, 0x3b, 0xdb, + 0xc3, 0xbf, 0xe0, 0xf4, 0x19, 0xe4, 0x09, 0xb2, + 0xea, 0x10, 0xdc, 0x43, 0x5b, 0xee, 0xfe, 0x29, + 0x59, 0xda, 0x16, 0x89, 0x5d, 0x5d, 0xca, 0x1c }, + { 0xe9, 0x47, 0x94, 0x87, 0x05, 0xb2, 0x06, 0xd5, + 0x72, 0xb0, 0xe8, 0xf6, 0x2f, 0x66, 0xa6, 0x55, + 0x1c, 0xbd, 0x6b, 0xc3, 0x05, 0xd2, 0x6c, 0xe7, + 0x53, 0x9a, 0x12, 0xf9, 0xaa, 0xdf, 0x75, 0x71 }, + { 0x3d, 0x67, 0xc1, 0xb3, 0xf9, 0xb2, 0x39, 0x10, + 0xe3, 0xd3, 0x5e, 0x6b, 0x0f, 0x2c, 0xcf, 0x44, + 0xa0, 0xb5, 0x40, 0xa4, 0x5c, 0x18, 0xba, 0x3c, + 0x36, 0x26, 0x4d, 0xd4, 0x8e, 0x96, 0xaf, 0x6a }, + { 0xc7, 0x55, 0x8b, 0xab, 0xda, 0x04, 0xbc, 0xcb, + 0x76, 0x4d, 0x0b, 0xbf, 0x33, 0x58, 0x42, 0x51, + 0x41, 0x90, 0x2d, 0x22, 0x39, 0x1d, 0x9f, 0x8c, + 0x59, 0x15, 0x9f, 0xec, 0x9e, 0x49, 0xb1, 0x51 }, + { 0x0b, 0x73, 0x2b, 0xb0, 0x35, 0x67, 0x5a, 0x50, + 0xff, 0x58, 0xf2, 0xc2, 0x42, 0xe4, 0x71, 0x0a, + 0xec, 0xe6, 0x46, 0x70, 0x07, 0x9c, 0x13, 0x04, + 0x4c, 0x79, 0xc9, 0xb7, 0x49, 0x1f, 0x70, 0x00 }, + { 0xd1, 0x20, 0xb5, 0xef, 0x6d, 0x57, 0xeb, 0xf0, + 0x6e, 0xaf, 0x96, 0xbc, 0x93, 0x3c, 0x96, 0x7b, + 0x16, 0xcb, 0xe6, 0xe2, 0xbf, 0x00, 0x74, 0x1c, + 0x30, 0xaa, 0x1c, 0x54, 0xba, 0x64, 0x80, 0x1f }, + { 0x58, 0xd2, 0x12, 0xad, 0x6f, 0x58, 0xae, 0xf0, + 0xf8, 0x01, 0x16, 0xb4, 0x41, 0xe5, 0x7f, 0x61, + 0x95, 0xbf, 0xef, 0x26, 0xb6, 0x14, 0x63, 0xed, + 0xec, 0x11, 0x83, 0xcd, 0xb0, 0x4f, 0xe7, 0x6d }, + { 0xb8, 0x83, 0x6f, 0x51, 0xd1, 0xe2, 0x9b, 0xdf, + 0xdb, 0xa3, 0x25, 0x56, 0x53, 0x60, 0x26, 0x8b, + 0x8f, 0xad, 0x62, 0x74, 0x73, 0xed, 0xec, 0xef, + 0x7e, 0xae, 0xfe, 0xe8, 0x37, 0xc7, 0x40, 0x03 }, + { 0xc5, 0x47, 0xa3, 0xc1, 0x24, 0xae, 0x56, 0x85, + 0xff, 0xa7, 0xb8, 0xed, 0xaf, 0x96, 0xec, 0x86, + 0xf8, 0xb2, 0xd0, 0xd5, 0x0c, 0xee, 0x8b, 0xe3, + 0xb1, 0xf0, 0xc7, 0x67, 0x63, 0x06, 0x9d, 0x9c }, + { 0x5d, 0x16, 0x8b, 0x76, 0x9a, 0x2f, 0x67, 0x85, + 0x3d, 0x62, 0x95, 0xf7, 
0x56, 0x8b, 0xe4, 0x0b, + 0xb7, 0xa1, 0x6b, 0x8d, 0x65, 0xba, 0x87, 0x63, + 0x5d, 0x19, 0x78, 0xd2, 0xab, 0x11, 0xba, 0x2a }, + { 0xa2, 0xf6, 0x75, 0xdc, 0x73, 0x02, 0x63, 0x8c, + 0xb6, 0x02, 0x01, 0x06, 0x4c, 0xa5, 0x50, 0x77, + 0x71, 0x4d, 0x71, 0xfe, 0x09, 0x6a, 0x31, 0x5f, + 0x2f, 0xe7, 0x40, 0x12, 0x77, 0xca, 0xa5, 0xaf }, + { 0xc8, 0xaa, 0xb5, 0xcd, 0x01, 0x60, 0xae, 0x78, + 0xcd, 0x2e, 0x8a, 0xc5, 0xfb, 0x0e, 0x09, 0x3c, + 0xdb, 0x5c, 0x4b, 0x60, 0x52, 0xa0, 0xa9, 0x7b, + 0xb0, 0x42, 0x16, 0x82, 0x6f, 0xa7, 0xa4, 0x37 }, + { 0xff, 0x68, 0xca, 0x40, 0x35, 0xbf, 0xeb, 0x43, + 0xfb, 0xf1, 0x45, 0xfd, 0xdd, 0x5e, 0x43, 0xf1, + 0xce, 0xa5, 0x4f, 0x11, 0xf7, 0xbe, 0xe1, 0x30, + 0x58, 0xf0, 0x27, 0x32, 0x9a, 0x4a, 0x5f, 0xa4 }, + { 0x1d, 0x4e, 0x54, 0x87, 0xae, 0x3c, 0x74, 0x0f, + 0x2b, 0xa6, 0xe5, 0x41, 0xac, 0x91, 0xbc, 0x2b, + 0xfc, 0xd2, 0x99, 0x9c, 0x51, 0x8d, 0x80, 0x7b, + 0x42, 0x67, 0x48, 0x80, 0x3a, 0x35, 0x0f, 0xd4 }, + { 0x6d, 0x24, 0x4e, 0x1a, 0x06, 0xce, 0x4e, 0xf5, + 0x78, 0xdd, 0x0f, 0x63, 0xaf, 0xf0, 0x93, 0x67, + 0x06, 0x73, 0x51, 0x19, 0xca, 0x9c, 0x8d, 0x22, + 0xd8, 0x6c, 0x80, 0x14, 0x14, 0xab, 0x97, 0x41 }, + { 0xde, 0xcf, 0x73, 0x29, 0xdb, 0xcc, 0x82, 0x7b, + 0x8f, 0xc5, 0x24, 0xc9, 0x43, 0x1e, 0x89, 0x98, + 0x02, 0x9e, 0xce, 0x12, 0xce, 0x93, 0xb7, 0xb2, + 0xf3, 0xe7, 0x69, 0xa9, 0x41, 0xfb, 0x8c, 0xea }, + { 0x2f, 0xaf, 0xcc, 0x0f, 0x2e, 0x63, 0xcb, 0xd0, + 0x77, 0x55, 0xbe, 0x7b, 0x75, 0xec, 0xea, 0x0a, + 0xdf, 0xf9, 0xaa, 0x5e, 0xde, 0x2a, 0x52, 0xfd, + 0xab, 0x4d, 0xfd, 0x03, 0x74, 0xcd, 0x48, 0x3f }, + { 0xaa, 0x85, 0x01, 0x0d, 0xd4, 0x6a, 0x54, 0x6b, + 0x53, 0x5e, 0xf4, 0xcf, 0x5f, 0x07, 0xd6, 0x51, + 0x61, 0xe8, 0x98, 0x28, 0xf3, 0xa7, 0x7d, 0xb7, + 0xb9, 0xb5, 0x6f, 0x0d, 0xf5, 0x9a, 0xae, 0x45 }, + { 0x07, 0xe8, 0xe1, 0xee, 0x73, 0x2c, 0xb0, 0xd3, + 0x56, 0xc9, 0xc0, 0xd1, 0x06, 0x9c, 0x89, 0xd1, + 0x7a, 0xdf, 0x6a, 0x9a, 0x33, 0x4f, 0x74, 0x5e, + 0xc7, 0x86, 0x73, 0x32, 0x54, 0x8c, 0xa8, 0xe9 }, + { 0x0e, 0x01, 0xe8, 0x1c, 0xad, 
0xa8, 0x16, 0x2b, + 0xfd, 0x5f, 0x8a, 0x8c, 0x81, 0x8a, 0x6c, 0x69, + 0xfe, 0xdf, 0x02, 0xce, 0xb5, 0x20, 0x85, 0x23, + 0xcb, 0xe5, 0x31, 0x3b, 0x89, 0xca, 0x10, 0x53 }, + { 0x6b, 0xb6, 0xc6, 0x47, 0x26, 0x55, 0x08, 0x43, + 0x99, 0x85, 0x2e, 0x00, 0x24, 0x9f, 0x8c, 0xb2, + 0x47, 0x89, 0x6d, 0x39, 0x2b, 0x02, 0xd7, 0x3b, + 0x7f, 0x0d, 0xd8, 0x18, 0xe1, 0xe2, 0x9b, 0x07 }, + { 0x42, 0xd4, 0x63, 0x6e, 0x20, 0x60, 0xf0, 0x8f, + 0x41, 0xc8, 0x82, 0xe7, 0x6b, 0x39, 0x6b, 0x11, + 0x2e, 0xf6, 0x27, 0xcc, 0x24, 0xc4, 0x3d, 0xd5, + 0xf8, 0x3a, 0x1d, 0x1a, 0x7e, 0xad, 0x71, 0x1a }, + { 0x48, 0x58, 0xc9, 0xa1, 0x88, 0xb0, 0x23, 0x4f, + 0xb9, 0xa8, 0xd4, 0x7d, 0x0b, 0x41, 0x33, 0x65, + 0x0a, 0x03, 0x0b, 0xd0, 0x61, 0x1b, 0x87, 0xc3, + 0x89, 0x2e, 0x94, 0x95, 0x1f, 0x8d, 0xf8, 0x52 }, + { 0x3f, 0xab, 0x3e, 0x36, 0x98, 0x8d, 0x44, 0x5a, + 0x51, 0xc8, 0x78, 0x3e, 0x53, 0x1b, 0xe3, 0xa0, + 0x2b, 0xe4, 0x0c, 0xd0, 0x47, 0x96, 0xcf, 0xb6, + 0x1d, 0x40, 0x34, 0x74, 0x42, 0xd3, 0xf7, 0x94 }, + { 0xeb, 0xab, 0xc4, 0x96, 0x36, 0xbd, 0x43, 0x3d, + 0x2e, 0xc8, 0xf0, 0xe5, 0x18, 0x73, 0x2e, 0xf8, + 0xfa, 0x21, 0xd4, 0xd0, 0x71, 0xcc, 0x3b, 0xc4, + 0x6c, 0xd7, 0x9f, 0xa3, 0x8a, 0x28, 0xb8, 0x10 }, + { 0xa1, 0xd0, 0x34, 0x35, 0x23, 0xb8, 0x93, 0xfc, + 0xa8, 0x4f, 0x47, 0xfe, 0xb4, 0xa6, 0x4d, 0x35, + 0x0a, 0x17, 0xd8, 0xee, 0xf5, 0x49, 0x7e, 0xce, + 0x69, 0x7d, 0x02, 0xd7, 0x91, 0x78, 0xb5, 0x91 }, + { 0x26, 0x2e, 0xbf, 0xd9, 0x13, 0x0b, 0x7d, 0x28, + 0x76, 0x0d, 0x08, 0xef, 0x8b, 0xfd, 0x3b, 0x86, + 0xcd, 0xd3, 0xb2, 0x11, 0x3d, 0x2c, 0xae, 0xf7, + 0xea, 0x95, 0x1a, 0x30, 0x3d, 0xfa, 0x38, 0x46 }, + { 0xf7, 0x61, 0x58, 0xed, 0xd5, 0x0a, 0x15, 0x4f, + 0xa7, 0x82, 0x03, 0xed, 0x23, 0x62, 0x93, 0x2f, + 0xcb, 0x82, 0x53, 0xaa, 0xe3, 0x78, 0x90, 0x3e, + 0xde, 0xd1, 0xe0, 0x3f, 0x70, 0x21, 0xa2, 0x57 }, + { 0x26, 0x17, 0x8e, 0x95, 0x0a, 0xc7, 0x22, 0xf6, + 0x7a, 0xe5, 0x6e, 0x57, 0x1b, 0x28, 0x4c, 0x02, + 0x07, 0x68, 0x4a, 0x63, 0x34, 0xa1, 0x77, 0x48, + 0xa9, 0x4d, 0x26, 0x0b, 0xc5, 0xf5, 0x52, 
0x74 }, + { 0xc3, 0x78, 0xd1, 0xe4, 0x93, 0xb4, 0x0e, 0xf1, + 0x1f, 0xe6, 0xa1, 0x5d, 0x9c, 0x27, 0x37, 0xa3, + 0x78, 0x09, 0x63, 0x4c, 0x5a, 0xba, 0xd5, 0xb3, + 0x3d, 0x7e, 0x39, 0x3b, 0x4a, 0xe0, 0x5d, 0x03 }, + { 0x98, 0x4b, 0xd8, 0x37, 0x91, 0x01, 0xbe, 0x8f, + 0xd8, 0x06, 0x12, 0xd8, 0xea, 0x29, 0x59, 0xa7, + 0x86, 0x5e, 0xc9, 0x71, 0x85, 0x23, 0x55, 0x01, + 0x07, 0xae, 0x39, 0x38, 0xdf, 0x32, 0x01, 0x1b }, + { 0xc6, 0xf2, 0x5a, 0x81, 0x2a, 0x14, 0x48, 0x58, + 0xac, 0x5c, 0xed, 0x37, 0xa9, 0x3a, 0x9f, 0x47, + 0x59, 0xba, 0x0b, 0x1c, 0x0f, 0xdc, 0x43, 0x1d, + 0xce, 0x35, 0xf9, 0xec, 0x1f, 0x1f, 0x4a, 0x99 }, + { 0x92, 0x4c, 0x75, 0xc9, 0x44, 0x24, 0xff, 0x75, + 0xe7, 0x4b, 0x8b, 0x4e, 0x94, 0x35, 0x89, 0x58, + 0xb0, 0x27, 0xb1, 0x71, 0xdf, 0x5e, 0x57, 0x89, + 0x9a, 0xd0, 0xd4, 0xda, 0xc3, 0x73, 0x53, 0xb6 }, + { 0x0a, 0xf3, 0x58, 0x92, 0xa6, 0x3f, 0x45, 0x93, + 0x1f, 0x68, 0x46, 0xed, 0x19, 0x03, 0x61, 0xcd, + 0x07, 0x30, 0x89, 0xe0, 0x77, 0x16, 0x57, 0x14, + 0xb5, 0x0b, 0x81, 0xa2, 0xe3, 0xdd, 0x9b, 0xa1 }, + { 0xcc, 0x80, 0xce, 0xfb, 0x26, 0xc3, 0xb2, 0xb0, + 0xda, 0xef, 0x23, 0x3e, 0x60, 0x6d, 0x5f, 0xfc, + 0x80, 0xfa, 0x17, 0x42, 0x7d, 0x18, 0xe3, 0x04, + 0x89, 0x67, 0x3e, 0x06, 0xef, 0x4b, 0x87, 0xf7 }, + { 0xc2, 0xf8, 0xc8, 0x11, 0x74, 0x47, 0xf3, 0x97, + 0x8b, 0x08, 0x18, 0xdc, 0xf6, 0xf7, 0x01, 0x16, + 0xac, 0x56, 0xfd, 0x18, 0x4d, 0xd1, 0x27, 0x84, + 0x94, 0xe1, 0x03, 0xfc, 0x6d, 0x74, 0xa8, 0x87 }, + { 0xbd, 0xec, 0xf6, 0xbf, 0xc1, 0xba, 0x0d, 0xf6, + 0xe8, 0x62, 0xc8, 0x31, 0x99, 0x22, 0x07, 0x79, + 0x6a, 0xcc, 0x79, 0x79, 0x68, 0x35, 0x88, 0x28, + 0xc0, 0x6e, 0x7a, 0x51, 0xe0, 0x90, 0x09, 0x8f }, + { 0x24, 0xd1, 0xa2, 0x6e, 0x3d, 0xab, 0x02, 0xfe, + 0x45, 0x72, 0xd2, 0xaa, 0x7d, 0xbd, 0x3e, 0xc3, + 0x0f, 0x06, 0x93, 0xdb, 0x26, 0xf2, 0x73, 0xd0, + 0xab, 0x2c, 0xb0, 0xc1, 0x3b, 0x5e, 0x64, 0x51 }, + { 0xec, 0x56, 0xf5, 0x8b, 0x09, 0x29, 0x9a, 0x30, + 0x0b, 0x14, 0x05, 0x65, 0xd7, 0xd3, 0xe6, 0x87, + 0x82, 0xb6, 0xe2, 0xfb, 0xeb, 0x4b, 0x7e, 0xa9, + 
0x7a, 0xc0, 0x57, 0x98, 0x90, 0x61, 0xdd, 0x3f }, + { 0x11, 0xa4, 0x37, 0xc1, 0xab, 0xa3, 0xc1, 0x19, + 0xdd, 0xfa, 0xb3, 0x1b, 0x3e, 0x8c, 0x84, 0x1d, + 0xee, 0xeb, 0x91, 0x3e, 0xf5, 0x7f, 0x7e, 0x48, + 0xf2, 0xc9, 0xcf, 0x5a, 0x28, 0xfa, 0x42, 0xbc }, + { 0x53, 0xc7, 0xe6, 0x11, 0x4b, 0x85, 0x0a, 0x2c, + 0xb4, 0x96, 0xc9, 0xb3, 0xc6, 0x9a, 0x62, 0x3e, + 0xae, 0xa2, 0xcb, 0x1d, 0x33, 0xdd, 0x81, 0x7e, + 0x47, 0x65, 0xed, 0xaa, 0x68, 0x23, 0xc2, 0x28 }, + { 0x15, 0x4c, 0x3e, 0x96, 0xfe, 0xe5, 0xdb, 0x14, + 0xf8, 0x77, 0x3e, 0x18, 0xaf, 0x14, 0x85, 0x79, + 0x13, 0x50, 0x9d, 0xa9, 0x99, 0xb4, 0x6c, 0xdd, + 0x3d, 0x4c, 0x16, 0x97, 0x60, 0xc8, 0x3a, 0xd2 }, + { 0x40, 0xb9, 0x91, 0x6f, 0x09, 0x3e, 0x02, 0x7a, + 0x87, 0x86, 0x64, 0x18, 0x18, 0x92, 0x06, 0x20, + 0x47, 0x2f, 0xbc, 0xf6, 0x8f, 0x70, 0x1d, 0x1b, + 0x68, 0x06, 0x32, 0xe6, 0x99, 0x6b, 0xde, 0xd3 }, + { 0x24, 0xc4, 0xcb, 0xba, 0x07, 0x11, 0x98, 0x31, + 0xa7, 0x26, 0xb0, 0x53, 0x05, 0xd9, 0x6d, 0xa0, + 0x2f, 0xf8, 0xb1, 0x48, 0xf0, 0xda, 0x44, 0x0f, + 0xe2, 0x33, 0xbc, 0xaa, 0x32, 0xc7, 0x2f, 0x6f }, + { 0x5d, 0x20, 0x15, 0x10, 0x25, 0x00, 0x20, 0xb7, + 0x83, 0x68, 0x96, 0x88, 0xab, 0xbf, 0x8e, 0xcf, + 0x25, 0x94, 0xa9, 0x6a, 0x08, 0xf2, 0xbf, 0xec, + 0x6c, 0xe0, 0x57, 0x44, 0x65, 0xdd, 0xed, 0x71 }, + { 0x04, 0x3b, 0x97, 0xe3, 0x36, 0xee, 0x6f, 0xdb, + 0xbe, 0x2b, 0x50, 0xf2, 0x2a, 0xf8, 0x32, 0x75, + 0xa4, 0x08, 0x48, 0x05, 0xd2, 0xd5, 0x64, 0x59, + 0x62, 0x45, 0x4b, 0x6c, 0x9b, 0x80, 0x53, 0xa0 }, + { 0x56, 0x48, 0x35, 0xcb, 0xae, 0xa7, 0x74, 0x94, + 0x85, 0x68, 0xbe, 0x36, 0xcf, 0x52, 0xfc, 0xdd, + 0x83, 0x93, 0x4e, 0xb0, 0xa2, 0x75, 0x12, 0xdb, + 0xe3, 0xe2, 0xdb, 0x47, 0xb9, 0xe6, 0x63, 0x5a }, + { 0xf2, 0x1c, 0x33, 0xf4, 0x7b, 0xde, 0x40, 0xa2, + 0xa1, 0x01, 0xc9, 0xcd, 0xe8, 0x02, 0x7a, 0xaf, + 0x61, 0xa3, 0x13, 0x7d, 0xe2, 0x42, 0x2b, 0x30, + 0x03, 0x5a, 0x04, 0xc2, 0x70, 0x89, 0x41, 0x83 }, + { 0x9d, 0xb0, 0xef, 0x74, 0xe6, 0x6c, 0xbb, 0x84, + 0x2e, 0xb0, 0xe0, 0x73, 0x43, 0xa0, 0x3c, 0x5c, + 0x56, 
0x7e, 0x37, 0x2b, 0x3f, 0x23, 0xb9, 0x43, + 0xc7, 0x88, 0xa4, 0xf2, 0x50, 0xf6, 0x78, 0x91 }, + { 0xab, 0x8d, 0x08, 0x65, 0x5f, 0xf1, 0xd3, 0xfe, + 0x87, 0x58, 0xd5, 0x62, 0x23, 0x5f, 0xd2, 0x3e, + 0x7c, 0xf9, 0xdc, 0xaa, 0xd6, 0x58, 0x87, 0x2a, + 0x49, 0xe5, 0xd3, 0x18, 0x3b, 0x6c, 0xce, 0xbd }, + { 0x6f, 0x27, 0xf7, 0x7e, 0x7b, 0xcf, 0x46, 0xa1, + 0xe9, 0x63, 0xad, 0xe0, 0x30, 0x97, 0x33, 0x54, + 0x30, 0x31, 0xdc, 0xcd, 0xd4, 0x7c, 0xaa, 0xc1, + 0x74, 0xd7, 0xd2, 0x7c, 0xe8, 0x07, 0x7e, 0x8b }, + { 0xe3, 0xcd, 0x54, 0xda, 0x7e, 0x44, 0x4c, 0xaa, + 0x62, 0x07, 0x56, 0x95, 0x25, 0xa6, 0x70, 0xeb, + 0xae, 0x12, 0x78, 0xde, 0x4e, 0x3f, 0xe2, 0x68, + 0x4b, 0x3e, 0x33, 0xf5, 0xef, 0x90, 0xcc, 0x1b }, + { 0xb2, 0xc3, 0xe3, 0x3a, 0x51, 0xd2, 0x2c, 0x4c, + 0x08, 0xfc, 0x09, 0x89, 0xc8, 0x73, 0xc9, 0xcc, + 0x41, 0x50, 0x57, 0x9b, 0x1e, 0x61, 0x63, 0xfa, + 0x69, 0x4a, 0xd5, 0x1d, 0x53, 0xd7, 0x12, 0xdc }, + { 0xbe, 0x7f, 0xda, 0x98, 0x3e, 0x13, 0x18, 0x9b, + 0x4c, 0x77, 0xe0, 0xa8, 0x09, 0x20, 0xb6, 0xe0, + 0xe0, 0xea, 0x80, 0xc3, 0xb8, 0x4d, 0xbe, 0x7e, + 0x71, 0x17, 0xd2, 0x53, 0xf4, 0x81, 0x12, 0xf4 }, + { 0xb6, 0x00, 0x8c, 0x28, 0xfa, 0xe0, 0x8a, 0xa4, + 0x27, 0xe5, 0xbd, 0x3a, 0xad, 0x36, 0xf1, 0x00, + 0x21, 0xf1, 0x6c, 0x77, 0xcf, 0xea, 0xbe, 0xd0, + 0x7f, 0x97, 0xcc, 0x7d, 0xc1, 0xf1, 0x28, 0x4a }, + { 0x6e, 0x4e, 0x67, 0x60, 0xc5, 0x38, 0xf2, 0xe9, + 0x7b, 0x3a, 0xdb, 0xfb, 0xbc, 0xde, 0x57, 0xf8, + 0x96, 0x6b, 0x7e, 0xa8, 0xfc, 0xb5, 0xbf, 0x7e, + 0xfe, 0xc9, 0x13, 0xfd, 0x2a, 0x2b, 0x0c, 0x55 }, + { 0x4a, 0xe5, 0x1f, 0xd1, 0x83, 0x4a, 0xa5, 0xbd, + 0x9a, 0x6f, 0x7e, 0xc3, 0x9f, 0xc6, 0x63, 0x33, + 0x8d, 0xc5, 0xd2, 0xe2, 0x07, 0x61, 0x56, 0x6d, + 0x90, 0xcc, 0x68, 0xb1, 0xcb, 0x87, 0x5e, 0xd8 }, + { 0xb6, 0x73, 0xaa, 0xd7, 0x5a, 0xb1, 0xfd, 0xb5, + 0x40, 0x1a, 0xbf, 0xa1, 0xbf, 0x89, 0xf3, 0xad, + 0xd2, 0xeb, 0xc4, 0x68, 0xdf, 0x36, 0x24, 0xa4, + 0x78, 0xf4, 0xfe, 0x85, 0x9d, 0x8d, 0x55, 0xe2 }, + { 0x13, 0xc9, 0x47, 0x1a, 0x98, 0x55, 0x91, 0x35, + 0x39, 0x83, 
0x66, 0x60, 0x39, 0x8d, 0xa0, 0xf3, + 0xf9, 0x9a, 0xda, 0x08, 0x47, 0x9c, 0x69, 0xd1, + 0xb7, 0xfc, 0xaa, 0x34, 0x61, 0xdd, 0x7e, 0x59 }, + { 0x2c, 0x11, 0xf4, 0xa7, 0xf9, 0x9a, 0x1d, 0x23, + 0xa5, 0x8b, 0xb6, 0x36, 0x35, 0x0f, 0xe8, 0x49, + 0xf2, 0x9c, 0xba, 0xc1, 0xb2, 0xa1, 0x11, 0x2d, + 0x9f, 0x1e, 0xd5, 0xbc, 0x5b, 0x31, 0x3c, 0xcd }, + { 0xc7, 0xd3, 0xc0, 0x70, 0x6b, 0x11, 0xae, 0x74, + 0x1c, 0x05, 0xa1, 0xef, 0x15, 0x0d, 0xd6, 0x5b, + 0x54, 0x94, 0xd6, 0xd5, 0x4c, 0x9a, 0x86, 0xe2, + 0x61, 0x78, 0x54, 0xe6, 0xae, 0xee, 0xbb, 0xd9 }, + { 0x19, 0x4e, 0x10, 0xc9, 0x38, 0x93, 0xaf, 0xa0, + 0x64, 0xc3, 0xac, 0x04, 0xc0, 0xdd, 0x80, 0x8d, + 0x79, 0x1c, 0x3d, 0x4b, 0x75, 0x56, 0xe8, 0x9d, + 0x8d, 0x9c, 0xb2, 0x25, 0xc4, 0xb3, 0x33, 0x39 }, + { 0x6f, 0xc4, 0x98, 0x8b, 0x8f, 0x78, 0x54, 0x6b, + 0x16, 0x88, 0x99, 0x18, 0x45, 0x90, 0x8f, 0x13, + 0x4b, 0x6a, 0x48, 0x2e, 0x69, 0x94, 0xb3, 0xd4, + 0x83, 0x17, 0xbf, 0x08, 0xdb, 0x29, 0x21, 0x85 }, + { 0x56, 0x65, 0xbe, 0xb8, 0xb0, 0x95, 0x55, 0x25, + 0x81, 0x3b, 0x59, 0x81, 0xcd, 0x14, 0x2e, 0xd4, + 0xd0, 0x3f, 0xba, 0x38, 0xa6, 0xf3, 0xe5, 0xad, + 0x26, 0x8e, 0x0c, 0xc2, 0x70, 0xd1, 0xcd, 0x11 }, + { 0xb8, 0x83, 0xd6, 0x8f, 0x5f, 0xe5, 0x19, 0x36, + 0x43, 0x1b, 0xa4, 0x25, 0x67, 0x38, 0x05, 0x3b, + 0x1d, 0x04, 0x26, 0xd4, 0xcb, 0x64, 0xb1, 0x6e, + 0x83, 0xba, 0xdc, 0x5e, 0x9f, 0xbe, 0x3b, 0x81 }, + { 0x53, 0xe7, 0xb2, 0x7e, 0xa5, 0x9c, 0x2f, 0x6d, + 0xbb, 0x50, 0x76, 0x9e, 0x43, 0x55, 0x4d, 0xf3, + 0x5a, 0xf8, 0x9f, 0x48, 0x22, 0xd0, 0x46, 0x6b, + 0x00, 0x7d, 0xd6, 0xf6, 0xde, 0xaf, 0xff, 0x02 }, + { 0x1f, 0x1a, 0x02, 0x29, 0xd4, 0x64, 0x0f, 0x01, + 0x90, 0x15, 0x88, 0xd9, 0xde, 0xc2, 0x2d, 0x13, + 0xfc, 0x3e, 0xb3, 0x4a, 0x61, 0xb3, 0x29, 0x38, + 0xef, 0xbf, 0x53, 0x34, 0xb2, 0x80, 0x0a, 0xfa }, + { 0xc2, 0xb4, 0x05, 0xaf, 0xa0, 0xfa, 0x66, 0x68, + 0x85, 0x2a, 0xee, 0x4d, 0x88, 0x04, 0x08, 0x53, + 0xfa, 0xb8, 0x00, 0xe7, 0x2b, 0x57, 0x58, 0x14, + 0x18, 0xe5, 0x50, 0x6f, 0x21, 0x4c, 0x7d, 0x1f }, + { 0xc0, 0x8a, 0xa1, 
0xc2, 0x86, 0xd7, 0x09, 0xfd, + 0xc7, 0x47, 0x37, 0x44, 0x97, 0x71, 0x88, 0xc8, + 0x95, 0xba, 0x01, 0x10, 0x14, 0x24, 0x7e, 0x4e, + 0xfa, 0x8d, 0x07, 0xe7, 0x8f, 0xec, 0x69, 0x5c }, + { 0xf0, 0x3f, 0x57, 0x89, 0xd3, 0x33, 0x6b, 0x80, + 0xd0, 0x02, 0xd5, 0x9f, 0xdf, 0x91, 0x8b, 0xdb, + 0x77, 0x5b, 0x00, 0x95, 0x6e, 0xd5, 0x52, 0x8e, + 0x86, 0xaa, 0x99, 0x4a, 0xcb, 0x38, 0xfe, 0x2d } +}; + +static const u8 blake2s_keyed_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { + { 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, + 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, + 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a, + 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49 }, + { 0x40, 0xd1, 0x5f, 0xee, 0x7c, 0x32, 0x88, 0x30, + 0x16, 0x6a, 0xc3, 0xf9, 0x18, 0x65, 0x0f, 0x80, + 0x7e, 0x7e, 0x01, 0xe1, 0x77, 0x25, 0x8c, 0xdc, + 0x0a, 0x39, 0xb1, 0x1f, 0x59, 0x80, 0x66, 0xf1 }, + { 0x6b, 0xb7, 0x13, 0x00, 0x64, 0x4c, 0xd3, 0x99, + 0x1b, 0x26, 0xcc, 0xd4, 0xd2, 0x74, 0xac, 0xd1, + 0xad, 0xea, 0xb8, 0xb1, 0xd7, 0x91, 0x45, 0x46, + 0xc1, 0x19, 0x8b, 0xbe, 0x9f, 0xc9, 0xd8, 0x03 }, + { 0x1d, 0x22, 0x0d, 0xbe, 0x2e, 0xe1, 0x34, 0x66, + 0x1f, 0xdf, 0x6d, 0x9e, 0x74, 0xb4, 0x17, 0x04, + 0x71, 0x05, 0x56, 0xf2, 0xf6, 0xe5, 0xa0, 0x91, + 0xb2, 0x27, 0x69, 0x74, 0x45, 0xdb, 0xea, 0x6b }, + { 0xf6, 0xc3, 0xfb, 0xad, 0xb4, 0xcc, 0x68, 0x7a, + 0x00, 0x64, 0xa5, 0xbe, 0x6e, 0x79, 0x1b, 0xec, + 0x63, 0xb8, 0x68, 0xad, 0x62, 0xfb, 0xa6, 0x1b, + 0x37, 0x57, 0xef, 0x9c, 0xa5, 0x2e, 0x05, 0xb2 }, + { 0x49, 0xc1, 0xf2, 0x11, 0x88, 0xdf, 0xd7, 0x69, + 0xae, 0xa0, 0xe9, 0x11, 0xdd, 0x6b, 0x41, 0xf1, + 0x4d, 0xab, 0x10, 0x9d, 0x2b, 0x85, 0x97, 0x7a, + 0xa3, 0x08, 0x8b, 0x5c, 0x70, 0x7e, 0x85, 0x98 }, + { 0xfd, 0xd8, 0x99, 0x3d, 0xcd, 0x43, 0xf6, 0x96, + 0xd4, 0x4f, 0x3c, 0xea, 0x0f, 0xf3, 0x53, 0x45, + 0x23, 0x4e, 0xc8, 0xee, 0x08, 0x3e, 0xb3, 0xca, + 0xda, 0x01, 0x7c, 0x7f, 0x78, 0xc1, 0x71, 0x43 }, + { 0xe6, 0xc8, 0x12, 0x56, 0x37, 0x43, 0x8d, 0x09, + 0x05, 0xb7, 0x49, 0xf4, 0x65, 0x60, 0xac, 0x89, + 
0xfd, 0x47, 0x1c, 0xf8, 0x69, 0x2e, 0x28, 0xfa, + 0xb9, 0x82, 0xf7, 0x3f, 0x01, 0x9b, 0x83, 0xa9 }, + { 0x19, 0xfc, 0x8c, 0xa6, 0x97, 0x9d, 0x60, 0xe6, + 0xed, 0xd3, 0xb4, 0x54, 0x1e, 0x2f, 0x96, 0x7c, + 0xed, 0x74, 0x0d, 0xf6, 0xec, 0x1e, 0xae, 0xbb, + 0xfe, 0x81, 0x38, 0x32, 0xe9, 0x6b, 0x29, 0x74 }, + { 0xa6, 0xad, 0x77, 0x7c, 0xe8, 0x81, 0xb5, 0x2b, + 0xb5, 0xa4, 0x42, 0x1a, 0xb6, 0xcd, 0xd2, 0xdf, + 0xba, 0x13, 0xe9, 0x63, 0x65, 0x2d, 0x4d, 0x6d, + 0x12, 0x2a, 0xee, 0x46, 0x54, 0x8c, 0x14, 0xa7 }, + { 0xf5, 0xc4, 0xb2, 0xba, 0x1a, 0x00, 0x78, 0x1b, + 0x13, 0xab, 0xa0, 0x42, 0x52, 0x42, 0xc6, 0x9c, + 0xb1, 0x55, 0x2f, 0x3f, 0x71, 0xa9, 0xa3, 0xbb, + 0x22, 0xb4, 0xa6, 0xb4, 0x27, 0x7b, 0x46, 0xdd }, + { 0xe3, 0x3c, 0x4c, 0x9b, 0xd0, 0xcc, 0x7e, 0x45, + 0xc8, 0x0e, 0x65, 0xc7, 0x7f, 0xa5, 0x99, 0x7f, + 0xec, 0x70, 0x02, 0x73, 0x85, 0x41, 0x50, 0x9e, + 0x68, 0xa9, 0x42, 0x38, 0x91, 0xe8, 0x22, 0xa3 }, + { 0xfb, 0xa1, 0x61, 0x69, 0xb2, 0xc3, 0xee, 0x10, + 0x5b, 0xe6, 0xe1, 0xe6, 0x50, 0xe5, 0xcb, 0xf4, + 0x07, 0x46, 0xb6, 0x75, 0x3d, 0x03, 0x6a, 0xb5, + 0x51, 0x79, 0x01, 0x4a, 0xd7, 0xef, 0x66, 0x51 }, + { 0xf5, 0xc4, 0xbe, 0xc6, 0xd6, 0x2f, 0xc6, 0x08, + 0xbf, 0x41, 0xcc, 0x11, 0x5f, 0x16, 0xd6, 0x1c, + 0x7e, 0xfd, 0x3f, 0xf6, 0xc6, 0x56, 0x92, 0xbb, + 0xe0, 0xaf, 0xff, 0xb1, 0xfe, 0xde, 0x74, 0x75 }, + { 0xa4, 0x86, 0x2e, 0x76, 0xdb, 0x84, 0x7f, 0x05, + 0xba, 0x17, 0xed, 0xe5, 0xda, 0x4e, 0x7f, 0x91, + 0xb5, 0x92, 0x5c, 0xf1, 0xad, 0x4b, 0xa1, 0x27, + 0x32, 0xc3, 0x99, 0x57, 0x42, 0xa5, 0xcd, 0x6e }, + { 0x65, 0xf4, 0xb8, 0x60, 0xcd, 0x15, 0xb3, 0x8e, + 0xf8, 0x14, 0xa1, 0xa8, 0x04, 0x31, 0x4a, 0x55, + 0xbe, 0x95, 0x3c, 0xaa, 0x65, 0xfd, 0x75, 0x8a, + 0xd9, 0x89, 0xff, 0x34, 0xa4, 0x1c, 0x1e, 0xea }, + { 0x19, 0xba, 0x23, 0x4f, 0x0a, 0x4f, 0x38, 0x63, + 0x7d, 0x18, 0x39, 0xf9, 0xd9, 0xf7, 0x6a, 0xd9, + 0x1c, 0x85, 0x22, 0x30, 0x71, 0x43, 0xc9, 0x7d, + 0x5f, 0x93, 0xf6, 0x92, 0x74, 0xce, 0xc9, 0xa7 }, + { 0x1a, 0x67, 0x18, 0x6c, 0xa4, 0xa5, 0xcb, 0x8e, + 0x65, 
0xfc, 0xa0, 0xe2, 0xec, 0xbc, 0x5d, 0xdc, + 0x14, 0xae, 0x38, 0x1b, 0xb8, 0xbf, 0xfe, 0xb9, + 0xe0, 0xa1, 0x03, 0x44, 0x9e, 0x3e, 0xf0, 0x3c }, + { 0xaf, 0xbe, 0xa3, 0x17, 0xb5, 0xa2, 0xe8, 0x9c, + 0x0b, 0xd9, 0x0c, 0xcf, 0x5d, 0x7f, 0xd0, 0xed, + 0x57, 0xfe, 0x58, 0x5e, 0x4b, 0xe3, 0x27, 0x1b, + 0x0a, 0x6b, 0xf0, 0xf5, 0x78, 0x6b, 0x0f, 0x26 }, + { 0xf1, 0xb0, 0x15, 0x58, 0xce, 0x54, 0x12, 0x62, + 0xf5, 0xec, 0x34, 0x29, 0x9d, 0x6f, 0xb4, 0x09, + 0x00, 0x09, 0xe3, 0x43, 0x4b, 0xe2, 0xf4, 0x91, + 0x05, 0xcf, 0x46, 0xaf, 0x4d, 0x2d, 0x41, 0x24 }, + { 0x13, 0xa0, 0xa0, 0xc8, 0x63, 0x35, 0x63, 0x5e, + 0xaa, 0x74, 0xca, 0x2d, 0x5d, 0x48, 0x8c, 0x79, + 0x7b, 0xbb, 0x4f, 0x47, 0xdc, 0x07, 0x10, 0x50, + 0x15, 0xed, 0x6a, 0x1f, 0x33, 0x09, 0xef, 0xce }, + { 0x15, 0x80, 0xaf, 0xee, 0xbe, 0xbb, 0x34, 0x6f, + 0x94, 0xd5, 0x9f, 0xe6, 0x2d, 0xa0, 0xb7, 0x92, + 0x37, 0xea, 0xd7, 0xb1, 0x49, 0x1f, 0x56, 0x67, + 0xa9, 0x0e, 0x45, 0xed, 0xf6, 0xca, 0x8b, 0x03 }, + { 0x20, 0xbe, 0x1a, 0x87, 0x5b, 0x38, 0xc5, 0x73, + 0xdd, 0x7f, 0xaa, 0xa0, 0xde, 0x48, 0x9d, 0x65, + 0x5c, 0x11, 0xef, 0xb6, 0xa5, 0x52, 0x69, 0x8e, + 0x07, 0xa2, 0xd3, 0x31, 0xb5, 0xf6, 0x55, 0xc3 }, + { 0xbe, 0x1f, 0xe3, 0xc4, 0xc0, 0x40, 0x18, 0xc5, + 0x4c, 0x4a, 0x0f, 0x6b, 0x9a, 0x2e, 0xd3, 0xc5, + 0x3a, 0xbe, 0x3a, 0x9f, 0x76, 0xb4, 0xd2, 0x6d, + 0xe5, 0x6f, 0xc9, 0xae, 0x95, 0x05, 0x9a, 0x99 }, + { 0xe3, 0xe3, 0xac, 0xe5, 0x37, 0xeb, 0x3e, 0xdd, + 0x84, 0x63, 0xd9, 0xad, 0x35, 0x82, 0xe1, 0x3c, + 0xf8, 0x65, 0x33, 0xff, 0xde, 0x43, 0xd6, 0x68, + 0xdd, 0x2e, 0x93, 0xbb, 0xdb, 0xd7, 0x19, 0x5a }, + { 0x11, 0x0c, 0x50, 0xc0, 0xbf, 0x2c, 0x6e, 0x7a, + 0xeb, 0x7e, 0x43, 0x5d, 0x92, 0xd1, 0x32, 0xab, + 0x66, 0x55, 0x16, 0x8e, 0x78, 0xa2, 0xde, 0xcd, + 0xec, 0x33, 0x30, 0x77, 0x76, 0x84, 0xd9, 0xc1 }, + { 0xe9, 0xba, 0x8f, 0x50, 0x5c, 0x9c, 0x80, 0xc0, + 0x86, 0x66, 0xa7, 0x01, 0xf3, 0x36, 0x7e, 0x6c, + 0xc6, 0x65, 0xf3, 0x4b, 0x22, 0xe7, 0x3c, 0x3c, + 0x04, 0x17, 0xeb, 0x1c, 0x22, 0x06, 0x08, 0x2f }, + { 0x26, 0xcd, 
0x66, 0xfc, 0xa0, 0x23, 0x79, 0xc7, + 0x6d, 0xf1, 0x23, 0x17, 0x05, 0x2b, 0xca, 0xfd, + 0x6c, 0xd8, 0xc3, 0xa7, 0xb8, 0x90, 0xd8, 0x05, + 0xf3, 0x6c, 0x49, 0x98, 0x97, 0x82, 0x43, 0x3a }, + { 0x21, 0x3f, 0x35, 0x96, 0xd6, 0xe3, 0xa5, 0xd0, + 0xe9, 0x93, 0x2c, 0xd2, 0x15, 0x91, 0x46, 0x01, + 0x5e, 0x2a, 0xbc, 0x94, 0x9f, 0x47, 0x29, 0xee, + 0x26, 0x32, 0xfe, 0x1e, 0xdb, 0x78, 0xd3, 0x37 }, + { 0x10, 0x15, 0xd7, 0x01, 0x08, 0xe0, 0x3b, 0xe1, + 0xc7, 0x02, 0xfe, 0x97, 0x25, 0x36, 0x07, 0xd1, + 0x4a, 0xee, 0x59, 0x1f, 0x24, 0x13, 0xea, 0x67, + 0x87, 0x42, 0x7b, 0x64, 0x59, 0xff, 0x21, 0x9a }, + { 0x3c, 0xa9, 0x89, 0xde, 0x10, 0xcf, 0xe6, 0x09, + 0x90, 0x94, 0x72, 0xc8, 0xd3, 0x56, 0x10, 0x80, + 0x5b, 0x2f, 0x97, 0x77, 0x34, 0xcf, 0x65, 0x2c, + 0xc6, 0x4b, 0x3b, 0xfc, 0x88, 0x2d, 0x5d, 0x89 }, + { 0xb6, 0x15, 0x6f, 0x72, 0xd3, 0x80, 0xee, 0x9e, + 0xa6, 0xac, 0xd1, 0x90, 0x46, 0x4f, 0x23, 0x07, + 0xa5, 0xc1, 0x79, 0xef, 0x01, 0xfd, 0x71, 0xf9, + 0x9f, 0x2d, 0x0f, 0x7a, 0x57, 0x36, 0x0a, 0xea }, + { 0xc0, 0x3b, 0xc6, 0x42, 0xb2, 0x09, 0x59, 0xcb, + 0xe1, 0x33, 0xa0, 0x30, 0x3e, 0x0c, 0x1a, 0xbf, + 0xf3, 0xe3, 0x1e, 0xc8, 0xe1, 0xa3, 0x28, 0xec, + 0x85, 0x65, 0xc3, 0x6d, 0xec, 0xff, 0x52, 0x65 }, + { 0x2c, 0x3e, 0x08, 0x17, 0x6f, 0x76, 0x0c, 0x62, + 0x64, 0xc3, 0xa2, 0xcd, 0x66, 0xfe, 0xc6, 0xc3, + 0xd7, 0x8d, 0xe4, 0x3f, 0xc1, 0x92, 0x45, 0x7b, + 0x2a, 0x4a, 0x66, 0x0a, 0x1e, 0x0e, 0xb2, 0x2b }, + { 0xf7, 0x38, 0xc0, 0x2f, 0x3c, 0x1b, 0x19, 0x0c, + 0x51, 0x2b, 0x1a, 0x32, 0xde, 0xab, 0xf3, 0x53, + 0x72, 0x8e, 0x0e, 0x9a, 0xb0, 0x34, 0x49, 0x0e, + 0x3c, 0x34, 0x09, 0x94, 0x6a, 0x97, 0xae, 0xec }, + { 0x8b, 0x18, 0x80, 0xdf, 0x30, 0x1c, 0xc9, 0x63, + 0x41, 0x88, 0x11, 0x08, 0x89, 0x64, 0x83, 0x92, + 0x87, 0xff, 0x7f, 0xe3, 0x1c, 0x49, 0xea, 0x6e, + 0xbd, 0x9e, 0x48, 0xbd, 0xee, 0xe4, 0x97, 0xc5 }, + { 0x1e, 0x75, 0xcb, 0x21, 0xc6, 0x09, 0x89, 0x02, + 0x03, 0x75, 0xf1, 0xa7, 0xa2, 0x42, 0x83, 0x9f, + 0x0b, 0x0b, 0x68, 0x97, 0x3a, 0x4c, 0x2a, 0x05, + 0xcf, 0x75, 0x55, 0xed, 
0x5a, 0xae, 0xc4, 0xc1 }, + { 0x62, 0xbf, 0x8a, 0x9c, 0x32, 0xa5, 0xbc, 0xcf, + 0x29, 0x0b, 0x6c, 0x47, 0x4d, 0x75, 0xb2, 0xa2, + 0xa4, 0x09, 0x3f, 0x1a, 0x9e, 0x27, 0x13, 0x94, + 0x33, 0xa8, 0xf2, 0xb3, 0xbc, 0xe7, 0xb8, 0xd7 }, + { 0x16, 0x6c, 0x83, 0x50, 0xd3, 0x17, 0x3b, 0x5e, + 0x70, 0x2b, 0x78, 0x3d, 0xfd, 0x33, 0xc6, 0x6e, + 0xe0, 0x43, 0x27, 0x42, 0xe9, 0xb9, 0x2b, 0x99, + 0x7f, 0xd2, 0x3c, 0x60, 0xdc, 0x67, 0x56, 0xca }, + { 0x04, 0x4a, 0x14, 0xd8, 0x22, 0xa9, 0x0c, 0xac, + 0xf2, 0xf5, 0xa1, 0x01, 0x42, 0x8a, 0xdc, 0x8f, + 0x41, 0x09, 0x38, 0x6c, 0xcb, 0x15, 0x8b, 0xf9, + 0x05, 0xc8, 0x61, 0x8b, 0x8e, 0xe2, 0x4e, 0xc3 }, + { 0x38, 0x7d, 0x39, 0x7e, 0xa4, 0x3a, 0x99, 0x4b, + 0xe8, 0x4d, 0x2d, 0x54, 0x4a, 0xfb, 0xe4, 0x81, + 0xa2, 0x00, 0x0f, 0x55, 0x25, 0x26, 0x96, 0xbb, + 0xa2, 0xc5, 0x0c, 0x8e, 0xbd, 0x10, 0x13, 0x47 }, + { 0x56, 0xf8, 0xcc, 0xf1, 0xf8, 0x64, 0x09, 0xb4, + 0x6c, 0xe3, 0x61, 0x66, 0xae, 0x91, 0x65, 0x13, + 0x84, 0x41, 0x57, 0x75, 0x89, 0xdb, 0x08, 0xcb, + 0xc5, 0xf6, 0x6c, 0xa2, 0x97, 0x43, 0xb9, 0xfd }, + { 0x97, 0x06, 0xc0, 0x92, 0xb0, 0x4d, 0x91, 0xf5, + 0x3d, 0xff, 0x91, 0xfa, 0x37, 0xb7, 0x49, 0x3d, + 0x28, 0xb5, 0x76, 0xb5, 0xd7, 0x10, 0x46, 0x9d, + 0xf7, 0x94, 0x01, 0x66, 0x22, 0x36, 0xfc, 0x03 }, + { 0x87, 0x79, 0x68, 0x68, 0x6c, 0x06, 0x8c, 0xe2, + 0xf7, 0xe2, 0xad, 0xcf, 0xf6, 0x8b, 0xf8, 0x74, + 0x8e, 0xdf, 0x3c, 0xf8, 0x62, 0xcf, 0xb4, 0xd3, + 0x94, 0x7a, 0x31, 0x06, 0x95, 0x80, 0x54, 0xe3 }, + { 0x88, 0x17, 0xe5, 0x71, 0x98, 0x79, 0xac, 0xf7, + 0x02, 0x47, 0x87, 0xec, 0xcd, 0xb2, 0x71, 0x03, + 0x55, 0x66, 0xcf, 0xa3, 0x33, 0xe0, 0x49, 0x40, + 0x7c, 0x01, 0x78, 0xcc, 0xc5, 0x7a, 0x5b, 0x9f }, + { 0x89, 0x38, 0x24, 0x9e, 0x4b, 0x50, 0xca, 0xda, + 0xcc, 0xdf, 0x5b, 0x18, 0x62, 0x13, 0x26, 0xcb, + 0xb1, 0x52, 0x53, 0xe3, 0x3a, 0x20, 0xf5, 0x63, + 0x6e, 0x99, 0x5d, 0x72, 0x47, 0x8d, 0xe4, 0x72 }, + { 0xf1, 0x64, 0xab, 0xba, 0x49, 0x63, 0xa4, 0x4d, + 0x10, 0x72, 0x57, 0xe3, 0x23, 0x2d, 0x90, 0xac, + 0xa5, 0xe6, 0x6a, 0x14, 0x08, 
0x24, 0x8c, 0x51, + 0x74, 0x1e, 0x99, 0x1d, 0xb5, 0x22, 0x77, 0x56 }, + { 0xd0, 0x55, 0x63, 0xe2, 0xb1, 0xcb, 0xa0, 0xc4, + 0xa2, 0xa1, 0xe8, 0xbd, 0xe3, 0xa1, 0xa0, 0xd9, + 0xf5, 0xb4, 0x0c, 0x85, 0xa0, 0x70, 0xd6, 0xf5, + 0xfb, 0x21, 0x06, 0x6e, 0xad, 0x5d, 0x06, 0x01 }, + { 0x03, 0xfb, 0xb1, 0x63, 0x84, 0xf0, 0xa3, 0x86, + 0x6f, 0x4c, 0x31, 0x17, 0x87, 0x76, 0x66, 0xef, + 0xbf, 0x12, 0x45, 0x97, 0x56, 0x4b, 0x29, 0x3d, + 0x4a, 0xab, 0x0d, 0x26, 0x9f, 0xab, 0xdd, 0xfa }, + { 0x5f, 0xa8, 0x48, 0x6a, 0xc0, 0xe5, 0x29, 0x64, + 0xd1, 0x88, 0x1b, 0xbe, 0x33, 0x8e, 0xb5, 0x4b, + 0xe2, 0xf7, 0x19, 0x54, 0x92, 0x24, 0x89, 0x20, + 0x57, 0xb4, 0xda, 0x04, 0xba, 0x8b, 0x34, 0x75 }, + { 0xcd, 0xfa, 0xbc, 0xee, 0x46, 0x91, 0x11, 0x11, + 0x23, 0x6a, 0x31, 0x70, 0x8b, 0x25, 0x39, 0xd7, + 0x1f, 0xc2, 0x11, 0xd9, 0xb0, 0x9c, 0x0d, 0x85, + 0x30, 0xa1, 0x1e, 0x1d, 0xbf, 0x6e, 0xed, 0x01 }, + { 0x4f, 0x82, 0xde, 0x03, 0xb9, 0x50, 0x47, 0x93, + 0xb8, 0x2a, 0x07, 0xa0, 0xbd, 0xcd, 0xff, 0x31, + 0x4d, 0x75, 0x9e, 0x7b, 0x62, 0xd2, 0x6b, 0x78, + 0x49, 0x46, 0xb0, 0xd3, 0x6f, 0x91, 0x6f, 0x52 }, + { 0x25, 0x9e, 0xc7, 0xf1, 0x73, 0xbc, 0xc7, 0x6a, + 0x09, 0x94, 0xc9, 0x67, 0xb4, 0xf5, 0xf0, 0x24, + 0xc5, 0x60, 0x57, 0xfb, 0x79, 0xc9, 0x65, 0xc4, + 0xfa, 0xe4, 0x18, 0x75, 0xf0, 0x6a, 0x0e, 0x4c }, + { 0x19, 0x3c, 0xc8, 0xe7, 0xc3, 0xe0, 0x8b, 0xb3, + 0x0f, 0x54, 0x37, 0xaa, 0x27, 0xad, 0xe1, 0xf1, + 0x42, 0x36, 0x9b, 0x24, 0x6a, 0x67, 0x5b, 0x23, + 0x83, 0xe6, 0xda, 0x9b, 0x49, 0xa9, 0x80, 0x9e }, + { 0x5c, 0x10, 0x89, 0x6f, 0x0e, 0x28, 0x56, 0xb2, + 0xa2, 0xee, 0xe0, 0xfe, 0x4a, 0x2c, 0x16, 0x33, + 0x56, 0x5d, 0x18, 0xf0, 0xe9, 0x3e, 0x1f, 0xab, + 0x26, 0xc3, 0x73, 0xe8, 0xf8, 0x29, 0x65, 0x4d }, + { 0xf1, 0x60, 0x12, 0xd9, 0x3f, 0x28, 0x85, 0x1a, + 0x1e, 0xb9, 0x89, 0xf5, 0xd0, 0xb4, 0x3f, 0x3f, + 0x39, 0xca, 0x73, 0xc9, 0xa6, 0x2d, 0x51, 0x81, + 0xbf, 0xf2, 0x37, 0x53, 0x6b, 0xd3, 0x48, 0xc3 }, + { 0x29, 0x66, 0xb3, 0xcf, 0xae, 0x1e, 0x44, 0xea, + 0x99, 0x6d, 0xc5, 0xd6, 0x86, 0xcf, 
0x25, 0xfa, + 0x05, 0x3f, 0xb6, 0xf6, 0x72, 0x01, 0xb9, 0xe4, + 0x6e, 0xad, 0xe8, 0x5d, 0x0a, 0xd6, 0xb8, 0x06 }, + { 0xdd, 0xb8, 0x78, 0x24, 0x85, 0xe9, 0x00, 0xbc, + 0x60, 0xbc, 0xf4, 0xc3, 0x3a, 0x6f, 0xd5, 0x85, + 0x68, 0x0c, 0xc6, 0x83, 0xd5, 0x16, 0xef, 0xa0, + 0x3e, 0xb9, 0x98, 0x5f, 0xad, 0x87, 0x15, 0xfb }, + { 0x4c, 0x4d, 0x6e, 0x71, 0xae, 0xa0, 0x57, 0x86, + 0x41, 0x31, 0x48, 0xfc, 0x7a, 0x78, 0x6b, 0x0e, + 0xca, 0xf5, 0x82, 0xcf, 0xf1, 0x20, 0x9f, 0x5a, + 0x80, 0x9f, 0xba, 0x85, 0x04, 0xce, 0x66, 0x2c }, + { 0xfb, 0x4c, 0x5e, 0x86, 0xd7, 0xb2, 0x22, 0x9b, + 0x99, 0xb8, 0xba, 0x6d, 0x94, 0xc2, 0x47, 0xef, + 0x96, 0x4a, 0xa3, 0xa2, 0xba, 0xe8, 0xed, 0xc7, + 0x75, 0x69, 0xf2, 0x8d, 0xbb, 0xff, 0x2d, 0x4e }, + { 0xe9, 0x4f, 0x52, 0x6d, 0xe9, 0x01, 0x96, 0x33, + 0xec, 0xd5, 0x4a, 0xc6, 0x12, 0x0f, 0x23, 0x95, + 0x8d, 0x77, 0x18, 0xf1, 0xe7, 0x71, 0x7b, 0xf3, + 0x29, 0x21, 0x1a, 0x4f, 0xae, 0xed, 0x4e, 0x6d }, + { 0xcb, 0xd6, 0x66, 0x0a, 0x10, 0xdb, 0x3f, 0x23, + 0xf7, 0xa0, 0x3d, 0x4b, 0x9d, 0x40, 0x44, 0xc7, + 0x93, 0x2b, 0x28, 0x01, 0xac, 0x89, 0xd6, 0x0b, + 0xc9, 0xeb, 0x92, 0xd6, 0x5a, 0x46, 0xc2, 0xa0 }, + { 0x88, 0x18, 0xbb, 0xd3, 0xdb, 0x4d, 0xc1, 0x23, + 0xb2, 0x5c, 0xbb, 0xa5, 0xf5, 0x4c, 0x2b, 0xc4, + 0xb3, 0xfc, 0xf9, 0xbf, 0x7d, 0x7a, 0x77, 0x09, + 0xf4, 0xae, 0x58, 0x8b, 0x26, 0x7c, 0x4e, 0xce }, + { 0xc6, 0x53, 0x82, 0x51, 0x3f, 0x07, 0x46, 0x0d, + 0xa3, 0x98, 0x33, 0xcb, 0x66, 0x6c, 0x5e, 0xd8, + 0x2e, 0x61, 0xb9, 0xe9, 0x98, 0xf4, 0xb0, 0xc4, + 0x28, 0x7c, 0xee, 0x56, 0xc3, 0xcc, 0x9b, 0xcd }, + { 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, + 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, + 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, + 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4 }, + { 0x21, 0xfe, 0x0c, 0xeb, 0x00, 0x52, 0xbe, 0x7f, + 0xb0, 0xf0, 0x04, 0x18, 0x7c, 0xac, 0xd7, 0xde, + 0x67, 0xfa, 0x6e, 0xb0, 0x93, 0x8d, 0x92, 0x76, + 0x77, 0xf2, 0x39, 0x8c, 0x13, 0x23, 0x17, 0xa8 }, + { 0x2e, 0xf7, 0x3f, 0x3c, 0x26, 0xf1, 0x2d, 
0x93, + 0x88, 0x9f, 0x3c, 0x78, 0xb6, 0xa6, 0x6c, 0x1d, + 0x52, 0xb6, 0x49, 0xdc, 0x9e, 0x85, 0x6e, 0x2c, + 0x17, 0x2e, 0xa7, 0xc5, 0x8a, 0xc2, 0xb5, 0xe3 }, + { 0x38, 0x8a, 0x3c, 0xd5, 0x6d, 0x73, 0x86, 0x7a, + 0xbb, 0x5f, 0x84, 0x01, 0x49, 0x2b, 0x6e, 0x26, + 0x81, 0xeb, 0x69, 0x85, 0x1e, 0x76, 0x7f, 0xd8, + 0x42, 0x10, 0xa5, 0x60, 0x76, 0xfb, 0x3d, 0xd3 }, + { 0xaf, 0x53, 0x3e, 0x02, 0x2f, 0xc9, 0x43, 0x9e, + 0x4e, 0x3c, 0xb8, 0x38, 0xec, 0xd1, 0x86, 0x92, + 0x23, 0x2a, 0xdf, 0x6f, 0xe9, 0x83, 0x95, 0x26, + 0xd3, 0xc3, 0xdd, 0x1b, 0x71, 0x91, 0x0b, 0x1a }, + { 0x75, 0x1c, 0x09, 0xd4, 0x1a, 0x93, 0x43, 0x88, + 0x2a, 0x81, 0xcd, 0x13, 0xee, 0x40, 0x81, 0x8d, + 0x12, 0xeb, 0x44, 0xc6, 0xc7, 0xf4, 0x0d, 0xf1, + 0x6e, 0x4a, 0xea, 0x8f, 0xab, 0x91, 0x97, 0x2a }, + { 0x5b, 0x73, 0xdd, 0xb6, 0x8d, 0x9d, 0x2b, 0x0a, + 0xa2, 0x65, 0xa0, 0x79, 0x88, 0xd6, 0xb8, 0x8a, + 0xe9, 0xaa, 0xc5, 0x82, 0xaf, 0x83, 0x03, 0x2f, + 0x8a, 0x9b, 0x21, 0xa2, 0xe1, 0xb7, 0xbf, 0x18 }, + { 0x3d, 0xa2, 0x91, 0x26, 0xc7, 0xc5, 0xd7, 0xf4, + 0x3e, 0x64, 0x24, 0x2a, 0x79, 0xfe, 0xaa, 0x4e, + 0xf3, 0x45, 0x9c, 0xde, 0xcc, 0xc8, 0x98, 0xed, + 0x59, 0xa9, 0x7f, 0x6e, 0xc9, 0x3b, 0x9d, 0xab }, + { 0x56, 0x6d, 0xc9, 0x20, 0x29, 0x3d, 0xa5, 0xcb, + 0x4f, 0xe0, 0xaa, 0x8a, 0xbd, 0xa8, 0xbb, 0xf5, + 0x6f, 0x55, 0x23, 0x13, 0xbf, 0xf1, 0x90, 0x46, + 0x64, 0x1e, 0x36, 0x15, 0xc1, 0xe3, 0xed, 0x3f }, + { 0x41, 0x15, 0xbe, 0xa0, 0x2f, 0x73, 0xf9, 0x7f, + 0x62, 0x9e, 0x5c, 0x55, 0x90, 0x72, 0x0c, 0x01, + 0xe7, 0xe4, 0x49, 0xae, 0x2a, 0x66, 0x97, 0xd4, + 0xd2, 0x78, 0x33, 0x21, 0x30, 0x36, 0x92, 0xf9 }, + { 0x4c, 0xe0, 0x8f, 0x47, 0x62, 0x46, 0x8a, 0x76, + 0x70, 0x01, 0x21, 0x64, 0x87, 0x8d, 0x68, 0x34, + 0x0c, 0x52, 0xa3, 0x5e, 0x66, 0xc1, 0x88, 0x4d, + 0x5c, 0x86, 0x48, 0x89, 0xab, 0xc9, 0x66, 0x77 }, + { 0x81, 0xea, 0x0b, 0x78, 0x04, 0x12, 0x4e, 0x0c, + 0x22, 0xea, 0x5f, 0xc7, 0x11, 0x04, 0xa2, 0xaf, + 0xcb, 0x52, 0xa1, 0xfa, 0x81, 0x6f, 0x3e, 0xcb, + 0x7d, 0xcb, 0x5d, 0x9d, 0xea, 0x17, 0x86, 0xd0 }, + { 
0xfe, 0x36, 0x27, 0x33, 0xb0, 0x5f, 0x6b, 0xed, + 0xaf, 0x93, 0x79, 0xd7, 0xf7, 0x93, 0x6e, 0xde, + 0x20, 0x9b, 0x1f, 0x83, 0x23, 0xc3, 0x92, 0x25, + 0x49, 0xd9, 0xe7, 0x36, 0x81, 0xb5, 0xdb, 0x7b }, + { 0xef, 0xf3, 0x7d, 0x30, 0xdf, 0xd2, 0x03, 0x59, + 0xbe, 0x4e, 0x73, 0xfd, 0xf4, 0x0d, 0x27, 0x73, + 0x4b, 0x3d, 0xf9, 0x0a, 0x97, 0xa5, 0x5e, 0xd7, + 0x45, 0x29, 0x72, 0x94, 0xca, 0x85, 0xd0, 0x9f }, + { 0x17, 0x2f, 0xfc, 0x67, 0x15, 0x3d, 0x12, 0xe0, + 0xca, 0x76, 0xa8, 0xb6, 0xcd, 0x5d, 0x47, 0x31, + 0x88, 0x5b, 0x39, 0xce, 0x0c, 0xac, 0x93, 0xa8, + 0x97, 0x2a, 0x18, 0x00, 0x6c, 0x8b, 0x8b, 0xaf }, + { 0xc4, 0x79, 0x57, 0xf1, 0xcc, 0x88, 0xe8, 0x3e, + 0xf9, 0x44, 0x58, 0x39, 0x70, 0x9a, 0x48, 0x0a, + 0x03, 0x6b, 0xed, 0x5f, 0x88, 0xac, 0x0f, 0xcc, + 0x8e, 0x1e, 0x70, 0x3f, 0xfa, 0xac, 0x13, 0x2c }, + { 0x30, 0xf3, 0x54, 0x83, 0x70, 0xcf, 0xdc, 0xed, + 0xa5, 0xc3, 0x7b, 0x56, 0x9b, 0x61, 0x75, 0xe7, + 0x99, 0xee, 0xf1, 0xa6, 0x2a, 0xaa, 0x94, 0x32, + 0x45, 0xae, 0x76, 0x69, 0xc2, 0x27, 0xa7, 0xb5 }, + { 0xc9, 0x5d, 0xcb, 0x3c, 0xf1, 0xf2, 0x7d, 0x0e, + 0xef, 0x2f, 0x25, 0xd2, 0x41, 0x38, 0x70, 0x90, + 0x4a, 0x87, 0x7c, 0x4a, 0x56, 0xc2, 0xde, 0x1e, + 0x83, 0xe2, 0xbc, 0x2a, 0xe2, 0xe4, 0x68, 0x21 }, + { 0xd5, 0xd0, 0xb5, 0xd7, 0x05, 0x43, 0x4c, 0xd4, + 0x6b, 0x18, 0x57, 0x49, 0xf6, 0x6b, 0xfb, 0x58, + 0x36, 0xdc, 0xdf, 0x6e, 0xe5, 0x49, 0xa2, 0xb7, + 0xa4, 0xae, 0xe7, 0xf5, 0x80, 0x07, 0xca, 0xaf }, + { 0xbb, 0xc1, 0x24, 0xa7, 0x12, 0xf1, 0x5d, 0x07, + 0xc3, 0x00, 0xe0, 0x5b, 0x66, 0x83, 0x89, 0xa4, + 0x39, 0xc9, 0x17, 0x77, 0xf7, 0x21, 0xf8, 0x32, + 0x0c, 0x1c, 0x90, 0x78, 0x06, 0x6d, 0x2c, 0x7e }, + { 0xa4, 0x51, 0xb4, 0x8c, 0x35, 0xa6, 0xc7, 0x85, + 0x4c, 0xfa, 0xae, 0x60, 0x26, 0x2e, 0x76, 0x99, + 0x08, 0x16, 0x38, 0x2a, 0xc0, 0x66, 0x7e, 0x5a, + 0x5c, 0x9e, 0x1b, 0x46, 0xc4, 0x34, 0x2d, 0xdf }, + { 0xb0, 0xd1, 0x50, 0xfb, 0x55, 0xe7, 0x78, 0xd0, + 0x11, 0x47, 0xf0, 0xb5, 0xd8, 0x9d, 0x99, 0xec, + 0xb2, 0x0f, 0xf0, 0x7e, 0x5e, 0x67, 0x60, 0xd6, + 0xb6, 0x45, 
0xeb, 0x5b, 0x65, 0x4c, 0x62, 0x2b }, + { 0x34, 0xf7, 0x37, 0xc0, 0xab, 0x21, 0x99, 0x51, + 0xee, 0xe8, 0x9a, 0x9f, 0x8d, 0xac, 0x29, 0x9c, + 0x9d, 0x4c, 0x38, 0xf3, 0x3f, 0xa4, 0x94, 0xc5, + 0xc6, 0xee, 0xfc, 0x92, 0xb6, 0xdb, 0x08, 0xbc }, + { 0x1a, 0x62, 0xcc, 0x3a, 0x00, 0x80, 0x0d, 0xcb, + 0xd9, 0x98, 0x91, 0x08, 0x0c, 0x1e, 0x09, 0x84, + 0x58, 0x19, 0x3a, 0x8c, 0xc9, 0xf9, 0x70, 0xea, + 0x99, 0xfb, 0xef, 0xf0, 0x03, 0x18, 0xc2, 0x89 }, + { 0xcf, 0xce, 0x55, 0xeb, 0xaf, 0xc8, 0x40, 0xd7, + 0xae, 0x48, 0x28, 0x1c, 0x7f, 0xd5, 0x7e, 0xc8, + 0xb4, 0x82, 0xd4, 0xb7, 0x04, 0x43, 0x74, 0x95, + 0x49, 0x5a, 0xc4, 0x14, 0xcf, 0x4a, 0x37, 0x4b }, + { 0x67, 0x46, 0xfa, 0xcf, 0x71, 0x14, 0x6d, 0x99, + 0x9d, 0xab, 0xd0, 0x5d, 0x09, 0x3a, 0xe5, 0x86, + 0x64, 0x8d, 0x1e, 0xe2, 0x8e, 0x72, 0x61, 0x7b, + 0x99, 0xd0, 0xf0, 0x08, 0x6e, 0x1e, 0x45, 0xbf }, + { 0x57, 0x1c, 0xed, 0x28, 0x3b, 0x3f, 0x23, 0xb4, + 0xe7, 0x50, 0xbf, 0x12, 0xa2, 0xca, 0xf1, 0x78, + 0x18, 0x47, 0xbd, 0x89, 0x0e, 0x43, 0x60, 0x3c, + 0xdc, 0x59, 0x76, 0x10, 0x2b, 0x7b, 0xb1, 0x1b }, + { 0xcf, 0xcb, 0x76, 0x5b, 0x04, 0x8e, 0x35, 0x02, + 0x2c, 0x5d, 0x08, 0x9d, 0x26, 0xe8, 0x5a, 0x36, + 0xb0, 0x05, 0xa2, 0xb8, 0x04, 0x93, 0xd0, 0x3a, + 0x14, 0x4e, 0x09, 0xf4, 0x09, 0xb6, 0xaf, 0xd1 }, + { 0x40, 0x50, 0xc7, 0xa2, 0x77, 0x05, 0xbb, 0x27, + 0xf4, 0x20, 0x89, 0xb2, 0x99, 0xf3, 0xcb, 0xe5, + 0x05, 0x4e, 0xad, 0x68, 0x72, 0x7e, 0x8e, 0xf9, + 0x31, 0x8c, 0xe6, 0xf2, 0x5c, 0xd6, 0xf3, 0x1d }, + { 0x18, 0x40, 0x70, 0xbd, 0x5d, 0x26, 0x5f, 0xbd, + 0xc1, 0x42, 0xcd, 0x1c, 0x5c, 0xd0, 0xd7, 0xe4, + 0x14, 0xe7, 0x03, 0x69, 0xa2, 0x66, 0xd6, 0x27, + 0xc8, 0xfb, 0xa8, 0x4f, 0xa5, 0xe8, 0x4c, 0x34 }, + { 0x9e, 0xdd, 0xa9, 0xa4, 0x44, 0x39, 0x02, 0xa9, + 0x58, 0x8c, 0x0d, 0x0c, 0xcc, 0x62, 0xb9, 0x30, + 0x21, 0x84, 0x79, 0xa6, 0x84, 0x1e, 0x6f, 0xe7, + 0xd4, 0x30, 0x03, 0xf0, 0x4b, 0x1f, 0xd6, 0x43 }, + { 0xe4, 0x12, 0xfe, 0xef, 0x79, 0x08, 0x32, 0x4a, + 0x6d, 0xa1, 0x84, 0x16, 0x29, 0xf3, 0x5d, 0x3d, + 0x35, 0x86, 0x42, 
0x01, 0x93, 0x10, 0xec, 0x57, + 0xc6, 0x14, 0x83, 0x6b, 0x63, 0xd3, 0x07, 0x63 }, + { 0x1a, 0x2b, 0x8e, 0xdf, 0xf3, 0xf9, 0xac, 0xc1, + 0x55, 0x4f, 0xcb, 0xae, 0x3c, 0xf1, 0xd6, 0x29, + 0x8c, 0x64, 0x62, 0xe2, 0x2e, 0x5e, 0xb0, 0x25, + 0x96, 0x84, 0xf8, 0x35, 0x01, 0x2b, 0xd1, 0x3f }, + { 0x28, 0x8c, 0x4a, 0xd9, 0xb9, 0x40, 0x97, 0x62, + 0xea, 0x07, 0xc2, 0x4a, 0x41, 0xf0, 0x4f, 0x69, + 0xa7, 0xd7, 0x4b, 0xee, 0x2d, 0x95, 0x43, 0x53, + 0x74, 0xbd, 0xe9, 0x46, 0xd7, 0x24, 0x1c, 0x7b }, + { 0x80, 0x56, 0x91, 0xbb, 0x28, 0x67, 0x48, 0xcf, + 0xb5, 0x91, 0xd3, 0xae, 0xbe, 0x7e, 0x6f, 0x4e, + 0x4d, 0xc6, 0xe2, 0x80, 0x8c, 0x65, 0x14, 0x3c, + 0xc0, 0x04, 0xe4, 0xeb, 0x6f, 0xd0, 0x9d, 0x43 }, + { 0xd4, 0xac, 0x8d, 0x3a, 0x0a, 0xfc, 0x6c, 0xfa, + 0x7b, 0x46, 0x0a, 0xe3, 0x00, 0x1b, 0xae, 0xb3, + 0x6d, 0xad, 0xb3, 0x7d, 0xa0, 0x7d, 0x2e, 0x8a, + 0xc9, 0x18, 0x22, 0xdf, 0x34, 0x8a, 0xed, 0x3d }, + { 0xc3, 0x76, 0x61, 0x70, 0x14, 0xd2, 0x01, 0x58, + 0xbc, 0xed, 0x3d, 0x3b, 0xa5, 0x52, 0xb6, 0xec, + 0xcf, 0x84, 0xe6, 0x2a, 0xa3, 0xeb, 0x65, 0x0e, + 0x90, 0x02, 0x9c, 0x84, 0xd1, 0x3e, 0xea, 0x69 }, + { 0xc4, 0x1f, 0x09, 0xf4, 0x3c, 0xec, 0xae, 0x72, + 0x93, 0xd6, 0x00, 0x7c, 0xa0, 0xa3, 0x57, 0x08, + 0x7d, 0x5a, 0xe5, 0x9b, 0xe5, 0x00, 0xc1, 0xcd, + 0x5b, 0x28, 0x9e, 0xe8, 0x10, 0xc7, 0xb0, 0x82 }, + { 0x03, 0xd1, 0xce, 0xd1, 0xfb, 0xa5, 0xc3, 0x91, + 0x55, 0xc4, 0x4b, 0x77, 0x65, 0xcb, 0x76, 0x0c, + 0x78, 0x70, 0x8d, 0xcf, 0xc8, 0x0b, 0x0b, 0xd8, + 0xad, 0xe3, 0xa5, 0x6d, 0xa8, 0x83, 0x0b, 0x29 }, + { 0x09, 0xbd, 0xe6, 0xf1, 0x52, 0x21, 0x8d, 0xc9, + 0x2c, 0x41, 0xd7, 0xf4, 0x53, 0x87, 0xe6, 0x3e, + 0x58, 0x69, 0xd8, 0x07, 0xec, 0x70, 0xb8, 0x21, + 0x40, 0x5d, 0xbd, 0x88, 0x4b, 0x7f, 0xcf, 0x4b }, + { 0x71, 0xc9, 0x03, 0x6e, 0x18, 0x17, 0x9b, 0x90, + 0xb3, 0x7d, 0x39, 0xe9, 0xf0, 0x5e, 0xb8, 0x9c, + 0xc5, 0xfc, 0x34, 0x1f, 0xd7, 0xc4, 0x77, 0xd0, + 0xd7, 0x49, 0x32, 0x85, 0xfa, 0xca, 0x08, 0xa4 }, + { 0x59, 0x16, 0x83, 0x3e, 0xbb, 0x05, 0xcd, 0x91, + 0x9c, 0xa7, 0xfe, 0x83, 
0xb6, 0x92, 0xd3, 0x20, + 0x5b, 0xef, 0x72, 0x39, 0x2b, 0x2c, 0xf6, 0xbb, + 0x0a, 0x6d, 0x43, 0xf9, 0x94, 0xf9, 0x5f, 0x11 }, + { 0xf6, 0x3a, 0xab, 0x3e, 0xc6, 0x41, 0xb3, 0xb0, + 0x24, 0x96, 0x4c, 0x2b, 0x43, 0x7c, 0x04, 0xf6, + 0x04, 0x3c, 0x4c, 0x7e, 0x02, 0x79, 0x23, 0x99, + 0x95, 0x40, 0x19, 0x58, 0xf8, 0x6b, 0xbe, 0x54 }, + { 0xf1, 0x72, 0xb1, 0x80, 0xbf, 0xb0, 0x97, 0x40, + 0x49, 0x31, 0x20, 0xb6, 0x32, 0x6c, 0xbd, 0xc5, + 0x61, 0xe4, 0x77, 0xde, 0xf9, 0xbb, 0xcf, 0xd2, + 0x8c, 0xc8, 0xc1, 0xc5, 0xe3, 0x37, 0x9a, 0x31 }, + { 0xcb, 0x9b, 0x89, 0xcc, 0x18, 0x38, 0x1d, 0xd9, + 0x14, 0x1a, 0xde, 0x58, 0x86, 0x54, 0xd4, 0xe6, + 0xa2, 0x31, 0xd5, 0xbf, 0x49, 0xd4, 0xd5, 0x9a, + 0xc2, 0x7d, 0x86, 0x9c, 0xbe, 0x10, 0x0c, 0xf3 }, + { 0x7b, 0xd8, 0x81, 0x50, 0x46, 0xfd, 0xd8, 0x10, + 0xa9, 0x23, 0xe1, 0x98, 0x4a, 0xae, 0xbd, 0xcd, + 0xf8, 0x4d, 0x87, 0xc8, 0x99, 0x2d, 0x68, 0xb5, + 0xee, 0xb4, 0x60, 0xf9, 0x3e, 0xb3, 0xc8, 0xd7 }, + { 0x60, 0x7b, 0xe6, 0x68, 0x62, 0xfd, 0x08, 0xee, + 0x5b, 0x19, 0xfa, 0xca, 0xc0, 0x9d, 0xfd, 0xbc, + 0xd4, 0x0c, 0x31, 0x21, 0x01, 0xd6, 0x6e, 0x6e, + 0xbd, 0x2b, 0x84, 0x1f, 0x1b, 0x9a, 0x93, 0x25 }, + { 0x9f, 0xe0, 0x3b, 0xbe, 0x69, 0xab, 0x18, 0x34, + 0xf5, 0x21, 0x9b, 0x0d, 0xa8, 0x8a, 0x08, 0xb3, + 0x0a, 0x66, 0xc5, 0x91, 0x3f, 0x01, 0x51, 0x96, + 0x3c, 0x36, 0x05, 0x60, 0xdb, 0x03, 0x87, 0xb3 }, + { 0x90, 0xa8, 0x35, 0x85, 0x71, 0x7b, 0x75, 0xf0, + 0xe9, 0xb7, 0x25, 0xe0, 0x55, 0xee, 0xee, 0xb9, + 0xe7, 0xa0, 0x28, 0xea, 0x7e, 0x6c, 0xbc, 0x07, + 0xb2, 0x09, 0x17, 0xec, 0x03, 0x63, 0xe3, 0x8c }, + { 0x33, 0x6e, 0xa0, 0x53, 0x0f, 0x4a, 0x74, 0x69, + 0x12, 0x6e, 0x02, 0x18, 0x58, 0x7e, 0xbb, 0xde, + 0x33, 0x58, 0xa0, 0xb3, 0x1c, 0x29, 0xd2, 0x00, + 0xf7, 0xdc, 0x7e, 0xb1, 0x5c, 0x6a, 0xad, 0xd8 }, + { 0xa7, 0x9e, 0x76, 0xdc, 0x0a, 0xbc, 0xa4, 0x39, + 0x6f, 0x07, 0x47, 0xcd, 0x7b, 0x74, 0x8d, 0xf9, + 0x13, 0x00, 0x76, 0x26, 0xb1, 0xd6, 0x59, 0xda, + 0x0c, 0x1f, 0x78, 0xb9, 0x30, 0x3d, 0x01, 0xa3 }, + { 0x44, 0xe7, 0x8a, 0x77, 0x37, 
0x56, 0xe0, 0x95, + 0x15, 0x19, 0x50, 0x4d, 0x70, 0x38, 0xd2, 0x8d, + 0x02, 0x13, 0xa3, 0x7e, 0x0c, 0xe3, 0x75, 0x37, + 0x17, 0x57, 0xbc, 0x99, 0x63, 0x11, 0xe3, 0xb8 }, + { 0x77, 0xac, 0x01, 0x2a, 0x3f, 0x75, 0x4d, 0xcf, + 0xea, 0xb5, 0xeb, 0x99, 0x6b, 0xe9, 0xcd, 0x2d, + 0x1f, 0x96, 0x11, 0x1b, 0x6e, 0x49, 0xf3, 0x99, + 0x4d, 0xf1, 0x81, 0xf2, 0x85, 0x69, 0xd8, 0x25 }, + { 0xce, 0x5a, 0x10, 0xdb, 0x6f, 0xcc, 0xda, 0xf1, + 0x40, 0xaa, 0xa4, 0xde, 0xd6, 0x25, 0x0a, 0x9c, + 0x06, 0xe9, 0x22, 0x2b, 0xc9, 0xf9, 0xf3, 0x65, + 0x8a, 0x4a, 0xff, 0x93, 0x5f, 0x2b, 0x9f, 0x3a }, + { 0xec, 0xc2, 0x03, 0xa7, 0xfe, 0x2b, 0xe4, 0xab, + 0xd5, 0x5b, 0xb5, 0x3e, 0x6e, 0x67, 0x35, 0x72, + 0xe0, 0x07, 0x8d, 0xa8, 0xcd, 0x37, 0x5e, 0xf4, + 0x30, 0xcc, 0x97, 0xf9, 0xf8, 0x00, 0x83, 0xaf }, + { 0x14, 0xa5, 0x18, 0x6d, 0xe9, 0xd7, 0xa1, 0x8b, + 0x04, 0x12, 0xb8, 0x56, 0x3e, 0x51, 0xcc, 0x54, + 0x33, 0x84, 0x0b, 0x4a, 0x12, 0x9a, 0x8f, 0xf9, + 0x63, 0xb3, 0x3a, 0x3c, 0x4a, 0xfe, 0x8e, 0xbb }, + { 0x13, 0xf8, 0xef, 0x95, 0xcb, 0x86, 0xe6, 0xa6, + 0x38, 0x93, 0x1c, 0x8e, 0x10, 0x76, 0x73, 0xeb, + 0x76, 0xba, 0x10, 0xd7, 0xc2, 0xcd, 0x70, 0xb9, + 0xd9, 0x92, 0x0b, 0xbe, 0xed, 0x92, 0x94, 0x09 }, + { 0x0b, 0x33, 0x8f, 0x4e, 0xe1, 0x2f, 0x2d, 0xfc, + 0xb7, 0x87, 0x13, 0x37, 0x79, 0x41, 0xe0, 0xb0, + 0x63, 0x21, 0x52, 0x58, 0x1d, 0x13, 0x32, 0x51, + 0x6e, 0x4a, 0x2c, 0xab, 0x19, 0x42, 0xcc, 0xa4 }, + { 0xea, 0xab, 0x0e, 0xc3, 0x7b, 0x3b, 0x8a, 0xb7, + 0x96, 0xe9, 0xf5, 0x72, 0x38, 0xde, 0x14, 0xa2, + 0x64, 0xa0, 0x76, 0xf3, 0x88, 0x7d, 0x86, 0xe2, + 0x9b, 0xb5, 0x90, 0x6d, 0xb5, 0xa0, 0x0e, 0x02 }, + { 0x23, 0xcb, 0x68, 0xb8, 0xc0, 0xe6, 0xdc, 0x26, + 0xdc, 0x27, 0x76, 0x6d, 0xdc, 0x0a, 0x13, 0xa9, + 0x94, 0x38, 0xfd, 0x55, 0x61, 0x7a, 0xa4, 0x09, + 0x5d, 0x8f, 0x96, 0x97, 0x20, 0xc8, 0x72, 0xdf }, + { 0x09, 0x1d, 0x8e, 0xe3, 0x0d, 0x6f, 0x29, 0x68, + 0xd4, 0x6b, 0x68, 0x7d, 0xd6, 0x52, 0x92, 0x66, + 0x57, 0x42, 0xde, 0x0b, 0xb8, 0x3d, 0xcc, 0x00, + 0x04, 0xc7, 0x2c, 0xe1, 0x00, 0x07, 0xa5, 
0x49 }, + { 0x7f, 0x50, 0x7a, 0xbc, 0x6d, 0x19, 0xba, 0x00, + 0xc0, 0x65, 0xa8, 0x76, 0xec, 0x56, 0x57, 0x86, + 0x88, 0x82, 0xd1, 0x8a, 0x22, 0x1b, 0xc4, 0x6c, + 0x7a, 0x69, 0x12, 0x54, 0x1f, 0x5b, 0xc7, 0xba }, + { 0xa0, 0x60, 0x7c, 0x24, 0xe1, 0x4e, 0x8c, 0x22, + 0x3d, 0xb0, 0xd7, 0x0b, 0x4d, 0x30, 0xee, 0x88, + 0x01, 0x4d, 0x60, 0x3f, 0x43, 0x7e, 0x9e, 0x02, + 0xaa, 0x7d, 0xaf, 0xa3, 0xcd, 0xfb, 0xad, 0x94 }, + { 0xdd, 0xbf, 0xea, 0x75, 0xcc, 0x46, 0x78, 0x82, + 0xeb, 0x34, 0x83, 0xce, 0x5e, 0x2e, 0x75, 0x6a, + 0x4f, 0x47, 0x01, 0xb7, 0x6b, 0x44, 0x55, 0x19, + 0xe8, 0x9f, 0x22, 0xd6, 0x0f, 0xa8, 0x6e, 0x06 }, + { 0x0c, 0x31, 0x1f, 0x38, 0xc3, 0x5a, 0x4f, 0xb9, + 0x0d, 0x65, 0x1c, 0x28, 0x9d, 0x48, 0x68, 0x56, + 0xcd, 0x14, 0x13, 0xdf, 0x9b, 0x06, 0x77, 0xf5, + 0x3e, 0xce, 0x2c, 0xd9, 0xe4, 0x77, 0xc6, 0x0a }, + { 0x46, 0xa7, 0x3a, 0x8d, 0xd3, 0xe7, 0x0f, 0x59, + 0xd3, 0x94, 0x2c, 0x01, 0xdf, 0x59, 0x9d, 0xef, + 0x78, 0x3c, 0x9d, 0xa8, 0x2f, 0xd8, 0x32, 0x22, + 0xcd, 0x66, 0x2b, 0x53, 0xdc, 0xe7, 0xdb, 0xdf }, + { 0xad, 0x03, 0x8f, 0xf9, 0xb1, 0x4d, 0xe8, 0x4a, + 0x80, 0x1e, 0x4e, 0x62, 0x1c, 0xe5, 0xdf, 0x02, + 0x9d, 0xd9, 0x35, 0x20, 0xd0, 0xc2, 0xfa, 0x38, + 0xbf, 0xf1, 0x76, 0xa8, 0xb1, 0xd1, 0x69, 0x8c }, + { 0xab, 0x70, 0xc5, 0xdf, 0xbd, 0x1e, 0xa8, 0x17, + 0xfe, 0xd0, 0xcd, 0x06, 0x72, 0x93, 0xab, 0xf3, + 0x19, 0xe5, 0xd7, 0x90, 0x1c, 0x21, 0x41, 0xd5, + 0xd9, 0x9b, 0x23, 0xf0, 0x3a, 0x38, 0xe7, 0x48 }, + { 0x1f, 0xff, 0xda, 0x67, 0x93, 0x2b, 0x73, 0xc8, + 0xec, 0xaf, 0x00, 0x9a, 0x34, 0x91, 0xa0, 0x26, + 0x95, 0x3b, 0xab, 0xfe, 0x1f, 0x66, 0x3b, 0x06, + 0x97, 0xc3, 0xc4, 0xae, 0x8b, 0x2e, 0x7d, 0xcb }, + { 0xb0, 0xd2, 0xcc, 0x19, 0x47, 0x2d, 0xd5, 0x7f, + 0x2b, 0x17, 0xef, 0xc0, 0x3c, 0x8d, 0x58, 0xc2, + 0x28, 0x3d, 0xbb, 0x19, 0xda, 0x57, 0x2f, 0x77, + 0x55, 0x85, 0x5a, 0xa9, 0x79, 0x43, 0x17, 0xa0 }, + { 0xa0, 0xd1, 0x9a, 0x6e, 0xe3, 0x39, 0x79, 0xc3, + 0x25, 0x51, 0x0e, 0x27, 0x66, 0x22, 0xdf, 0x41, + 0xf7, 0x15, 0x83, 0xd0, 0x75, 0x01, 0xb8, 0x70, + 
0x71, 0x12, 0x9a, 0x0a, 0xd9, 0x47, 0x32, 0xa5 }, + { 0x72, 0x46, 0x42, 0xa7, 0x03, 0x2d, 0x10, 0x62, + 0xb8, 0x9e, 0x52, 0xbe, 0xa3, 0x4b, 0x75, 0xdf, + 0x7d, 0x8f, 0xe7, 0x72, 0xd9, 0xfe, 0x3c, 0x93, + 0xdd, 0xf3, 0xc4, 0x54, 0x5a, 0xb5, 0xa9, 0x9b }, + { 0xad, 0xe5, 0xea, 0xa7, 0xe6, 0x1f, 0x67, 0x2d, + 0x58, 0x7e, 0xa0, 0x3d, 0xae, 0x7d, 0x7b, 0x55, + 0x22, 0x9c, 0x01, 0xd0, 0x6b, 0xc0, 0xa5, 0x70, + 0x14, 0x36, 0xcb, 0xd1, 0x83, 0x66, 0xa6, 0x26 }, + { 0x01, 0x3b, 0x31, 0xeb, 0xd2, 0x28, 0xfc, 0xdd, + 0xa5, 0x1f, 0xab, 0xb0, 0x3b, 0xb0, 0x2d, 0x60, + 0xac, 0x20, 0xca, 0x21, 0x5a, 0xaf, 0xa8, 0x3b, + 0xdd, 0x85, 0x5e, 0x37, 0x55, 0xa3, 0x5f, 0x0b }, + { 0x33, 0x2e, 0xd4, 0x0b, 0xb1, 0x0d, 0xde, 0x3c, + 0x95, 0x4a, 0x75, 0xd7, 0xb8, 0x99, 0x9d, 0x4b, + 0x26, 0xa1, 0xc0, 0x63, 0xc1, 0xdc, 0x6e, 0x32, + 0xc1, 0xd9, 0x1b, 0xab, 0x7b, 0xbb, 0x7d, 0x16 }, + { 0xc7, 0xa1, 0x97, 0xb3, 0xa0, 0x5b, 0x56, 0x6b, + 0xcc, 0x9f, 0xac, 0xd2, 0x0e, 0x44, 0x1d, 0x6f, + 0x6c, 0x28, 0x60, 0xac, 0x96, 0x51, 0xcd, 0x51, + 0xd6, 0xb9, 0xd2, 0xcd, 0xee, 0xea, 0x03, 0x90 }, + { 0xbd, 0x9c, 0xf6, 0x4e, 0xa8, 0x95, 0x3c, 0x03, + 0x71, 0x08, 0xe6, 0xf6, 0x54, 0x91, 0x4f, 0x39, + 0x58, 0xb6, 0x8e, 0x29, 0xc1, 0x67, 0x00, 0xdc, + 0x18, 0x4d, 0x94, 0xa2, 0x17, 0x08, 0xff, 0x60 }, + { 0x88, 0x35, 0xb0, 0xac, 0x02, 0x11, 0x51, 0xdf, + 0x71, 0x64, 0x74, 0xce, 0x27, 0xce, 0x4d, 0x3c, + 0x15, 0xf0, 0xb2, 0xda, 0xb4, 0x80, 0x03, 0xcf, + 0x3f, 0x3e, 0xfd, 0x09, 0x45, 0x10, 0x6b, 0x9a }, + { 0x3b, 0xfe, 0xfa, 0x33, 0x01, 0xaa, 0x55, 0xc0, + 0x80, 0x19, 0x0c, 0xff, 0xda, 0x8e, 0xae, 0x51, + 0xd9, 0xaf, 0x48, 0x8b, 0x4c, 0x1f, 0x24, 0xc3, + 0xd9, 0xa7, 0x52, 0x42, 0xfd, 0x8e, 0xa0, 0x1d }, + { 0x08, 0x28, 0x4d, 0x14, 0x99, 0x3c, 0xd4, 0x7d, + 0x53, 0xeb, 0xae, 0xcf, 0x0d, 0xf0, 0x47, 0x8c, + 0xc1, 0x82, 0xc8, 0x9c, 0x00, 0xe1, 0x85, 0x9c, + 0x84, 0x85, 0x16, 0x86, 0xdd, 0xf2, 0xc1, 0xb7 }, + { 0x1e, 0xd7, 0xef, 0x9f, 0x04, 0xc2, 0xac, 0x8d, + 0xb6, 0xa8, 0x64, 0xdb, 0x13, 0x10, 0x87, 0xf2, + 0x70, 
0x65, 0x09, 0x8e, 0x69, 0xc3, 0xfe, 0x78, + 0x71, 0x8d, 0x9b, 0x94, 0x7f, 0x4a, 0x39, 0xd0 }, + { 0xc1, 0x61, 0xf2, 0xdc, 0xd5, 0x7e, 0x9c, 0x14, + 0x39, 0xb3, 0x1a, 0x9d, 0xd4, 0x3d, 0x8f, 0x3d, + 0x7d, 0xd8, 0xf0, 0xeb, 0x7c, 0xfa, 0xc6, 0xfb, + 0x25, 0xa0, 0xf2, 0x8e, 0x30, 0x6f, 0x06, 0x61 }, + { 0xc0, 0x19, 0x69, 0xad, 0x34, 0xc5, 0x2c, 0xaf, + 0x3d, 0xc4, 0xd8, 0x0d, 0x19, 0x73, 0x5c, 0x29, + 0x73, 0x1a, 0xc6, 0xe7, 0xa9, 0x20, 0x85, 0xab, + 0x92, 0x50, 0xc4, 0x8d, 0xea, 0x48, 0xa3, 0xfc }, + { 0x17, 0x20, 0xb3, 0x65, 0x56, 0x19, 0xd2, 0xa5, + 0x2b, 0x35, 0x21, 0xae, 0x0e, 0x49, 0xe3, 0x45, + 0xcb, 0x33, 0x89, 0xeb, 0xd6, 0x20, 0x8a, 0xca, + 0xf9, 0xf1, 0x3f, 0xda, 0xcc, 0xa8, 0xbe, 0x49 }, + { 0x75, 0x62, 0x88, 0x36, 0x1c, 0x83, 0xe2, 0x4c, + 0x61, 0x7c, 0xf9, 0x5c, 0x90, 0x5b, 0x22, 0xd0, + 0x17, 0xcd, 0xc8, 0x6f, 0x0b, 0xf1, 0xd6, 0x58, + 0xf4, 0x75, 0x6c, 0x73, 0x79, 0x87, 0x3b, 0x7f }, + { 0xe7, 0xd0, 0xed, 0xa3, 0x45, 0x26, 0x93, 0xb7, + 0x52, 0xab, 0xcd, 0xa1, 0xb5, 0x5e, 0x27, 0x6f, + 0x82, 0x69, 0x8f, 0x5f, 0x16, 0x05, 0x40, 0x3e, + 0xff, 0x83, 0x0b, 0xea, 0x00, 0x71, 0xa3, 0x94 }, + { 0x2c, 0x82, 0xec, 0xaa, 0x6b, 0x84, 0x80, 0x3e, + 0x04, 0x4a, 0xf6, 0x31, 0x18, 0xaf, 0xe5, 0x44, + 0x68, 0x7c, 0xb6, 0xe6, 0xc7, 0xdf, 0x49, 0xed, + 0x76, 0x2d, 0xfd, 0x7c, 0x86, 0x93, 0xa1, 0xbc }, + { 0x61, 0x36, 0xcb, 0xf4, 0xb4, 0x41, 0x05, 0x6f, + 0xa1, 0xe2, 0x72, 0x24, 0x98, 0x12, 0x5d, 0x6d, + 0xed, 0x45, 0xe1, 0x7b, 0x52, 0x14, 0x39, 0x59, + 0xc7, 0xf4, 0xd4, 0xe3, 0x95, 0x21, 0x8a, 0xc2 }, + { 0x72, 0x1d, 0x32, 0x45, 0xaa, 0xfe, 0xf2, 0x7f, + 0x6a, 0x62, 0x4f, 0x47, 0x95, 0x4b, 0x6c, 0x25, + 0x50, 0x79, 0x52, 0x6f, 0xfa, 0x25, 0xe9, 0xff, + 0x77, 0xe5, 0xdc, 0xff, 0x47, 0x3b, 0x15, 0x97 }, + { 0x9d, 0xd2, 0xfb, 0xd8, 0xce, 0xf1, 0x6c, 0x35, + 0x3c, 0x0a, 0xc2, 0x11, 0x91, 0xd5, 0x09, 0xeb, + 0x28, 0xdd, 0x9e, 0x3e, 0x0d, 0x8c, 0xea, 0x5d, + 0x26, 0xca, 0x83, 0x93, 0x93, 0x85, 0x1c, 0x3a }, + { 0xb2, 0x39, 0x4c, 0xea, 0xcd, 0xeb, 0xf2, 0x1b, + 0xf9, 0xdf, 
0x2c, 0xed, 0x98, 0xe5, 0x8f, 0x1c, + 0x3a, 0x4b, 0xbb, 0xff, 0x66, 0x0d, 0xd9, 0x00, + 0xf6, 0x22, 0x02, 0xd6, 0x78, 0x5c, 0xc4, 0x6e }, + { 0x57, 0x08, 0x9f, 0x22, 0x27, 0x49, 0xad, 0x78, + 0x71, 0x76, 0x5f, 0x06, 0x2b, 0x11, 0x4f, 0x43, + 0xba, 0x20, 0xec, 0x56, 0x42, 0x2a, 0x8b, 0x1e, + 0x3f, 0x87, 0x19, 0x2c, 0x0e, 0xa7, 0x18, 0xc6 }, + { 0xe4, 0x9a, 0x94, 0x59, 0x96, 0x1c, 0xd3, 0x3c, + 0xdf, 0x4a, 0xae, 0x1b, 0x10, 0x78, 0xa5, 0xde, + 0xa7, 0xc0, 0x40, 0xe0, 0xfe, 0xa3, 0x40, 0xc9, + 0x3a, 0x72, 0x48, 0x72, 0xfc, 0x4a, 0xf8, 0x06 }, + { 0xed, 0xe6, 0x7f, 0x72, 0x0e, 0xff, 0xd2, 0xca, + 0x9c, 0x88, 0x99, 0x41, 0x52, 0xd0, 0x20, 0x1d, + 0xee, 0x6b, 0x0a, 0x2d, 0x2c, 0x07, 0x7a, 0xca, + 0x6d, 0xae, 0x29, 0xf7, 0x3f, 0x8b, 0x63, 0x09 }, + { 0xe0, 0xf4, 0x34, 0xbf, 0x22, 0xe3, 0x08, 0x80, + 0x39, 0xc2, 0x1f, 0x71, 0x9f, 0xfc, 0x67, 0xf0, + 0xf2, 0xcb, 0x5e, 0x98, 0xa7, 0xa0, 0x19, 0x4c, + 0x76, 0xe9, 0x6b, 0xf4, 0xe8, 0xe1, 0x7e, 0x61 }, + { 0x27, 0x7c, 0x04, 0xe2, 0x85, 0x34, 0x84, 0xa4, + 0xeb, 0xa9, 0x10, 0xad, 0x33, 0x6d, 0x01, 0xb4, + 0x77, 0xb6, 0x7c, 0xc2, 0x00, 0xc5, 0x9f, 0x3c, + 0x8d, 0x77, 0xee, 0xf8, 0x49, 0x4f, 0x29, 0xcd }, + { 0x15, 0x6d, 0x57, 0x47, 0xd0, 0xc9, 0x9c, 0x7f, + 0x27, 0x09, 0x7d, 0x7b, 0x7e, 0x00, 0x2b, 0x2e, + 0x18, 0x5c, 0xb7, 0x2d, 0x8d, 0xd7, 0xeb, 0x42, + 0x4a, 0x03, 0x21, 0x52, 0x81, 0x61, 0x21, 0x9f }, + { 0x20, 0xdd, 0xd1, 0xed, 0x9b, 0x1c, 0xa8, 0x03, + 0x94, 0x6d, 0x64, 0xa8, 0x3a, 0xe4, 0x65, 0x9d, + 0xa6, 0x7f, 0xba, 0x7a, 0x1a, 0x3e, 0xdd, 0xb1, + 0xe1, 0x03, 0xc0, 0xf5, 0xe0, 0x3e, 0x3a, 0x2c }, + { 0xf0, 0xaf, 0x60, 0x4d, 0x3d, 0xab, 0xbf, 0x9a, + 0x0f, 0x2a, 0x7d, 0x3d, 0xda, 0x6b, 0xd3, 0x8b, + 0xba, 0x72, 0xc6, 0xd0, 0x9b, 0xe4, 0x94, 0xfc, + 0xef, 0x71, 0x3f, 0xf1, 0x01, 0x89, 0xb6, 0xe6 }, + { 0x98, 0x02, 0xbb, 0x87, 0xde, 0xf4, 0xcc, 0x10, + 0xc4, 0xa5, 0xfd, 0x49, 0xaa, 0x58, 0xdf, 0xe2, + 0xf3, 0xfd, 0xdb, 0x46, 0xb4, 0x70, 0x88, 0x14, + 0xea, 0xd8, 0x1d, 0x23, 0xba, 0x95, 0x13, 0x9b }, + { 0x4f, 0x8c, 0xe1, 
0xe5, 0x1d, 0x2f, 0xe7, 0xf2, + 0x40, 0x43, 0xa9, 0x04, 0xd8, 0x98, 0xeb, 0xfc, + 0x91, 0x97, 0x54, 0x18, 0x75, 0x34, 0x13, 0xaa, + 0x09, 0x9b, 0x79, 0x5e, 0xcb, 0x35, 0xce, 0xdb }, + { 0xbd, 0xdc, 0x65, 0x14, 0xd7, 0xee, 0x6a, 0xce, + 0x0a, 0x4a, 0xc1, 0xd0, 0xe0, 0x68, 0x11, 0x22, + 0x88, 0xcb, 0xcf, 0x56, 0x04, 0x54, 0x64, 0x27, + 0x05, 0x63, 0x01, 0x77, 0xcb, 0xa6, 0x08, 0xbd }, + { 0xd6, 0x35, 0x99, 0x4f, 0x62, 0x91, 0x51, 0x7b, + 0x02, 0x81, 0xff, 0xdd, 0x49, 0x6a, 0xfa, 0x86, + 0x27, 0x12, 0xe5, 0xb3, 0xc4, 0xe5, 0x2e, 0x4c, + 0xd5, 0xfd, 0xae, 0x8c, 0x0e, 0x72, 0xfb, 0x08 }, + { 0x87, 0x8d, 0x9c, 0xa6, 0x00, 0xcf, 0x87, 0xe7, + 0x69, 0xcc, 0x30, 0x5c, 0x1b, 0x35, 0x25, 0x51, + 0x86, 0x61, 0x5a, 0x73, 0xa0, 0xda, 0x61, 0x3b, + 0x5f, 0x1c, 0x98, 0xdb, 0xf8, 0x12, 0x83, 0xea }, + { 0xa6, 0x4e, 0xbe, 0x5d, 0xc1, 0x85, 0xde, 0x9f, + 0xdd, 0xe7, 0x60, 0x7b, 0x69, 0x98, 0x70, 0x2e, + 0xb2, 0x34, 0x56, 0x18, 0x49, 0x57, 0x30, 0x7d, + 0x2f, 0xa7, 0x2e, 0x87, 0xa4, 0x77, 0x02, 0xd6 }, + { 0xce, 0x50, 0xea, 0xb7, 0xb5, 0xeb, 0x52, 0xbd, + 0xc9, 0xad, 0x8e, 0x5a, 0x48, 0x0a, 0xb7, 0x80, + 0xca, 0x93, 0x20, 0xe4, 0x43, 0x60, 0xb1, 0xfe, + 0x37, 0xe0, 0x3f, 0x2f, 0x7a, 0xd7, 0xde, 0x01 }, + { 0xee, 0xdd, 0xb7, 0xc0, 0xdb, 0x6e, 0x30, 0xab, + 0xe6, 0x6d, 0x79, 0xe3, 0x27, 0x51, 0x1e, 0x61, + 0xfc, 0xeb, 0xbc, 0x29, 0xf1, 0x59, 0xb4, 0x0a, + 0x86, 0xb0, 0x46, 0xec, 0xf0, 0x51, 0x38, 0x23 }, + { 0x78, 0x7f, 0xc9, 0x34, 0x40, 0xc1, 0xec, 0x96, + 0xb5, 0xad, 0x01, 0xc1, 0x6c, 0xf7, 0x79, 0x16, + 0xa1, 0x40, 0x5f, 0x94, 0x26, 0x35, 0x6e, 0xc9, + 0x21, 0xd8, 0xdf, 0xf3, 0xea, 0x63, 0xb7, 0xe0 }, + { 0x7f, 0x0d, 0x5e, 0xab, 0x47, 0xee, 0xfd, 0xa6, + 0x96, 0xc0, 0xbf, 0x0f, 0xbf, 0x86, 0xab, 0x21, + 0x6f, 0xce, 0x46, 0x1e, 0x93, 0x03, 0xab, 0xa6, + 0xac, 0x37, 0x41, 0x20, 0xe8, 0x90, 0xe8, 0xdf }, + { 0xb6, 0x80, 0x04, 0xb4, 0x2f, 0x14, 0xad, 0x02, + 0x9f, 0x4c, 0x2e, 0x03, 0xb1, 0xd5, 0xeb, 0x76, + 0xd5, 0x71, 0x60, 0xe2, 0x64, 0x76, 0xd2, 0x11, + 0x31, 0xbe, 0xf2, 0x0a, 0xda, 
0x7d, 0x27, 0xf4 }, + { 0xb0, 0xc4, 0xeb, 0x18, 0xae, 0x25, 0x0b, 0x51, + 0xa4, 0x13, 0x82, 0xea, 0xd9, 0x2d, 0x0d, 0xc7, + 0x45, 0x5f, 0x93, 0x79, 0xfc, 0x98, 0x84, 0x42, + 0x8e, 0x47, 0x70, 0x60, 0x8d, 0xb0, 0xfa, 0xec }, + { 0xf9, 0x2b, 0x7a, 0x87, 0x0c, 0x05, 0x9f, 0x4d, + 0x46, 0x46, 0x4c, 0x82, 0x4e, 0xc9, 0x63, 0x55, + 0x14, 0x0b, 0xdc, 0xe6, 0x81, 0x32, 0x2c, 0xc3, + 0xa9, 0x92, 0xff, 0x10, 0x3e, 0x3f, 0xea, 0x52 }, + { 0x53, 0x64, 0x31, 0x26, 0x14, 0x81, 0x33, 0x98, + 0xcc, 0x52, 0x5d, 0x4c, 0x4e, 0x14, 0x6e, 0xde, + 0xb3, 0x71, 0x26, 0x5f, 0xba, 0x19, 0x13, 0x3a, + 0x2c, 0x3d, 0x21, 0x59, 0x29, 0x8a, 0x17, 0x42 }, + { 0xf6, 0x62, 0x0e, 0x68, 0xd3, 0x7f, 0xb2, 0xaf, + 0x50, 0x00, 0xfc, 0x28, 0xe2, 0x3b, 0x83, 0x22, + 0x97, 0xec, 0xd8, 0xbc, 0xe9, 0x9e, 0x8b, 0xe4, + 0xd0, 0x4e, 0x85, 0x30, 0x9e, 0x3d, 0x33, 0x74 }, + { 0x53, 0x16, 0xa2, 0x79, 0x69, 0xd7, 0xfe, 0x04, + 0xff, 0x27, 0xb2, 0x83, 0x96, 0x1b, 0xff, 0xc3, + 0xbf, 0x5d, 0xfb, 0x32, 0xfb, 0x6a, 0x89, 0xd1, + 0x01, 0xc6, 0xc3, 0xb1, 0x93, 0x7c, 0x28, 0x71 }, + { 0x81, 0xd1, 0x66, 0x4f, 0xdf, 0x3c, 0xb3, 0x3c, + 0x24, 0xee, 0xba, 0xc0, 0xbd, 0x64, 0x24, 0x4b, + 0x77, 0xc4, 0xab, 0xea, 0x90, 0xbb, 0xe8, 0xb5, + 0xee, 0x0b, 0x2a, 0xaf, 0xcf, 0x2d, 0x6a, 0x53 }, + { 0x34, 0x57, 0x82, 0xf2, 0x95, 0xb0, 0x88, 0x03, + 0x52, 0xe9, 0x24, 0xa0, 0x46, 0x7b, 0x5f, 0xbc, + 0x3e, 0x8f, 0x3b, 0xfb, 0xc3, 0xc7, 0xe4, 0x8b, + 0x67, 0x09, 0x1f, 0xb5, 0xe8, 0x0a, 0x94, 0x42 }, + { 0x79, 0x41, 0x11, 0xea, 0x6c, 0xd6, 0x5e, 0x31, + 0x1f, 0x74, 0xee, 0x41, 0xd4, 0x76, 0xcb, 0x63, + 0x2c, 0xe1, 0xe4, 0xb0, 0x51, 0xdc, 0x1d, 0x9e, + 0x9d, 0x06, 0x1a, 0x19, 0xe1, 0xd0, 0xbb, 0x49 }, + { 0x2a, 0x85, 0xda, 0xf6, 0x13, 0x88, 0x16, 0xb9, + 0x9b, 0xf8, 0xd0, 0x8b, 0xa2, 0x11, 0x4b, 0x7a, + 0xb0, 0x79, 0x75, 0xa7, 0x84, 0x20, 0xc1, 0xa3, + 0xb0, 0x6a, 0x77, 0x7c, 0x22, 0xdd, 0x8b, 0xcb }, + { 0x89, 0xb0, 0xd5, 0xf2, 0x89, 0xec, 0x16, 0x40, + 0x1a, 0x06, 0x9a, 0x96, 0x0d, 0x0b, 0x09, 0x3e, + 0x62, 0x5d, 0xa3, 0xcf, 0x41, 0xee, 
0x29, 0xb5, + 0x9b, 0x93, 0x0c, 0x58, 0x20, 0x14, 0x54, 0x55 }, + { 0xd0, 0xfd, 0xcb, 0x54, 0x39, 0x43, 0xfc, 0x27, + 0xd2, 0x08, 0x64, 0xf5, 0x21, 0x81, 0x47, 0x1b, + 0x94, 0x2c, 0xc7, 0x7c, 0xa6, 0x75, 0xbc, 0xb3, + 0x0d, 0xf3, 0x1d, 0x35, 0x8e, 0xf7, 0xb1, 0xeb }, + { 0xb1, 0x7e, 0xa8, 0xd7, 0x70, 0x63, 0xc7, 0x09, + 0xd4, 0xdc, 0x6b, 0x87, 0x94, 0x13, 0xc3, 0x43, + 0xe3, 0x79, 0x0e, 0x9e, 0x62, 0xca, 0x85, 0xb7, + 0x90, 0x0b, 0x08, 0x6f, 0x6b, 0x75, 0xc6, 0x72 }, + { 0xe7, 0x1a, 0x3e, 0x2c, 0x27, 0x4d, 0xb8, 0x42, + 0xd9, 0x21, 0x14, 0xf2, 0x17, 0xe2, 0xc0, 0xea, + 0xc8, 0xb4, 0x50, 0x93, 0xfd, 0xfd, 0x9d, 0xf4, + 0xca, 0x71, 0x62, 0x39, 0x48, 0x62, 0xd5, 0x01 }, + { 0xc0, 0x47, 0x67, 0x59, 0xab, 0x7a, 0xa3, 0x33, + 0x23, 0x4f, 0x6b, 0x44, 0xf5, 0xfd, 0x85, 0x83, + 0x90, 0xec, 0x23, 0x69, 0x4c, 0x62, 0x2c, 0xb9, + 0x86, 0xe7, 0x69, 0xc7, 0x8e, 0xdd, 0x73, 0x3e }, + { 0x9a, 0xb8, 0xea, 0xbb, 0x14, 0x16, 0x43, 0x4d, + 0x85, 0x39, 0x13, 0x41, 0xd5, 0x69, 0x93, 0xc5, + 0x54, 0x58, 0x16, 0x7d, 0x44, 0x18, 0xb1, 0x9a, + 0x0f, 0x2a, 0xd8, 0xb7, 0x9a, 0x83, 0xa7, 0x5b }, + { 0x79, 0x92, 0xd0, 0xbb, 0xb1, 0x5e, 0x23, 0x82, + 0x6f, 0x44, 0x3e, 0x00, 0x50, 0x5d, 0x68, 0xd3, + 0xed, 0x73, 0x72, 0x99, 0x5a, 0x5c, 0x3e, 0x49, + 0x86, 0x54, 0x10, 0x2f, 0xbc, 0xd0, 0x96, 0x4e }, + { 0xc0, 0x21, 0xb3, 0x00, 0x85, 0x15, 0x14, 0x35, + 0xdf, 0x33, 0xb0, 0x07, 0xcc, 0xec, 0xc6, 0x9d, + 0xf1, 0x26, 0x9f, 0x39, 0xba, 0x25, 0x09, 0x2b, + 0xed, 0x59, 0xd9, 0x32, 0xac, 0x0f, 0xdc, 0x28 }, + { 0x91, 0xa2, 0x5e, 0xc0, 0xec, 0x0d, 0x9a, 0x56, + 0x7f, 0x89, 0xc4, 0xbf, 0xe1, 0xa6, 0x5a, 0x0e, + 0x43, 0x2d, 0x07, 0x06, 0x4b, 0x41, 0x90, 0xe2, + 0x7d, 0xfb, 0x81, 0x90, 0x1f, 0xd3, 0x13, 0x9b }, + { 0x59, 0x50, 0xd3, 0x9a, 0x23, 0xe1, 0x54, 0x5f, + 0x30, 0x12, 0x70, 0xaa, 0x1a, 0x12, 0xf2, 0xe6, + 0xc4, 0x53, 0x77, 0x6e, 0x4d, 0x63, 0x55, 0xde, + 0x42, 0x5c, 0xc1, 0x53, 0xf9, 0x81, 0x88, 0x67 }, + { 0xd7, 0x9f, 0x14, 0x72, 0x0c, 0x61, 0x0a, 0xf1, + 0x79, 0xa3, 0x76, 0x5d, 0x4b, 0x7c, 0x09, 
0x68, + 0xf9, 0x77, 0x96, 0x2d, 0xbf, 0x65, 0x5b, 0x52, + 0x12, 0x72, 0xb6, 0xf1, 0xe1, 0x94, 0x48, 0x8e }, + { 0xe9, 0x53, 0x1b, 0xfc, 0x8b, 0x02, 0x99, 0x5a, + 0xea, 0xa7, 0x5b, 0xa2, 0x70, 0x31, 0xfa, 0xdb, + 0xcb, 0xf4, 0xa0, 0xda, 0xb8, 0x96, 0x1d, 0x92, + 0x96, 0xcd, 0x7e, 0x84, 0xd2, 0x5d, 0x60, 0x06 }, + { 0x34, 0xe9, 0xc2, 0x6a, 0x01, 0xd7, 0xf1, 0x61, + 0x81, 0xb4, 0x54, 0xa9, 0xd1, 0x62, 0x3c, 0x23, + 0x3c, 0xb9, 0x9d, 0x31, 0xc6, 0x94, 0x65, 0x6e, + 0x94, 0x13, 0xac, 0xa3, 0xe9, 0x18, 0x69, 0x2f }, + { 0xd9, 0xd7, 0x42, 0x2f, 0x43, 0x7b, 0xd4, 0x39, + 0xdd, 0xd4, 0xd8, 0x83, 0xda, 0xe2, 0xa0, 0x83, + 0x50, 0x17, 0x34, 0x14, 0xbe, 0x78, 0x15, 0x51, + 0x33, 0xff, 0xf1, 0x96, 0x4c, 0x3d, 0x79, 0x72 }, + { 0x4a, 0xee, 0x0c, 0x7a, 0xaf, 0x07, 0x54, 0x14, + 0xff, 0x17, 0x93, 0xea, 0xd7, 0xea, 0xca, 0x60, + 0x17, 0x75, 0xc6, 0x15, 0xdb, 0xd6, 0x0b, 0x64, + 0x0b, 0x0a, 0x9f, 0x0c, 0xe5, 0x05, 0xd4, 0x35 }, + { 0x6b, 0xfd, 0xd1, 0x54, 0x59, 0xc8, 0x3b, 0x99, + 0xf0, 0x96, 0xbf, 0xb4, 0x9e, 0xe8, 0x7b, 0x06, + 0x3d, 0x69, 0xc1, 0x97, 0x4c, 0x69, 0x28, 0xac, + 0xfc, 0xfb, 0x40, 0x99, 0xf8, 0xc4, 0xef, 0x67 }, + { 0x9f, 0xd1, 0xc4, 0x08, 0xfd, 0x75, 0xc3, 0x36, + 0x19, 0x3a, 0x2a, 0x14, 0xd9, 0x4f, 0x6a, 0xf5, + 0xad, 0xf0, 0x50, 0xb8, 0x03, 0x87, 0xb4, 0xb0, + 0x10, 0xfb, 0x29, 0xf4, 0xcc, 0x72, 0x70, 0x7c }, + { 0x13, 0xc8, 0x84, 0x80, 0xa5, 0xd0, 0x0d, 0x6c, + 0x8c, 0x7a, 0xd2, 0x11, 0x0d, 0x76, 0xa8, 0x2d, + 0x9b, 0x70, 0xf4, 0xfa, 0x66, 0x96, 0xd4, 0xe5, + 0xdd, 0x42, 0xa0, 0x66, 0xdc, 0xaf, 0x99, 0x20 }, + { 0x82, 0x0e, 0x72, 0x5e, 0xe2, 0x5f, 0xe8, 0xfd, + 0x3a, 0x8d, 0x5a, 0xbe, 0x4c, 0x46, 0xc3, 0xba, + 0x88, 0x9d, 0xe6, 0xfa, 0x91, 0x91, 0xaa, 0x22, + 0xba, 0x67, 0xd5, 0x70, 0x54, 0x21, 0x54, 0x2b }, + { 0x32, 0xd9, 0x3a, 0x0e, 0xb0, 0x2f, 0x42, 0xfb, + 0xbc, 0xaf, 0x2b, 0xad, 0x00, 0x85, 0xb2, 0x82, + 0xe4, 0x60, 0x46, 0xa4, 0xdf, 0x7a, 0xd1, 0x06, + 0x57, 0xc9, 0xd6, 0x47, 0x63, 0x75, 0xb9, 0x3e }, + { 0xad, 0xc5, 0x18, 0x79, 0x05, 0xb1, 0x66, 0x9c, + 
0xd8, 0xec, 0x9c, 0x72, 0x1e, 0x19, 0x53, 0x78, + 0x6b, 0x9d, 0x89, 0xa9, 0xba, 0xe3, 0x07, 0x80, + 0xf1, 0xe1, 0xea, 0xb2, 0x4a, 0x00, 0x52, 0x3c }, + { 0xe9, 0x07, 0x56, 0xff, 0x7f, 0x9a, 0xd8, 0x10, + 0xb2, 0x39, 0xa1, 0x0c, 0xed, 0x2c, 0xf9, 0xb2, + 0x28, 0x43, 0x54, 0xc1, 0xf8, 0xc7, 0xe0, 0xac, + 0xcc, 0x24, 0x61, 0xdc, 0x79, 0x6d, 0x6e, 0x89 }, + { 0x12, 0x51, 0xf7, 0x6e, 0x56, 0x97, 0x84, 0x81, + 0x87, 0x53, 0x59, 0x80, 0x1d, 0xb5, 0x89, 0xa0, + 0xb2, 0x2f, 0x86, 0xd8, 0xd6, 0x34, 0xdc, 0x04, + 0x50, 0x6f, 0x32, 0x2e, 0xd7, 0x8f, 0x17, 0xe8 }, + { 0x3a, 0xfa, 0x89, 0x9f, 0xd9, 0x80, 0xe7, 0x3e, + 0xcb, 0x7f, 0x4d, 0x8b, 0x8f, 0x29, 0x1d, 0xc9, + 0xaf, 0x79, 0x6b, 0xc6, 0x5d, 0x27, 0xf9, 0x74, + 0xc6, 0xf1, 0x93, 0xc9, 0x19, 0x1a, 0x09, 0xfd }, + { 0xaa, 0x30, 0x5b, 0xe2, 0x6e, 0x5d, 0xed, 0xdc, + 0x3c, 0x10, 0x10, 0xcb, 0xc2, 0x13, 0xf9, 0x5f, + 0x05, 0x1c, 0x78, 0x5c, 0x5b, 0x43, 0x1e, 0x6a, + 0x7c, 0xd0, 0x48, 0xf1, 0x61, 0x78, 0x75, 0x28 }, + { 0x8e, 0xa1, 0x88, 0x4f, 0xf3, 0x2e, 0x9d, 0x10, + 0xf0, 0x39, 0xb4, 0x07, 0xd0, 0xd4, 0x4e, 0x7e, + 0x67, 0x0a, 0xbd, 0x88, 0x4a, 0xee, 0xe0, 0xfb, + 0x75, 0x7a, 0xe9, 0x4e, 0xaa, 0x97, 0x37, 0x3d }, + { 0xd4, 0x82, 0xb2, 0x15, 0x5d, 0x4d, 0xec, 0x6b, + 0x47, 0x36, 0xa1, 0xf1, 0x61, 0x7b, 0x53, 0xaa, + 0xa3, 0x73, 0x10, 0x27, 0x7d, 0x3f, 0xef, 0x0c, + 0x37, 0xad, 0x41, 0x76, 0x8f, 0xc2, 0x35, 0xb4 }, + { 0x4d, 0x41, 0x39, 0x71, 0x38, 0x7e, 0x7a, 0x88, + 0x98, 0xa8, 0xdc, 0x2a, 0x27, 0x50, 0x07, 0x78, + 0x53, 0x9e, 0xa2, 0x14, 0xa2, 0xdf, 0xe9, 0xb3, + 0xd7, 0xe8, 0xeb, 0xdc, 0xe5, 0xcf, 0x3d, 0xb3 }, + { 0x69, 0x6e, 0x5d, 0x46, 0xe6, 0xc5, 0x7e, 0x87, + 0x96, 0xe4, 0x73, 0x5d, 0x08, 0x91, 0x6e, 0x0b, + 0x79, 0x29, 0xb3, 0xcf, 0x29, 0x8c, 0x29, 0x6d, + 0x22, 0xe9, 0xd3, 0x01, 0x96, 0x53, 0x37, 0x1c }, + { 0x1f, 0x56, 0x47, 0xc1, 0xd3, 0xb0, 0x88, 0x22, + 0x88, 0x85, 0x86, 0x5c, 0x89, 0x40, 0x90, 0x8b, + 0xf4, 0x0d, 0x1a, 0x82, 0x72, 0x82, 0x19, 0x73, + 0xb1, 0x60, 0x00, 0x8e, 0x7a, 0x3c, 0xe2, 0xeb }, + { 0xb6, 
0xe7, 0x6c, 0x33, 0x0f, 0x02, 0x1a, 0x5b, + 0xda, 0x65, 0x87, 0x50, 0x10, 0xb0, 0xed, 0xf0, + 0x91, 0x26, 0xc0, 0xf5, 0x10, 0xea, 0x84, 0x90, + 0x48, 0x19, 0x20, 0x03, 0xae, 0xf4, 0xc6, 0x1c }, + { 0x3c, 0xd9, 0x52, 0xa0, 0xbe, 0xad, 0xa4, 0x1a, + 0xbb, 0x42, 0x4c, 0xe4, 0x7f, 0x94, 0xb4, 0x2b, + 0xe6, 0x4e, 0x1f, 0xfb, 0x0f, 0xd0, 0x78, 0x22, + 0x76, 0x80, 0x79, 0x46, 0xd0, 0xd0, 0xbc, 0x55 }, + { 0x98, 0xd9, 0x26, 0x77, 0x43, 0x9b, 0x41, 0xb7, + 0xbb, 0x51, 0x33, 0x12, 0xaf, 0xb9, 0x2b, 0xcc, + 0x8e, 0xe9, 0x68, 0xb2, 0xe3, 0xb2, 0x38, 0xce, + 0xcb, 0x9b, 0x0f, 0x34, 0xc9, 0xbb, 0x63, 0xd0 }, + { 0xec, 0xbc, 0xa2, 0xcf, 0x08, 0xae, 0x57, 0xd5, + 0x17, 0xad, 0x16, 0x15, 0x8a, 0x32, 0xbf, 0xa7, + 0xdc, 0x03, 0x82, 0xea, 0xed, 0xa1, 0x28, 0xe9, + 0x18, 0x86, 0x73, 0x4c, 0x24, 0xa0, 0xb2, 0x9d }, + { 0x94, 0x2c, 0xc7, 0xc0, 0xb5, 0x2e, 0x2b, 0x16, + 0xa4, 0xb8, 0x9f, 0xa4, 0xfc, 0x7e, 0x0b, 0xf6, + 0x09, 0xe2, 0x9a, 0x08, 0xc1, 0xa8, 0x54, 0x34, + 0x52, 0xb7, 0x7c, 0x7b, 0xfd, 0x11, 0xbb, 0x28 }, + { 0x8a, 0x06, 0x5d, 0x8b, 0x61, 0xa0, 0xdf, 0xfb, + 0x17, 0x0d, 0x56, 0x27, 0x73, 0x5a, 0x76, 0xb0, + 0xe9, 0x50, 0x60, 0x37, 0x80, 0x8c, 0xba, 0x16, + 0xc3, 0x45, 0x00, 0x7c, 0x9f, 0x79, 0xcf, 0x8f }, + { 0x1b, 0x9f, 0xa1, 0x97, 0x14, 0x65, 0x9c, 0x78, + 0xff, 0x41, 0x38, 0x71, 0x84, 0x92, 0x15, 0x36, + 0x10, 0x29, 0xac, 0x80, 0x2b, 0x1c, 0xbc, 0xd5, + 0x4e, 0x40, 0x8b, 0xd8, 0x72, 0x87, 0xf8, 0x1f }, + { 0x8d, 0xab, 0x07, 0x1b, 0xcd, 0x6c, 0x72, 0x92, + 0xa9, 0xef, 0x72, 0x7b, 0x4a, 0xe0, 0xd8, 0x67, + 0x13, 0x30, 0x1d, 0xa8, 0x61, 0x8d, 0x9a, 0x48, + 0xad, 0xce, 0x55, 0xf3, 0x03, 0xa8, 0x69, 0xa1 }, + { 0x82, 0x53, 0xe3, 0xe7, 0xc7, 0xb6, 0x84, 0xb9, + 0xcb, 0x2b, 0xeb, 0x01, 0x4c, 0xe3, 0x30, 0xff, + 0x3d, 0x99, 0xd1, 0x7a, 0xbb, 0xdb, 0xab, 0xe4, + 0xf4, 0xd6, 0x74, 0xde, 0xd5, 0x3f, 0xfc, 0x6b }, + { 0xf1, 0x95, 0xf3, 0x21, 0xe9, 0xe3, 0xd6, 0xbd, + 0x7d, 0x07, 0x45, 0x04, 0xdd, 0x2a, 0xb0, 0xe6, + 0x24, 0x1f, 0x92, 0xe7, 0x84, 0xb1, 0xaa, 0x27, + 0x1f, 0xf6, 0x48, 
0xb1, 0xca, 0xb6, 0xd7, 0xf6 }, + { 0x27, 0xe4, 0xcc, 0x72, 0x09, 0x0f, 0x24, 0x12, + 0x66, 0x47, 0x6a, 0x7c, 0x09, 0x49, 0x5f, 0x2d, + 0xb1, 0x53, 0xd5, 0xbc, 0xbd, 0x76, 0x19, 0x03, + 0xef, 0x79, 0x27, 0x5e, 0xc5, 0x6b, 0x2e, 0xd8 }, + { 0x89, 0x9c, 0x24, 0x05, 0x78, 0x8e, 0x25, 0xb9, + 0x9a, 0x18, 0x46, 0x35, 0x5e, 0x64, 0x6d, 0x77, + 0xcf, 0x40, 0x00, 0x83, 0x41, 0x5f, 0x7d, 0xc5, + 0xaf, 0xe6, 0x9d, 0x6e, 0x17, 0xc0, 0x00, 0x23 }, + { 0xa5, 0x9b, 0x78, 0xc4, 0x90, 0x57, 0x44, 0x07, + 0x6b, 0xfe, 0xe8, 0x94, 0xde, 0x70, 0x7d, 0x4f, + 0x12, 0x0b, 0x5c, 0x68, 0x93, 0xea, 0x04, 0x00, + 0x29, 0x7d, 0x0b, 0xb8, 0x34, 0x72, 0x76, 0x32 }, + { 0x59, 0xdc, 0x78, 0xb1, 0x05, 0x64, 0x97, 0x07, + 0xa2, 0xbb, 0x44, 0x19, 0xc4, 0x8f, 0x00, 0x54, + 0x00, 0xd3, 0x97, 0x3d, 0xe3, 0x73, 0x66, 0x10, + 0x23, 0x04, 0x35, 0xb1, 0x04, 0x24, 0xb2, 0x4f }, + { 0xc0, 0x14, 0x9d, 0x1d, 0x7e, 0x7a, 0x63, 0x53, + 0xa6, 0xd9, 0x06, 0xef, 0xe7, 0x28, 0xf2, 0xf3, + 0x29, 0xfe, 0x14, 0xa4, 0x14, 0x9a, 0x3e, 0xa7, + 0x76, 0x09, 0xbc, 0x42, 0xb9, 0x75, 0xdd, 0xfa }, + { 0xa3, 0x2f, 0x24, 0x14, 0x74, 0xa6, 0xc1, 0x69, + 0x32, 0xe9, 0x24, 0x3b, 0xe0, 0xcf, 0x09, 0xbc, + 0xdc, 0x7e, 0x0c, 0xa0, 0xe7, 0xa6, 0xa1, 0xb9, + 0xb1, 0xa0, 0xf0, 0x1e, 0x41, 0x50, 0x23, 0x77 }, + { 0xb2, 0x39, 0xb2, 0xe4, 0xf8, 0x18, 0x41, 0x36, + 0x1c, 0x13, 0x39, 0xf6, 0x8e, 0x2c, 0x35, 0x9f, + 0x92, 0x9a, 0xf9, 0xad, 0x9f, 0x34, 0xe0, 0x1a, + 0xab, 0x46, 0x31, 0xad, 0x6d, 0x55, 0x00, 0xb0 }, + { 0x85, 0xfb, 0x41, 0x9c, 0x70, 0x02, 0xa3, 0xe0, + 0xb4, 0xb6, 0xea, 0x09, 0x3b, 0x4c, 0x1a, 0xc6, + 0x93, 0x66, 0x45, 0xb6, 0x5d, 0xac, 0x5a, 0xc1, + 0x5a, 0x85, 0x28, 0xb7, 0xb9, 0x4c, 0x17, 0x54 }, + { 0x96, 0x19, 0x72, 0x06, 0x25, 0xf1, 0x90, 0xb9, + 0x3a, 0x3f, 0xad, 0x18, 0x6a, 0xb3, 0x14, 0x18, + 0x96, 0x33, 0xc0, 0xd3, 0xa0, 0x1e, 0x6f, 0x9b, + 0xc8, 0xc4, 0xa8, 0xf8, 0x2f, 0x38, 0x3d, 0xbf }, + { 0x7d, 0x62, 0x0d, 0x90, 0xfe, 0x69, 0xfa, 0x46, + 0x9a, 0x65, 0x38, 0x38, 0x89, 0x70, 0xa1, 0xaa, + 0x09, 0xbb, 0x48, 0xa2, 
0xd5, 0x9b, 0x34, 0x7b, + 0x97, 0xe8, 0xce, 0x71, 0xf4, 0x8c, 0x7f, 0x46 }, + { 0x29, 0x43, 0x83, 0x56, 0x85, 0x96, 0xfb, 0x37, + 0xc7, 0x5b, 0xba, 0xcd, 0x97, 0x9c, 0x5f, 0xf6, + 0xf2, 0x0a, 0x55, 0x6b, 0xf8, 0x87, 0x9c, 0xc7, + 0x29, 0x24, 0x85, 0x5d, 0xf9, 0xb8, 0x24, 0x0e }, + { 0x16, 0xb1, 0x8a, 0xb3, 0x14, 0x35, 0x9c, 0x2b, + 0x83, 0x3c, 0x1c, 0x69, 0x86, 0xd4, 0x8c, 0x55, + 0xa9, 0xfc, 0x97, 0xcd, 0xe9, 0xa3, 0xc1, 0xf1, + 0x0a, 0x31, 0x77, 0x14, 0x0f, 0x73, 0xf7, 0x38 }, + { 0x8c, 0xbb, 0xdd, 0x14, 0xbc, 0x33, 0xf0, 0x4c, + 0xf4, 0x58, 0x13, 0xe4, 0xa1, 0x53, 0xa2, 0x73, + 0xd3, 0x6a, 0xda, 0xd5, 0xce, 0x71, 0xf4, 0x99, + 0xee, 0xb8, 0x7f, 0xb8, 0xac, 0x63, 0xb7, 0x29 }, + { 0x69, 0xc9, 0xa4, 0x98, 0xdb, 0x17, 0x4e, 0xca, + 0xef, 0xcc, 0x5a, 0x3a, 0xc9, 0xfd, 0xed, 0xf0, + 0xf8, 0x13, 0xa5, 0xbe, 0xc7, 0x27, 0xf1, 0xe7, + 0x75, 0xba, 0xbd, 0xec, 0x77, 0x18, 0x81, 0x6e }, + { 0xb4, 0x62, 0xc3, 0xbe, 0x40, 0x44, 0x8f, 0x1d, + 0x4f, 0x80, 0x62, 0x62, 0x54, 0xe5, 0x35, 0xb0, + 0x8b, 0xc9, 0xcd, 0xcf, 0xf5, 0x99, 0xa7, 0x68, + 0x57, 0x8d, 0x4b, 0x28, 0x81, 0xa8, 0xe3, 0xf0 }, + { 0x55, 0x3e, 0x9d, 0x9c, 0x5f, 0x36, 0x0a, 0xc0, + 0xb7, 0x4a, 0x7d, 0x44, 0xe5, 0xa3, 0x91, 0xda, + 0xd4, 0xce, 0xd0, 0x3e, 0x0c, 0x24, 0x18, 0x3b, + 0x7e, 0x8e, 0xca, 0xbd, 0xf1, 0x71, 0x5a, 0x64 }, + { 0x7a, 0x7c, 0x55, 0xa5, 0x6f, 0xa9, 0xae, 0x51, + 0xe6, 0x55, 0xe0, 0x19, 0x75, 0xd8, 0xa6, 0xff, + 0x4a, 0xe9, 0xe4, 0xb4, 0x86, 0xfc, 0xbe, 0x4e, + 0xac, 0x04, 0x45, 0x88, 0xf2, 0x45, 0xeb, 0xea }, + { 0x2a, 0xfd, 0xf3, 0xc8, 0x2a, 0xbc, 0x48, 0x67, + 0xf5, 0xde, 0x11, 0x12, 0x86, 0xc2, 0xb3, 0xbe, + 0x7d, 0x6e, 0x48, 0x65, 0x7b, 0xa9, 0x23, 0xcf, + 0xbf, 0x10, 0x1a, 0x6d, 0xfc, 0xf9, 0xdb, 0x9a }, + { 0x41, 0x03, 0x7d, 0x2e, 0xdc, 0xdc, 0xe0, 0xc4, + 0x9b, 0x7f, 0xb4, 0xa6, 0xaa, 0x09, 0x99, 0xca, + 0x66, 0x97, 0x6c, 0x74, 0x83, 0xaf, 0xe6, 0x31, + 0xd4, 0xed, 0xa2, 0x83, 0x14, 0x4f, 0x6d, 0xfc }, + { 0xc4, 0x46, 0x6f, 0x84, 0x97, 0xca, 0x2e, 0xeb, + 0x45, 0x83, 0xa0, 0xb0, 0x8e, 
0x9d, 0x9a, 0xc7, + 0x43, 0x95, 0x70, 0x9f, 0xda, 0x10, 0x9d, 0x24, + 0xf2, 0xe4, 0x46, 0x21, 0x96, 0x77, 0x9c, 0x5d }, + { 0x75, 0xf6, 0x09, 0x33, 0x8a, 0xa6, 0x7d, 0x96, + 0x9a, 0x2a, 0xe2, 0xa2, 0x36, 0x2b, 0x2d, 0xa9, + 0xd7, 0x7c, 0x69, 0x5d, 0xfd, 0x1d, 0xf7, 0x22, + 0x4a, 0x69, 0x01, 0xdb, 0x93, 0x2c, 0x33, 0x64 }, + { 0x68, 0x60, 0x6c, 0xeb, 0x98, 0x9d, 0x54, 0x88, + 0xfc, 0x7c, 0xf6, 0x49, 0xf3, 0xd7, 0xc2, 0x72, + 0xef, 0x05, 0x5d, 0xa1, 0xa9, 0x3f, 0xae, 0xcd, + 0x55, 0xfe, 0x06, 0xf6, 0x96, 0x70, 0x98, 0xca }, + { 0x44, 0x34, 0x6b, 0xde, 0xb7, 0xe0, 0x52, 0xf6, + 0x25, 0x50, 0x48, 0xf0, 0xd9, 0xb4, 0x2c, 0x42, + 0x5b, 0xab, 0x9c, 0x3d, 0xd2, 0x41, 0x68, 0x21, + 0x2c, 0x3e, 0xcf, 0x1e, 0xbf, 0x34, 0xe6, 0xae }, + { 0x8e, 0x9c, 0xf6, 0xe1, 0xf3, 0x66, 0x47, 0x1f, + 0x2a, 0xc7, 0xd2, 0xee, 0x9b, 0x5e, 0x62, 0x66, + 0xfd, 0xa7, 0x1f, 0x8f, 0x2e, 0x41, 0x09, 0xf2, + 0x23, 0x7e, 0xd5, 0xf8, 0x81, 0x3f, 0xc7, 0x18 }, + { 0x84, 0xbb, 0xeb, 0x84, 0x06, 0xd2, 0x50, 0x95, + 0x1f, 0x8c, 0x1b, 0x3e, 0x86, 0xa7, 0xc0, 0x10, + 0x08, 0x29, 0x21, 0x83, 0x3d, 0xfd, 0x95, 0x55, + 0xa2, 0xf9, 0x09, 0xb1, 0x08, 0x6e, 0xb4, 0xb8 }, + { 0xee, 0x66, 0x6f, 0x3e, 0xef, 0x0f, 0x7e, 0x2a, + 0x9c, 0x22, 0x29, 0x58, 0xc9, 0x7e, 0xaf, 0x35, + 0xf5, 0x1c, 0xed, 0x39, 0x3d, 0x71, 0x44, 0x85, + 0xab, 0x09, 0xa0, 0x69, 0x34, 0x0f, 0xdf, 0x88 }, + { 0xc1, 0x53, 0xd3, 0x4a, 0x65, 0xc4, 0x7b, 0x4a, + 0x62, 0xc5, 0xca, 0xcf, 0x24, 0x01, 0x09, 0x75, + 0xd0, 0x35, 0x6b, 0x2f, 0x32, 0xc8, 0xf5, 0xda, + 0x53, 0x0d, 0x33, 0x88, 0x16, 0xad, 0x5d, 0xe6 }, + { 0x9f, 0xc5, 0x45, 0x01, 0x09, 0xe1, 0xb7, 0x79, + 0xf6, 0xc7, 0xae, 0x79, 0xd5, 0x6c, 0x27, 0x63, + 0x5c, 0x8d, 0xd4, 0x26, 0xc5, 0xa9, 0xd5, 0x4e, + 0x25, 0x78, 0xdb, 0x98, 0x9b, 0x8c, 0x3b, 0x4e }, + { 0xd1, 0x2b, 0xf3, 0x73, 0x2e, 0xf4, 0xaf, 0x5c, + 0x22, 0xfa, 0x90, 0x35, 0x6a, 0xf8, 0xfc, 0x50, + 0xfc, 0xb4, 0x0f, 0x8f, 0x2e, 0xa5, 0xc8, 0x59, + 0x47, 0x37, 0xa3, 0xb3, 0xd5, 0xab, 0xdb, 0xd7 }, + { 0x11, 0x03, 0x0b, 0x92, 0x89, 0xbb, 
0xa5, 0xaf, + 0x65, 0x26, 0x06, 0x72, 0xab, 0x6f, 0xee, 0x88, + 0xb8, 0x74, 0x20, 0xac, 0xef, 0x4a, 0x17, 0x89, + 0xa2, 0x07, 0x3b, 0x7e, 0xc2, 0xf2, 0xa0, 0x9e }, + { 0x69, 0xcb, 0x19, 0x2b, 0x84, 0x44, 0x00, 0x5c, + 0x8c, 0x0c, 0xeb, 0x12, 0xc8, 0x46, 0x86, 0x07, + 0x68, 0x18, 0x8c, 0xda, 0x0a, 0xec, 0x27, 0xa9, + 0xc8, 0xa5, 0x5c, 0xde, 0xe2, 0x12, 0x36, 0x32 }, + { 0xdb, 0x44, 0x4c, 0x15, 0x59, 0x7b, 0x5f, 0x1a, + 0x03, 0xd1, 0xf9, 0xed, 0xd1, 0x6e, 0x4a, 0x9f, + 0x43, 0xa6, 0x67, 0xcc, 0x27, 0x51, 0x75, 0xdf, + 0xa2, 0xb7, 0x04, 0xe3, 0xbb, 0x1a, 0x9b, 0x83 }, + { 0x3f, 0xb7, 0x35, 0x06, 0x1a, 0xbc, 0x51, 0x9d, + 0xfe, 0x97, 0x9e, 0x54, 0xc1, 0xee, 0x5b, 0xfa, + 0xd0, 0xa9, 0xd8, 0x58, 0xb3, 0x31, 0x5b, 0xad, + 0x34, 0xbd, 0xe9, 0x99, 0xef, 0xd7, 0x24, 0xdd } +}; + +static bool __init blake2s_selftest(void) +{ + u8 key[BLAKE2S_KEY_SIZE]; + u8 buf[ARRAY_SIZE(blake2s_testvecs)]; + u8 hash[BLAKE2S_HASH_SIZE]; + size_t i; + bool success = true; + + for (i = 0; i < BLAKE2S_KEY_SIZE; ++i) + key[i] = (u8)i; + + for (i = 0; i < ARRAY_SIZE(blake2s_testvecs); ++i) + buf[i] = (u8)i; + + for (i = 0; i < ARRAY_SIZE(blake2s_keyed_testvecs); ++i) { + blake2s(hash, buf, key, BLAKE2S_HASH_SIZE, i, BLAKE2S_KEY_SIZE); + if (memcmp(hash, blake2s_keyed_testvecs[i], BLAKE2S_HASH_SIZE)) { + pr_err("blake2s keyed self-test %zu: FAIL\n", i + 1); + success = false; + } + } + + for (i = 0; i < ARRAY_SIZE(blake2s_testvecs); ++i) { + blake2s(hash, buf, NULL, BLAKE2S_HASH_SIZE, i, 0); + if (memcmp(hash, blake2s_testvecs[i], BLAKE2S_HASH_SIZE)) { + pr_err("blake2s unkeyed self-test %zu: FAIL\n", i + i); + success = false; + } + } + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/chacha20.c b/net/wireguard/crypto/zinc/selftest/chacha20.c new file mode 100644 index 000000000000..1a2390aaf6c2 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/chacha20.c @@ -0,0 +1,2698 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . 
All Rights Reserved. + */ + +struct chacha20_testvec { + const u8 *input, *output, *key; + u64 nonce; + size_t ilen; +}; + +struct hchacha20_testvec { + u8 key[HCHACHA20_KEY_SIZE]; + u8 nonce[HCHACHA20_NONCE_SIZE]; + u8 output[CHACHA20_KEY_SIZE]; +}; + +/* These test vectors are generated by reference implementations and are + * designed to check chacha20 implementation block handling, as well as from + * the draft-arciszewski-xchacha-01 document. + */ + +static const u8 input01[] __initconst = { }; +static const u8 output01[] __initconst = { }; +static const u8 key01[] __initconst = { + 0x09, 0xf4, 0xe8, 0x57, 0x10, 0xf2, 0x12, 0xc3, + 0xc6, 0x91, 0xc4, 0x09, 0x97, 0x46, 0xef, 0xfe, + 0x02, 0x00, 0xe4, 0x5c, 0x82, 0xed, 0x16, 0xf3, + 0x32, 0xbe, 0xec, 0x7a, 0xe6, 0x68, 0x12, 0x26 +}; +enum { nonce01 = 0x3834e2afca3c66d3ULL }; + +static const u8 input02[] __initconst = { + 0x9d +}; +static const u8 output02[] __initconst = { + 0x94 +}; +static const u8 key02[] __initconst = { + 0x8c, 0x01, 0xac, 0xaf, 0x62, 0x63, 0x56, 0x7a, + 0xad, 0x23, 0x4c, 0x58, 0x29, 0x29, 0xbe, 0xab, + 0xe9, 0xf8, 0xdf, 0x6c, 0x8c, 0x74, 0x4d, 0x7d, + 0x13, 0x94, 0x10, 0x02, 0x3d, 0x8e, 0x9f, 0x94 +}; +enum { nonce02 = 0x5d1b3bfdedd9f73aULL }; + +static const u8 input03[] __initconst = { + 0x04, 0x16 +}; +static const u8 output03[] __initconst = { + 0x92, 0x07 +}; +static const u8 key03[] __initconst = { + 0x22, 0x0c, 0x79, 0x2c, 0x38, 0x51, 0xbe, 0x99, + 0xa9, 0x59, 0x24, 0x50, 0xef, 0x87, 0x38, 0xa6, + 0xa0, 0x97, 0x20, 0xcb, 0xb4, 0x0c, 0x94, 0x67, + 0x1f, 0x98, 0xdc, 0xc4, 0x83, 0xbc, 0x35, 0x4d +}; +enum { nonce03 = 0x7a3353ad720a3e2eULL }; + +static const u8 input04[] __initconst = { + 0xc7, 0xcc, 0xd0 +}; +static const u8 output04[] __initconst = { + 0xd8, 0x41, 0x80 +}; +static const u8 key04[] __initconst = { + 0x81, 0x5e, 0x12, 0x01, 0xc4, 0x36, 0x15, 0x03, + 0x11, 0xa0, 0xe9, 0x86, 0xbb, 0x5a, 0xdc, 0x45, + 0x7d, 0x5e, 0x98, 0xf8, 0x06, 0x76, 0x1c, 0xec, + 0xc0, 0xf7, 0xca, 0x4e, 
0x99, 0xd9, 0x42, 0x38 +}; +enum { nonce04 = 0x6816e2fc66176da2ULL }; + +static const u8 input05[] __initconst = { + 0x48, 0xf1, 0x31, 0x5f +}; +static const u8 output05[] __initconst = { + 0x48, 0xf7, 0x13, 0x67 +}; +static const u8 key05[] __initconst = { + 0x3f, 0xd6, 0xb6, 0x5e, 0x2f, 0xda, 0x82, 0x39, + 0x97, 0x06, 0xd3, 0x62, 0x4f, 0xbd, 0xcb, 0x9b, + 0x1d, 0xe6, 0x4a, 0x76, 0xab, 0xdd, 0x14, 0x50, + 0x59, 0x21, 0xe3, 0xb2, 0xc7, 0x95, 0xbc, 0x45 +}; +enum { nonce05 = 0xc41a7490e228cc42ULL }; + +static const u8 input06[] __initconst = { + 0xae, 0xa2, 0x85, 0x1d, 0xc8 +}; +static const u8 output06[] __initconst = { + 0xfa, 0xff, 0x45, 0x6b, 0x6f +}; +static const u8 key06[] __initconst = { + 0x04, 0x8d, 0xea, 0x67, 0x20, 0x78, 0xfb, 0x8f, + 0x49, 0x80, 0x35, 0xb5, 0x7b, 0xe4, 0x31, 0x74, + 0x57, 0x43, 0x3a, 0x64, 0x64, 0xb9, 0xe6, 0x23, + 0x4d, 0xfe, 0xb8, 0x7b, 0x71, 0x4d, 0x9d, 0x21 +}; +enum { nonce06 = 0x251366db50b10903ULL }; + +static const u8 input07[] __initconst = { + 0x1a, 0x32, 0x85, 0xb6, 0xe8, 0x52 +}; +static const u8 output07[] __initconst = { + 0xd3, 0x5f, 0xf0, 0x07, 0x69, 0xec +}; +static const u8 key07[] __initconst = { + 0xbf, 0x2d, 0x42, 0x99, 0x97, 0x76, 0x04, 0xad, + 0xd3, 0x8f, 0x6e, 0x6a, 0x34, 0x85, 0xaf, 0x81, + 0xef, 0x36, 0x33, 0xd5, 0x43, 0xa2, 0xaa, 0x08, + 0x0f, 0x77, 0x42, 0x83, 0x58, 0xc5, 0x42, 0x2a +}; +enum { nonce07 = 0xe0796da17dba9b58ULL }; + +static const u8 input08[] __initconst = { + 0x40, 0xae, 0xcd, 0xe4, 0x3d, 0x22, 0xe0 +}; +static const u8 output08[] __initconst = { + 0xfd, 0x8a, 0x9f, 0x3d, 0x05, 0xc9, 0xd3 +}; +static const u8 key08[] __initconst = { + 0xdc, 0x3f, 0x41, 0xe3, 0x23, 0x2a, 0x8d, 0xf6, + 0x41, 0x2a, 0xa7, 0x66, 0x05, 0x68, 0xe4, 0x7b, + 0xc4, 0x58, 0xd6, 0xcc, 0xdf, 0x0d, 0xc6, 0x25, + 0x1b, 0x61, 0x32, 0x12, 0x4e, 0xf1, 0xe6, 0x29 +}; +enum { nonce08 = 0xb1d2536d9e159832ULL }; + +static const u8 input09[] __initconst = { + 0xba, 0x1d, 0x14, 0x16, 0x9f, 0x83, 0x67, 0x24 +}; +static const u8 
output09[] __initconst = { + 0x7c, 0xe3, 0x78, 0x1d, 0xa2, 0xe7, 0xe9, 0x39 +}; +static const u8 key09[] __initconst = { + 0x17, 0x55, 0x90, 0x52, 0xa4, 0xce, 0x12, 0xae, + 0xd4, 0xfd, 0xd4, 0xfb, 0xd5, 0x18, 0x59, 0x50, + 0x4e, 0x51, 0x99, 0x32, 0x09, 0x31, 0xfc, 0xf7, + 0x27, 0x10, 0x8e, 0xa2, 0x4b, 0xa5, 0xf5, 0x62 +}; +enum { nonce09 = 0x495fc269536d003ULL }; + +static const u8 input10[] __initconst = { + 0x09, 0xfd, 0x3c, 0x0b, 0x3d, 0x0e, 0xf3, 0x9d, + 0x27 +}; +static const u8 output10[] __initconst = { + 0xdc, 0xe4, 0x33, 0x60, 0x0c, 0x07, 0xcb, 0x51, + 0x6b +}; +static const u8 key10[] __initconst = { + 0x4e, 0x00, 0x72, 0x37, 0x0f, 0x52, 0x4d, 0x6f, + 0x37, 0x50, 0x3c, 0xb3, 0x51, 0x81, 0x49, 0x16, + 0x7e, 0xfd, 0xb1, 0x51, 0x72, 0x2e, 0xe4, 0x16, + 0x68, 0x5c, 0x5b, 0x8a, 0xc3, 0x90, 0x70, 0x04 +}; +enum { nonce10 = 0x1ad9d1114d88cbbdULL }; + +static const u8 input11[] __initconst = { + 0x70, 0x18, 0x52, 0x85, 0xba, 0x66, 0xff, 0x2c, + 0x9a, 0x46 +}; +static const u8 output11[] __initconst = { + 0xf5, 0x2a, 0x7a, 0xfd, 0x31, 0x7c, 0x91, 0x41, + 0xb1, 0xcf +}; +static const u8 key11[] __initconst = { + 0x48, 0xb4, 0xd0, 0x7c, 0x88, 0xd1, 0x96, 0x0d, + 0x80, 0x33, 0xb4, 0xd5, 0x31, 0x9a, 0x88, 0xca, + 0x14, 0xdc, 0xf0, 0xa8, 0xf3, 0xac, 0xb8, 0x47, + 0x75, 0x86, 0x7c, 0x88, 0x50, 0x11, 0x43, 0x40 +}; +enum { nonce11 = 0x47c35dd1f4f8aa4fULL }; + +static const u8 input12[] __initconst = { + 0x9e, 0x8e, 0x3d, 0x2a, 0x05, 0xfd, 0xe4, 0x90, + 0x24, 0x1c, 0xd3 +}; +static const u8 output12[] __initconst = { + 0x97, 0x72, 0x40, 0x9f, 0xc0, 0x6b, 0x05, 0x33, + 0x42, 0x7e, 0x28 +}; +static const u8 key12[] __initconst = { + 0xee, 0xff, 0x33, 0x33, 0xe0, 0x28, 0xdf, 0xa2, + 0xb6, 0x5e, 0x25, 0x09, 0x52, 0xde, 0xa5, 0x9c, + 0x8f, 0x95, 0xa9, 0x03, 0x77, 0x0f, 0xbe, 0xa1, + 0xd0, 0x7d, 0x73, 0x2f, 0xf8, 0x7e, 0x51, 0x44 +}; +enum { nonce12 = 0xc22d044dc6ea4af3ULL }; + +static const u8 input13[] __initconst = { + 0x9c, 0x16, 0xa2, 0x22, 0x4d, 0xbe, 0x04, 0x9a, + 0xb3, 
0xb5, 0xc6, 0x58 +}; +static const u8 output13[] __initconst = { + 0xf0, 0x81, 0xdb, 0x6d, 0xa3, 0xe9, 0xb2, 0xc6, + 0x32, 0x50, 0x16, 0x9f +}; +static const u8 key13[] __initconst = { + 0x96, 0xb3, 0x01, 0xd2, 0x7a, 0x8c, 0x94, 0x09, + 0x4f, 0x58, 0xbe, 0x80, 0xcc, 0xa9, 0x7e, 0x2d, + 0xad, 0x58, 0x3b, 0x63, 0xb8, 0x5c, 0x17, 0xce, + 0xbf, 0x43, 0x33, 0x7a, 0x7b, 0x82, 0x28, 0x2f +}; +enum { nonce13 = 0x2a5d05d88cd7b0daULL }; + +static const u8 input14[] __initconst = { + 0x57, 0x4f, 0xaa, 0x30, 0xe6, 0x23, 0x50, 0x86, + 0x91, 0xa5, 0x60, 0x96, 0x2b +}; +static const u8 output14[] __initconst = { + 0x6c, 0x1f, 0x3b, 0x42, 0xb6, 0x2f, 0xf0, 0xbd, + 0x76, 0x60, 0xc7, 0x7e, 0x8d +}; +static const u8 key14[] __initconst = { + 0x22, 0x85, 0xaf, 0x8f, 0xa3, 0x53, 0xa0, 0xc4, + 0xb5, 0x75, 0xc0, 0xba, 0x30, 0x92, 0xc3, 0x32, + 0x20, 0x5a, 0x8f, 0x7e, 0x93, 0xda, 0x65, 0x18, + 0xd1, 0xf6, 0x9a, 0x9b, 0x8f, 0x85, 0x30, 0xe6 +}; +enum { nonce14 = 0xf9946c166aa4475fULL }; + +static const u8 input15[] __initconst = { + 0x89, 0x81, 0xc7, 0xe2, 0x00, 0xac, 0x52, 0x70, + 0xa4, 0x79, 0xab, 0xeb, 0x74, 0xf7 +}; +static const u8 output15[] __initconst = { + 0xb4, 0xd0, 0xa9, 0x9d, 0x15, 0x5f, 0x48, 0xd6, + 0x00, 0x7e, 0x4c, 0x77, 0x5a, 0x46 +}; +static const u8 key15[] __initconst = { + 0x0a, 0x66, 0x36, 0xca, 0x5d, 0x82, 0x23, 0xb6, + 0xe4, 0x9b, 0xad, 0x5e, 0xd0, 0x7f, 0xf6, 0x7a, + 0x7b, 0x03, 0xa7, 0x4c, 0xfd, 0xec, 0xd5, 0xa1, + 0xfc, 0x25, 0x54, 0xda, 0x5a, 0x5c, 0xf0, 0x2c +}; +enum { nonce15 = 0x9ab2b87a35e772c8ULL }; + +static const u8 input16[] __initconst = { + 0x5f, 0x09, 0xc0, 0x8b, 0x1e, 0xde, 0xca, 0xd9, + 0xb7, 0x5c, 0x23, 0xc9, 0x55, 0x1e, 0xcf +}; +static const u8 output16[] __initconst = { + 0x76, 0x9b, 0x53, 0xf3, 0x66, 0x88, 0x28, 0x60, + 0x98, 0x80, 0x2c, 0xa8, 0x80, 0xa6, 0x48 +}; +static const u8 key16[] __initconst = { + 0x80, 0xb5, 0x51, 0xdf, 0x17, 0x5b, 0xb0, 0xef, + 0x8b, 0x5b, 0x2e, 0x3e, 0xc5, 0xe3, 0xa5, 0x86, + 0xac, 0x0d, 0x8e, 0x32, 0x90, 0x9d, 
0x82, 0x27, + 0xf1, 0x23, 0x26, 0xc3, 0xea, 0x55, 0xb6, 0x63 +}; +enum { nonce16 = 0xa82e9d39e4d02ef5ULL }; + +static const u8 input17[] __initconst = { + 0x87, 0x0b, 0x36, 0x71, 0x7c, 0xb9, 0x0b, 0x80, + 0x4d, 0x77, 0x5c, 0x4f, 0xf5, 0x51, 0x0e, 0x1a +}; +static const u8 output17[] __initconst = { + 0xf1, 0x12, 0x4a, 0x8a, 0xd9, 0xd0, 0x08, 0x67, + 0x66, 0xd7, 0x34, 0xea, 0x32, 0x3b, 0x54, 0x0e +}; +static const u8 key17[] __initconst = { + 0xfb, 0x71, 0x5f, 0x3f, 0x7a, 0xc0, 0x9a, 0xc8, + 0xc8, 0xcf, 0xe8, 0xbc, 0xfb, 0x09, 0xbf, 0x89, + 0x6a, 0xef, 0xd5, 0xe5, 0x36, 0x87, 0x14, 0x76, + 0x00, 0xb9, 0x32, 0x28, 0xb2, 0x00, 0x42, 0x53 +}; +enum { nonce17 = 0x229b87e73d557b96ULL }; + +static const u8 input18[] __initconst = { + 0x38, 0x42, 0xb5, 0x37, 0xb4, 0x3d, 0xfe, 0x59, + 0x38, 0x68, 0x88, 0xfa, 0x89, 0x8a, 0x5f, 0x90, + 0x3c +}; +static const u8 output18[] __initconst = { + 0xac, 0xad, 0x14, 0xe8, 0x7e, 0xd7, 0xce, 0x96, + 0x3d, 0xb3, 0x78, 0x85, 0x22, 0x5a, 0xcb, 0x39, + 0xd4 +}; +static const u8 key18[] __initconst = { + 0xe1, 0xc1, 0xa8, 0xe0, 0x91, 0xe7, 0x38, 0x66, + 0x80, 0x17, 0x12, 0x3c, 0x5e, 0x2d, 0xbb, 0xea, + 0xeb, 0x6c, 0x8b, 0xc8, 0x1b, 0x6f, 0x7c, 0xea, + 0x50, 0x57, 0x23, 0x1e, 0x65, 0x6f, 0x6d, 0x81 +}; +enum { nonce18 = 0xfaf5fcf8f30e57a9ULL }; + +static const u8 input19[] __initconst = { + 0x1c, 0x4a, 0x30, 0x26, 0xef, 0x9a, 0x32, 0xa7, + 0x8f, 0xe5, 0xc0, 0x0f, 0x30, 0x3a, 0xbf, 0x38, + 0x54, 0xba +}; +static const u8 output19[] __initconst = { + 0x57, 0x67, 0x54, 0x4f, 0x31, 0xd6, 0xef, 0x35, + 0x0b, 0xd9, 0x52, 0xa7, 0x46, 0x7d, 0x12, 0x17, + 0x1e, 0xe3 +}; +static const u8 key19[] __initconst = { + 0x5a, 0x79, 0xc1, 0xea, 0x33, 0xb3, 0xc7, 0x21, + 0xec, 0xf8, 0xcb, 0xd2, 0x58, 0x96, 0x23, 0xd6, + 0x4d, 0xed, 0x2f, 0xdf, 0x8a, 0x79, 0xe6, 0x8b, + 0x38, 0xa3, 0xc3, 0x7a, 0x33, 0xda, 0x02, 0xc7 +}; +enum { nonce19 = 0x2b23b61840429604ULL }; + +static const u8 input20[] __initconst = { + 0xab, 0xe9, 0x32, 0xbb, 0x35, 0x17, 0xe0, 0x60, + 
0x80, 0xb1, 0x27, 0xdc, 0xe6, 0x62, 0x9e, 0x0c, + 0x77, 0xf4, 0x50 +}; +static const u8 output20[] __initconst = { + 0x54, 0x6d, 0xaa, 0xfc, 0x08, 0xfb, 0x71, 0xa8, + 0xd6, 0x1d, 0x7d, 0xf3, 0x45, 0x10, 0xb5, 0x4c, + 0xcc, 0x4b, 0x45 +}; +static const u8 key20[] __initconst = { + 0xa3, 0xfd, 0x3d, 0xa9, 0xeb, 0xea, 0x2c, 0x69, + 0xcf, 0x59, 0x38, 0x13, 0x5b, 0xa7, 0x53, 0x8f, + 0x5e, 0xa2, 0x33, 0x86, 0x4c, 0x75, 0x26, 0xaf, + 0x35, 0x12, 0x09, 0x71, 0x81, 0xea, 0x88, 0x66 +}; +enum { nonce20 = 0x7459667a8fadff58ULL }; + +static const u8 input21[] __initconst = { + 0xa6, 0x82, 0x21, 0x23, 0xad, 0x27, 0x3f, 0xc6, + 0xd7, 0x16, 0x0d, 0x6d, 0x24, 0x15, 0x54, 0xc5, + 0x96, 0x72, 0x59, 0x8a +}; +static const u8 output21[] __initconst = { + 0x5f, 0x34, 0x32, 0xea, 0x06, 0xd4, 0x9e, 0x01, + 0xdc, 0x32, 0x32, 0x40, 0x66, 0x73, 0x6d, 0x4a, + 0x6b, 0x12, 0x20, 0xe8 +}; +static const u8 key21[] __initconst = { + 0x96, 0xfd, 0x13, 0x23, 0xa9, 0x89, 0x04, 0xe6, + 0x31, 0xa5, 0x2c, 0xc1, 0x40, 0xd5, 0x69, 0x5c, + 0x32, 0x79, 0x56, 0xe0, 0x29, 0x93, 0x8f, 0xe8, + 0x5f, 0x65, 0x53, 0x7f, 0xc1, 0xe9, 0xaf, 0xaf +}; +enum { nonce21 = 0xba8defee9d8e13b5ULL }; + +static const u8 input22[] __initconst = { + 0xb8, 0x32, 0x1a, 0x81, 0xd8, 0x38, 0x89, 0x5a, + 0xb0, 0x05, 0xbe, 0xf4, 0xd2, 0x08, 0xc6, 0xee, + 0x79, 0x7b, 0x3a, 0x76, 0x59 +}; +static const u8 output22[] __initconst = { + 0xb7, 0xba, 0xae, 0x80, 0xe4, 0x9f, 0x79, 0x84, + 0x5a, 0x48, 0x50, 0x6d, 0xcb, 0xd0, 0x06, 0x0c, + 0x15, 0x63, 0xa7, 0x5e, 0xbd +}; +static const u8 key22[] __initconst = { + 0x0f, 0x35, 0x3d, 0xeb, 0x5f, 0x0a, 0x82, 0x0d, + 0x24, 0x59, 0x71, 0xd8, 0xe6, 0x2d, 0x5f, 0xe1, + 0x7e, 0x0c, 0xae, 0xf6, 0xdc, 0x2c, 0xc5, 0x4a, + 0x38, 0x88, 0xf2, 0xde, 0xd9, 0x5f, 0x76, 0x7c +}; +enum { nonce22 = 0xe77f1760e9f5e192ULL }; + +static const u8 input23[] __initconst = { + 0x4b, 0x1e, 0x79, 0x99, 0xcf, 0xef, 0x64, 0x4b, + 0xb0, 0x66, 0xae, 0x99, 0x2e, 0x68, 0x97, 0xf5, + 0x5d, 0x9b, 0x3f, 0x7a, 0xa9, 0xd9 +}; +static 
const u8 output23[] __initconst = { + 0x5f, 0xa4, 0x08, 0x39, 0xca, 0xfa, 0x2b, 0x83, + 0x5d, 0x95, 0x70, 0x7c, 0x2e, 0xd4, 0xae, 0xfa, + 0x45, 0x4a, 0x77, 0x7f, 0xa7, 0x65 +}; +static const u8 key23[] __initconst = { + 0x4a, 0x06, 0x83, 0x64, 0xaa, 0xe3, 0x38, 0x32, + 0x28, 0x5d, 0xa4, 0xb2, 0x5a, 0xee, 0xcf, 0x8e, + 0x19, 0x67, 0xf1, 0x09, 0xe8, 0xc9, 0xf6, 0x40, + 0x02, 0x6d, 0x0b, 0xde, 0xfa, 0x81, 0x03, 0xb1 +}; +enum { nonce23 = 0x9b3f349158709849ULL }; + +static const u8 input24[] __initconst = { + 0xc6, 0xfc, 0x47, 0x5e, 0xd8, 0xed, 0xa9, 0xe5, + 0x4f, 0x82, 0x79, 0x35, 0xee, 0x3e, 0x7e, 0x3e, + 0x35, 0x70, 0x6e, 0xfa, 0x6d, 0x08, 0xe8 +}; +static const u8 output24[] __initconst = { + 0x3b, 0xc5, 0xf8, 0xc2, 0xbf, 0x2b, 0x90, 0x33, + 0xa6, 0xae, 0xf5, 0x5a, 0x65, 0xb3, 0x3d, 0xe1, + 0xcd, 0x5f, 0x55, 0xfa, 0xe7, 0xa5, 0x4a +}; +static const u8 key24[] __initconst = { + 0x00, 0x24, 0xc3, 0x65, 0x5f, 0xe6, 0x31, 0xbb, + 0x6d, 0xfc, 0x20, 0x7b, 0x1b, 0xa8, 0x96, 0x26, + 0x55, 0x21, 0x62, 0x25, 0x7e, 0xba, 0x23, 0x97, + 0xc9, 0xb8, 0x53, 0xa8, 0xef, 0xab, 0xad, 0x61 +}; +enum { nonce24 = 0x13ee0b8f526177c3ULL }; + +static const u8 input25[] __initconst = { + 0x33, 0x07, 0x16, 0xb1, 0x34, 0x33, 0x67, 0x04, + 0x9b, 0x0a, 0xce, 0x1b, 0xe9, 0xde, 0x1a, 0xec, + 0xd0, 0x55, 0xfb, 0xc6, 0x33, 0xaf, 0x2d, 0xe3 +}; +static const u8 output25[] __initconst = { + 0x05, 0x93, 0x10, 0xd1, 0x58, 0x6f, 0x68, 0x62, + 0x45, 0xdb, 0x91, 0xae, 0x70, 0xcf, 0xd4, 0x5f, + 0xee, 0xdf, 0xd5, 0xba, 0x9e, 0xde, 0x68, 0xe6 +}; +static const u8 key25[] __initconst = { + 0x83, 0xa9, 0x4f, 0x5d, 0x74, 0xd5, 0x91, 0xb3, + 0xc9, 0x97, 0x19, 0x15, 0xdb, 0x0d, 0x0b, 0x4a, + 0x3d, 0x55, 0xcf, 0xab, 0xb2, 0x05, 0x21, 0x35, + 0x45, 0x50, 0xeb, 0xf8, 0xf5, 0xbf, 0x36, 0x35 +}; +enum { nonce25 = 0x7c6f459e49ebfebcULL }; + +static const u8 input26[] __initconst = { + 0xc2, 0xd4, 0x7a, 0xa3, 0x92, 0xe1, 0xac, 0x46, + 0x1a, 0x15, 0x38, 0xc9, 0xb5, 0xfd, 0xdf, 0x84, + 0x38, 0xbc, 0x6b, 0x1d, 0xb0, 0x83, 
0x43, 0x04, + 0x39 +}; +static const u8 output26[] __initconst = { + 0x7f, 0xde, 0xd6, 0x87, 0xcc, 0x34, 0xf4, 0x12, + 0xae, 0x55, 0xa5, 0x89, 0x95, 0x29, 0xfc, 0x18, + 0xd8, 0xc7, 0x7c, 0xd3, 0xcb, 0x85, 0x95, 0x21, + 0xd2 +}; +static const u8 key26[] __initconst = { + 0xe4, 0xd0, 0x54, 0x1d, 0x7d, 0x47, 0xa8, 0xc1, + 0x08, 0xca, 0xe2, 0x42, 0x52, 0x95, 0x16, 0x43, + 0xa3, 0x01, 0x23, 0x03, 0xcc, 0x3b, 0x81, 0x78, + 0x23, 0xcc, 0xa7, 0x36, 0xd7, 0xa0, 0x97, 0x8d +}; +enum { nonce26 = 0x524401012231683ULL }; + +static const u8 input27[] __initconst = { + 0x0d, 0xb0, 0xcf, 0xec, 0xfc, 0x38, 0x9d, 0x9d, + 0x89, 0x00, 0x96, 0xf2, 0x79, 0x8a, 0xa1, 0x8d, + 0x32, 0x5e, 0xc6, 0x12, 0x22, 0xec, 0xf6, 0x52, + 0xc1, 0x0b +}; +static const u8 output27[] __initconst = { + 0xef, 0xe1, 0xf2, 0x67, 0x8e, 0x2c, 0x00, 0x9f, + 0x1d, 0x4c, 0x66, 0x1f, 0x94, 0x58, 0xdc, 0xbb, + 0xb9, 0x11, 0x8f, 0x74, 0xfd, 0x0e, 0x14, 0x01, + 0xa8, 0x21 +}; +static const u8 key27[] __initconst = { + 0x78, 0x71, 0xa4, 0xe6, 0xb2, 0x95, 0x44, 0x12, + 0x81, 0xaa, 0x7e, 0x94, 0xa7, 0x8d, 0x44, 0xea, + 0xc4, 0xbc, 0x01, 0xb7, 0x9e, 0xf7, 0x82, 0x9e, + 0x3b, 0x23, 0x9f, 0x31, 0xdd, 0xb8, 0x0d, 0x18 +}; +enum { nonce27 = 0xd58fe0e58fb254d6ULL }; + +static const u8 input28[] __initconst = { + 0xaa, 0xb7, 0xaa, 0xd9, 0xa8, 0x91, 0xd7, 0x8a, + 0x97, 0x9b, 0xdb, 0x7c, 0x47, 0x2b, 0xdb, 0xd2, + 0xda, 0x77, 0xb1, 0xfa, 0x2d, 0x12, 0xe3, 0xe9, + 0xc4, 0x7f, 0x54 +}; +static const u8 output28[] __initconst = { + 0x87, 0x84, 0xa9, 0xa6, 0xad, 0x8f, 0xe6, 0x0f, + 0x69, 0xf8, 0x21, 0xc3, 0x54, 0x95, 0x0f, 0xb0, + 0x4e, 0xc7, 0x02, 0xe4, 0x04, 0xb0, 0x6c, 0x42, + 0x8c, 0x63, 0xe3 +}; +static const u8 key28[] __initconst = { + 0x12, 0x23, 0x37, 0x95, 0x04, 0xb4, 0x21, 0xe8, + 0xbc, 0x65, 0x46, 0x7a, 0xf4, 0x01, 0x05, 0x3f, + 0xb1, 0x34, 0x73, 0xd2, 0x49, 0xbf, 0x6f, 0x20, + 0xbd, 0x23, 0x58, 0x5f, 0xd1, 0x73, 0x57, 0xa6 +}; +enum { nonce28 = 0x3a04d51491eb4e07ULL }; + +static const u8 input29[] __initconst = { + 0x55, 
0xd0, 0xd4, 0x4b, 0x17, 0xc8, 0xc4, 0x2b, + 0xc0, 0x28, 0xbd, 0x9d, 0x65, 0x4d, 0xaf, 0x77, + 0x72, 0x7c, 0x36, 0x68, 0xa7, 0xb6, 0x87, 0x4d, + 0xb9, 0x27, 0x25, 0x6c +}; +static const u8 output29[] __initconst = { + 0x0e, 0xac, 0x4c, 0xf5, 0x12, 0xb5, 0x56, 0xa5, + 0x00, 0x9a, 0xd6, 0xe5, 0x1a, 0x59, 0x2c, 0xf6, + 0x42, 0x22, 0xcf, 0x23, 0x98, 0x34, 0x29, 0xac, + 0x6e, 0xe3, 0x37, 0x6d +}; +static const u8 key29[] __initconst = { + 0xda, 0x9d, 0x05, 0x0c, 0x0c, 0xba, 0x75, 0xb9, + 0x9e, 0xb1, 0x8d, 0xd9, 0x73, 0x26, 0x2c, 0xa9, + 0x3a, 0xb5, 0xcb, 0x19, 0x49, 0xa7, 0x4f, 0xf7, + 0x64, 0x35, 0x23, 0x20, 0x2a, 0x45, 0x78, 0xc7 +}; +enum { nonce29 = 0xc25ac9982431cbfULL }; + +static const u8 input30[] __initconst = { + 0x4e, 0xd6, 0x85, 0xbb, 0xe7, 0x99, 0xfa, 0x04, + 0x33, 0x24, 0xfd, 0x75, 0x18, 0xe3, 0xd3, 0x25, + 0xcd, 0xca, 0xae, 0x00, 0xbe, 0x52, 0x56, 0x4a, + 0x31, 0xe9, 0x4f, 0xae, 0x8a +}; +static const u8 output30[] __initconst = { + 0x30, 0x36, 0x32, 0xa2, 0x3c, 0xb6, 0xf9, 0xf9, + 0x76, 0x70, 0xad, 0xa6, 0x10, 0x41, 0x00, 0x4a, + 0xfa, 0xce, 0x1b, 0x86, 0x05, 0xdb, 0x77, 0x96, + 0xb3, 0xb7, 0x8f, 0x61, 0x24 +}; +static const u8 key30[] __initconst = { + 0x49, 0x35, 0x4c, 0x15, 0x98, 0xfb, 0xc6, 0x57, + 0x62, 0x6d, 0x06, 0xc3, 0xd4, 0x79, 0x20, 0x96, + 0x05, 0x2a, 0x31, 0x63, 0xc0, 0x44, 0x42, 0x09, + 0x13, 0x13, 0xff, 0x1b, 0xc8, 0x63, 0x1f, 0x0b +}; +enum { nonce30 = 0x4967f9c08e41568bULL }; + +static const u8 input31[] __initconst = { + 0x91, 0x04, 0x20, 0x47, 0x59, 0xee, 0xa6, 0x0f, + 0x04, 0x75, 0xc8, 0x18, 0x95, 0x44, 0x01, 0x28, + 0x20, 0x6f, 0x73, 0x68, 0x66, 0xb5, 0x03, 0xb3, + 0x58, 0x27, 0x6e, 0x7a, 0x76, 0xb8 +}; +static const u8 output31[] __initconst = { + 0xe8, 0x03, 0x78, 0x9d, 0x13, 0x15, 0x98, 0xef, + 0x64, 0x68, 0x12, 0x41, 0xb0, 0x29, 0x94, 0x0c, + 0x83, 0x35, 0x46, 0xa9, 0x74, 0xe1, 0x75, 0xf0, + 0xb6, 0x96, 0xc3, 0x6f, 0xd7, 0x70 +}; +static const u8 key31[] __initconst = { + 0xef, 0xcd, 0x5a, 0x4a, 0xf4, 0x7e, 0x6a, 0x3a, + 0x11, 
0x88, 0x72, 0x94, 0xb8, 0xae, 0x84, 0xc3, + 0x66, 0xe0, 0xde, 0x4b, 0x00, 0xa5, 0xd6, 0x2d, + 0x50, 0xb7, 0x28, 0xff, 0x76, 0x57, 0x18, 0x1f +}; +enum { nonce31 = 0xcb6f428fa4192e19ULL }; + +static const u8 input32[] __initconst = { + 0x90, 0x06, 0x50, 0x4b, 0x98, 0x14, 0x30, 0xf1, + 0xb8, 0xd7, 0xf0, 0xa4, 0x3e, 0x4e, 0xd8, 0x00, + 0xea, 0xdb, 0x4f, 0x93, 0x05, 0xef, 0x02, 0x71, + 0x1a, 0xcd, 0xa3, 0xb1, 0xae, 0xd3, 0x18 +}; +static const u8 output32[] __initconst = { + 0xcb, 0x4a, 0x37, 0x3f, 0xea, 0x40, 0xab, 0x86, + 0xfe, 0xcc, 0x07, 0xd5, 0xdc, 0xb2, 0x25, 0xb6, + 0xfd, 0x2a, 0x72, 0xbc, 0x5e, 0xd4, 0x75, 0xff, + 0x71, 0xfc, 0xce, 0x1e, 0x6f, 0x22, 0xc1 +}; +static const u8 key32[] __initconst = { + 0xfc, 0x6d, 0xc3, 0x80, 0xce, 0xa4, 0x31, 0xa1, + 0xcc, 0xfa, 0x9d, 0x10, 0x0b, 0xc9, 0x11, 0x77, + 0x34, 0xdb, 0xad, 0x1b, 0xc4, 0xfc, 0xeb, 0x79, + 0x91, 0xda, 0x59, 0x3b, 0x0d, 0xb1, 0x19, 0x3b +}; +enum { nonce32 = 0x88551bf050059467ULL }; + +static const u8 input33[] __initconst = { + 0x88, 0x94, 0x71, 0x92, 0xe8, 0xd7, 0xf9, 0xbd, + 0x55, 0xe3, 0x22, 0xdb, 0x99, 0x51, 0xfb, 0x50, + 0xbf, 0x82, 0xb5, 0x70, 0x8b, 0x2b, 0x6a, 0x03, + 0x37, 0xa0, 0xc6, 0x19, 0x5d, 0xc9, 0xbc, 0xcc +}; +static const u8 output33[] __initconst = { + 0xb6, 0x17, 0x51, 0xc8, 0xea, 0x8a, 0x14, 0xdc, + 0x23, 0x1b, 0xd4, 0xed, 0xbf, 0x50, 0xb9, 0x38, + 0x00, 0xc2, 0x3f, 0x78, 0x3d, 0xbf, 0xa0, 0x84, + 0xef, 0x45, 0xb2, 0x7d, 0x48, 0x7b, 0x62, 0xa7 +}; +static const u8 key33[] __initconst = { + 0xb9, 0x8f, 0x6a, 0xad, 0xb4, 0x6f, 0xb5, 0xdc, + 0x48, 0xfa, 0x43, 0x57, 0x62, 0x97, 0xef, 0x89, + 0x4c, 0x5a, 0x7b, 0x67, 0xb8, 0x9d, 0xf0, 0x42, + 0x2b, 0x8f, 0xf3, 0x18, 0x05, 0x2e, 0x48, 0xd0 +}; +enum { nonce33 = 0x31f16488fe8447f5ULL }; + +static const u8 input34[] __initconst = { + 0xda, 0x2b, 0x3d, 0x63, 0x9e, 0x4f, 0xc2, 0xb8, + 0x7f, 0xc2, 0x1a, 0x8b, 0x0d, 0x95, 0x65, 0x55, + 0x52, 0xba, 0x51, 0x51, 0xc0, 0x61, 0x9f, 0x0a, + 0x5d, 0xb0, 0x59, 0x8c, 0x64, 0x6a, 0xab, 0xf5, + 0x57 +}; 
+static const u8 output34[] __initconst = { + 0x5c, 0xf6, 0x62, 0x24, 0x8c, 0x45, 0xa3, 0x26, + 0xd0, 0xe4, 0x88, 0x1c, 0xed, 0xc4, 0x26, 0x58, + 0xb5, 0x5d, 0x92, 0xc4, 0x17, 0x44, 0x1c, 0xb8, + 0x2c, 0xf3, 0x55, 0x7e, 0xd6, 0xe5, 0xb3, 0x65, + 0xa8 +}; +static const u8 key34[] __initconst = { + 0xde, 0xd1, 0x27, 0xb7, 0x7c, 0xfa, 0xa6, 0x78, + 0x39, 0x80, 0xdf, 0xb7, 0x46, 0xac, 0x71, 0x26, + 0xd0, 0x2a, 0x56, 0x79, 0x12, 0xeb, 0x26, 0x37, + 0x01, 0x0d, 0x30, 0xe0, 0xe3, 0x66, 0xb2, 0xf4 +}; +enum { nonce34 = 0x92d0d9b252c24149ULL }; + +static const u8 input35[] __initconst = { + 0x3a, 0x15, 0x5b, 0x75, 0x6e, 0xd0, 0x52, 0x20, + 0x6c, 0x82, 0xfa, 0xce, 0x5b, 0xea, 0xf5, 0x43, + 0xc1, 0x81, 0x7c, 0xb2, 0xac, 0x16, 0x3f, 0xd3, + 0x5a, 0xaf, 0x55, 0x98, 0xf4, 0xc6, 0xba, 0x71, + 0x25, 0x8b +}; +static const u8 output35[] __initconst = { + 0xb3, 0xaf, 0xac, 0x6d, 0x4d, 0xc7, 0x68, 0x56, + 0x50, 0x5b, 0x69, 0x2a, 0xe5, 0x90, 0xf9, 0x5f, + 0x99, 0x88, 0xff, 0x0c, 0xa6, 0xb1, 0x83, 0xd6, + 0x80, 0xa6, 0x1b, 0xde, 0x94, 0xa4, 0x2c, 0xc3, + 0x74, 0xfa +}; +static const u8 key35[] __initconst = { + 0xd8, 0x24, 0xe2, 0x06, 0xd7, 0x7a, 0xce, 0x81, + 0x52, 0x72, 0x02, 0x69, 0x89, 0xc4, 0xe9, 0x53, + 0x3b, 0x08, 0x5f, 0x98, 0x1e, 0x1b, 0x99, 0x6e, + 0x28, 0x17, 0x6d, 0xba, 0xc0, 0x96, 0xf9, 0x3c +}; +enum { nonce35 = 0x7baf968c4c8e3a37ULL }; + +static const u8 input36[] __initconst = { + 0x31, 0x5d, 0x4f, 0xe3, 0xac, 0xad, 0x17, 0xa6, + 0xb5, 0x01, 0xe2, 0xc6, 0xd4, 0x7e, 0xc4, 0x80, + 0xc0, 0x59, 0x72, 0xbb, 0x4b, 0x74, 0x6a, 0x41, + 0x0f, 0x9c, 0xf6, 0xca, 0x20, 0xb3, 0x73, 0x07, + 0x6b, 0x02, 0x2a +}; +static const u8 output36[] __initconst = { + 0xf9, 0x09, 0x92, 0x94, 0x7e, 0x31, 0xf7, 0x53, + 0xe8, 0x8a, 0x5b, 0x20, 0xef, 0x9b, 0x45, 0x81, + 0xba, 0x5e, 0x45, 0x63, 0xc1, 0xc7, 0x9e, 0x06, + 0x0e, 0xd9, 0x62, 0x8e, 0x96, 0xf9, 0xfa, 0x43, + 0x4d, 0xd4, 0x28 +}; +static const u8 key36[] __initconst = { + 0x13, 0x30, 0x4c, 0x06, 0xae, 0x18, 0xde, 0x03, + 0x1d, 0x02, 0x40, 
0xf5, 0xbb, 0x19, 0xe3, 0x88, + 0x41, 0xb1, 0x29, 0x15, 0x97, 0xc2, 0x69, 0x3f, + 0x32, 0x2a, 0x0c, 0x8b, 0xcf, 0x83, 0x8b, 0x6c +}; +enum { nonce36 = 0x226d251d475075a0ULL }; + +static const u8 input37[] __initconst = { + 0x10, 0x18, 0xbe, 0xfd, 0x66, 0xc9, 0x77, 0xcc, + 0x43, 0xe5, 0x46, 0x0b, 0x08, 0x8b, 0xae, 0x11, + 0x86, 0x15, 0xc2, 0xf6, 0x45, 0xd4, 0x5f, 0xd6, + 0xb6, 0x5f, 0x9f, 0x3e, 0x97, 0xb7, 0xd4, 0xad, + 0x0b, 0xe8, 0x31, 0x94 +}; +static const u8 output37[] __initconst = { + 0x03, 0x2c, 0x1c, 0xee, 0xc6, 0xdd, 0xed, 0x38, + 0x80, 0x6d, 0x84, 0x16, 0xc3, 0xc2, 0x04, 0x63, + 0xcd, 0xa7, 0x6e, 0x36, 0x8b, 0xed, 0x78, 0x63, + 0x95, 0xfc, 0x69, 0x7a, 0x3f, 0x8d, 0x75, 0x6b, + 0x6c, 0x26, 0x56, 0x4d +}; +static const u8 key37[] __initconst = { + 0xac, 0x84, 0x4d, 0xa9, 0x29, 0x49, 0x3c, 0x39, + 0x7f, 0xd9, 0xa6, 0x01, 0xf3, 0x7e, 0xfa, 0x4a, + 0x14, 0x80, 0x22, 0x74, 0xf0, 0x29, 0x30, 0x2d, + 0x07, 0x21, 0xda, 0xc0, 0x4d, 0x70, 0x56, 0xa2 +}; +enum { nonce37 = 0x167823ce3b64925aULL }; + +static const u8 input38[] __initconst = { + 0x30, 0x8f, 0xfa, 0x24, 0x29, 0xb1, 0xfb, 0xce, + 0x31, 0x62, 0xdc, 0xd0, 0x46, 0xab, 0xe1, 0x31, + 0xd9, 0xae, 0x60, 0x0d, 0xca, 0x0a, 0x49, 0x12, + 0x3d, 0x92, 0xe9, 0x91, 0x67, 0x12, 0x62, 0x18, + 0x89, 0xe2, 0xf9, 0x1c, 0xcc +}; +static const u8 output38[] __initconst = { + 0x56, 0x9c, 0xc8, 0x7a, 0xc5, 0x98, 0xa3, 0x0f, + 0xba, 0xd5, 0x3e, 0xe1, 0xc9, 0x33, 0x64, 0x33, + 0xf0, 0xd5, 0xf7, 0x43, 0x66, 0x0e, 0x08, 0x9a, + 0x6e, 0x09, 0xe4, 0x01, 0x0d, 0x1e, 0x2f, 0x4b, + 0xed, 0x9c, 0x08, 0x8c, 0x03 +}; +static const u8 key38[] __initconst = { + 0x77, 0x52, 0x2a, 0x23, 0xf1, 0xc5, 0x96, 0x2b, + 0x89, 0x4f, 0x3e, 0xf3, 0xff, 0x0e, 0x94, 0xce, + 0xf1, 0xbd, 0x53, 0xf5, 0x77, 0xd6, 0x9e, 0x47, + 0x49, 0x3d, 0x16, 0x64, 0xff, 0x95, 0x42, 0x42 +}; +enum { nonce38 = 0xff629d7b82cef357ULL }; + +static const u8 input39[] __initconst = { + 0x38, 0x26, 0x27, 0xd0, 0xc2, 0xf5, 0x34, 0xba, + 0xda, 0x0f, 0x1c, 0x1c, 0x9a, 0x70, 0xe5, 
0x8a, + 0x78, 0x2d, 0x8f, 0x9a, 0xbf, 0x89, 0x6a, 0xfd, + 0xd4, 0x9c, 0x33, 0xf1, 0xb6, 0x89, 0x16, 0xe3, + 0x6a, 0x00, 0xfa, 0x3a, 0x0f, 0x26 +}; +static const u8 output39[] __initconst = { + 0x0f, 0xaf, 0x91, 0x6d, 0x9c, 0x99, 0xa4, 0xf7, + 0x3b, 0x9d, 0x9a, 0x98, 0xca, 0xbb, 0x50, 0x48, + 0xee, 0xcb, 0x5d, 0xa1, 0x37, 0x2d, 0x36, 0x09, + 0x2a, 0xe2, 0x1c, 0x3d, 0x98, 0x40, 0x1c, 0x16, + 0x56, 0xa7, 0x98, 0xe9, 0x7d, 0x2b +}; +static const u8 key39[] __initconst = { + 0x6e, 0x83, 0x15, 0x4d, 0xf8, 0x78, 0xa8, 0x0e, + 0x71, 0x37, 0xd4, 0x6e, 0x28, 0x5c, 0x06, 0xa1, + 0x2d, 0x6c, 0x72, 0x7a, 0xfd, 0xf8, 0x65, 0x1a, + 0xb8, 0xe6, 0x29, 0x7b, 0xe5, 0xb3, 0x23, 0x79 +}; +enum { nonce39 = 0xa4d8c491cf093e9dULL }; + +static const u8 input40[] __initconst = { + 0x8f, 0x32, 0x7c, 0x40, 0x37, 0x95, 0x08, 0x00, + 0x00, 0xfe, 0x2f, 0x95, 0x20, 0x12, 0x40, 0x18, + 0x5e, 0x7e, 0x5e, 0x99, 0xee, 0x8d, 0x91, 0x7d, + 0x50, 0x7d, 0x21, 0x45, 0x27, 0xe1, 0x7f, 0xd4, + 0x73, 0x10, 0xe1, 0x33, 0xbc, 0xf8, 0xdd +}; +static const u8 output40[] __initconst = { + 0x78, 0x7c, 0xdc, 0x55, 0x2b, 0xd9, 0x2b, 0x3a, + 0xdd, 0x56, 0x11, 0x52, 0xd3, 0x2e, 0xe0, 0x0d, + 0x23, 0x20, 0x8a, 0xf1, 0x4f, 0xee, 0xf1, 0x68, + 0xf6, 0xdc, 0x53, 0xcf, 0x17, 0xd4, 0xf0, 0x6c, + 0xdc, 0x80, 0x5f, 0x1c, 0xa4, 0x91, 0x05 +}; +static const u8 key40[] __initconst = { + 0x0d, 0x86, 0xbf, 0x8a, 0xba, 0x9e, 0x39, 0x91, + 0xa8, 0xe7, 0x22, 0xf0, 0x0c, 0x43, 0x18, 0xe4, + 0x1f, 0xb0, 0xaf, 0x8a, 0x34, 0x31, 0xf4, 0x41, + 0xf0, 0x89, 0x85, 0xca, 0x5d, 0x05, 0x3b, 0x94 +}; +enum { nonce40 = 0xae7acc4f5986439eULL }; + +static const u8 input41[] __initconst = { + 0x20, 0x5f, 0xc1, 0x83, 0x36, 0x02, 0x76, 0x96, + 0xf0, 0xbf, 0x8e, 0x0e, 0x1a, 0xd1, 0xc7, 0x88, + 0x18, 0xc7, 0x09, 0xc4, 0x15, 0xd9, 0x4f, 0x5e, + 0x1f, 0xb3, 0xb4, 0x6d, 0xcb, 0xa0, 0xd6, 0x8a, + 0x3b, 0x40, 0x8e, 0x80, 0xf1, 0xe8, 0x8f, 0x5f +}; +static const u8 output41[] __initconst = { + 0x0b, 0xd1, 0x49, 0x9a, 0x9d, 0xe8, 0x97, 0xb8, + 0xd1, 0xeb, 
0x90, 0x62, 0x37, 0xd2, 0x99, 0x15, + 0x67, 0x6d, 0x27, 0x93, 0xce, 0x37, 0x65, 0xa2, + 0x94, 0x88, 0xd6, 0x17, 0xbc, 0x1c, 0x6e, 0xa2, + 0xcc, 0xfb, 0x81, 0x0e, 0x30, 0x60, 0x5a, 0x6f +}; +static const u8 key41[] __initconst = { + 0x36, 0x27, 0x57, 0x01, 0x21, 0x68, 0x97, 0xc7, + 0x00, 0x67, 0x7b, 0xe9, 0x0f, 0x55, 0x49, 0xbb, + 0x92, 0x18, 0x98, 0xf5, 0x5e, 0xbc, 0xe7, 0x5a, + 0x9d, 0x3d, 0xc7, 0xbd, 0x59, 0xec, 0x82, 0x8e +}; +enum { nonce41 = 0x5da05e4c8dfab464ULL }; + +static const u8 input42[] __initconst = { + 0xca, 0x30, 0xcd, 0x63, 0xf0, 0x2d, 0xf1, 0x03, + 0x4d, 0x0d, 0xf2, 0xf7, 0x6f, 0xae, 0xd6, 0x34, + 0xea, 0xf6, 0x13, 0xcf, 0x1c, 0xa0, 0xd0, 0xe8, + 0xa4, 0x78, 0x80, 0x3b, 0x1e, 0xa5, 0x32, 0x4c, + 0x73, 0x12, 0xd4, 0x6a, 0x94, 0xbc, 0xba, 0x80, + 0x5e +}; +static const u8 output42[] __initconst = { + 0xec, 0x3f, 0x18, 0x31, 0xc0, 0x7b, 0xb5, 0xe2, + 0xad, 0xf3, 0xec, 0xa0, 0x16, 0x9d, 0xef, 0xce, + 0x05, 0x65, 0x59, 0x9d, 0x5a, 0xca, 0x3e, 0x13, + 0xb9, 0x5d, 0x5d, 0xb5, 0xeb, 0xae, 0xc0, 0x87, + 0xbb, 0xfd, 0xe7, 0xe4, 0x89, 0x5b, 0xd2, 0x6c, + 0x56 +}; +static const u8 key42[] __initconst = { + 0x7c, 0x6b, 0x7e, 0x77, 0xcc, 0x8c, 0x1b, 0x03, + 0x8b, 0x2a, 0xb3, 0x7c, 0x5a, 0x73, 0xcc, 0xac, + 0xdd, 0x53, 0x54, 0x0c, 0x85, 0xed, 0xcd, 0x47, + 0x24, 0xc1, 0xb8, 0x9b, 0x2e, 0x41, 0x92, 0x36 +}; +enum { nonce42 = 0xe4d7348b09682c9cULL }; + +static const u8 input43[] __initconst = { + 0x52, 0xf2, 0x4b, 0x7c, 0xe5, 0x58, 0xe8, 0xd2, + 0xb7, 0xf3, 0xa1, 0x29, 0x68, 0xa2, 0x50, 0x50, + 0xae, 0x9c, 0x1b, 0xe2, 0x67, 0x77, 0xe2, 0xdb, + 0x85, 0x55, 0x7e, 0x84, 0x8a, 0x12, 0x3c, 0xb6, + 0x2e, 0xed, 0xd3, 0xec, 0x47, 0x68, 0xfa, 0x52, + 0x46, 0x9d +}; +static const u8 output43[] __initconst = { + 0x1b, 0xf0, 0x05, 0xe4, 0x1c, 0xd8, 0x74, 0x9a, + 0xf0, 0xee, 0x00, 0x54, 0xce, 0x02, 0x83, 0x15, + 0xfb, 0x23, 0x35, 0x78, 0xc3, 0xda, 0x98, 0xd8, + 0x9d, 0x1b, 0xb2, 0x51, 0x82, 0xb0, 0xff, 0xbe, + 0x05, 0xa9, 0xa4, 0x04, 0xba, 0xea, 0x4b, 0x73, + 0x47, 0x6e +}; 
+static const u8 key43[] __initconst = { + 0xeb, 0xec, 0x0e, 0xa1, 0x65, 0xe2, 0x99, 0x46, + 0xd8, 0x54, 0x8c, 0x4a, 0x93, 0xdf, 0x6d, 0xbf, + 0x93, 0x34, 0x94, 0x57, 0xc9, 0x12, 0x9d, 0x68, + 0x05, 0xc5, 0x05, 0xad, 0x5a, 0xc9, 0x2a, 0x3b +}; +enum { nonce43 = 0xe14f6a902b7827fULL }; + +static const u8 input44[] __initconst = { + 0x3e, 0x22, 0x3e, 0x8e, 0xcd, 0x18, 0xe2, 0xa3, + 0x8d, 0x8b, 0x38, 0xc3, 0x02, 0xa3, 0x31, 0x48, + 0xc6, 0x0e, 0xec, 0x99, 0x51, 0x11, 0x6d, 0x8b, + 0x32, 0x35, 0x3b, 0x08, 0x58, 0x76, 0x25, 0x30, + 0xe2, 0xfc, 0xa2, 0x46, 0x7d, 0x6e, 0x34, 0x87, + 0xac, 0x42, 0xbf +}; +static const u8 output44[] __initconst = { + 0x08, 0x92, 0x58, 0x02, 0x1a, 0xf4, 0x1f, 0x3d, + 0x38, 0x7b, 0x6b, 0xf6, 0x84, 0x07, 0xa3, 0x19, + 0x17, 0x2a, 0xed, 0x57, 0x1c, 0xf9, 0x55, 0x37, + 0x4e, 0xf4, 0x68, 0x68, 0x82, 0x02, 0x4f, 0xca, + 0x21, 0x00, 0xc6, 0x66, 0x79, 0x53, 0x19, 0xef, + 0x7f, 0xdd, 0x74 +}; +static const u8 key44[] __initconst = { + 0x73, 0xb6, 0x3e, 0xf4, 0x57, 0x52, 0xa6, 0x43, + 0x51, 0xd8, 0x25, 0x00, 0xdb, 0xb4, 0x52, 0x69, + 0xd6, 0x27, 0x49, 0xeb, 0x9b, 0xf1, 0x7b, 0xa0, + 0xd6, 0x7c, 0x9c, 0xd8, 0x95, 0x03, 0x69, 0x26 +}; +enum { nonce44 = 0xf5e6dc4f35ce24e5ULL }; + +static const u8 input45[] __initconst = { + 0x55, 0x76, 0xc0, 0xf1, 0x74, 0x03, 0x7a, 0x6d, + 0x14, 0xd8, 0x36, 0x2c, 0x9f, 0x9a, 0x59, 0x7a, + 0x2a, 0xf5, 0x77, 0x84, 0x70, 0x7c, 0x1d, 0x04, + 0x90, 0x45, 0xa4, 0xc1, 0x5e, 0xdd, 0x2e, 0x07, + 0x18, 0x34, 0xa6, 0x85, 0x56, 0x4f, 0x09, 0xaf, + 0x2f, 0x83, 0xe1, 0xc6 +}; +static const u8 output45[] __initconst = { + 0x22, 0x46, 0xe4, 0x0b, 0x3a, 0x55, 0xcc, 0x9b, + 0xf0, 0xc0, 0x53, 0xcd, 0x95, 0xc7, 0x57, 0x6c, + 0x77, 0x46, 0x41, 0x72, 0x07, 0xbf, 0xa8, 0xe5, + 0x68, 0x69, 0xd8, 0x1e, 0x45, 0xc1, 0xa2, 0x50, + 0xa5, 0xd1, 0x62, 0xc9, 0x5a, 0x7d, 0x08, 0x14, + 0xae, 0x44, 0x16, 0xb9 +}; +static const u8 key45[] __initconst = { + 0x41, 0xf3, 0x88, 0xb2, 0x51, 0x25, 0x47, 0x02, + 0x39, 0xe8, 0x15, 0x3a, 0x22, 0x78, 0x86, 0x0b, + 
0xf9, 0x1e, 0x8d, 0x98, 0xb2, 0x22, 0x82, 0xac, + 0x42, 0x94, 0xde, 0x64, 0xf0, 0xfd, 0xb3, 0x6c +}; +enum { nonce45 = 0xf51a582daf4aa01aULL }; + +static const u8 input46[] __initconst = { + 0xf6, 0xff, 0x20, 0xf9, 0x26, 0x7e, 0x0f, 0xa8, + 0x6a, 0x45, 0x5a, 0x91, 0x73, 0xc4, 0x4c, 0x63, + 0xe5, 0x61, 0x59, 0xca, 0xec, 0xc0, 0x20, 0x35, + 0xbc, 0x9f, 0x58, 0x9c, 0x5e, 0xa1, 0x17, 0x46, + 0xcc, 0xab, 0x6e, 0xd0, 0x4f, 0x24, 0xeb, 0x05, + 0x4d, 0x40, 0x41, 0xe0, 0x9d +}; +static const u8 output46[] __initconst = { + 0x31, 0x6e, 0x63, 0x3f, 0x9c, 0xe6, 0xb1, 0xb7, + 0xef, 0x47, 0x46, 0xd7, 0xb1, 0x53, 0x42, 0x2f, + 0x2c, 0xc8, 0x01, 0xae, 0x8b, 0xec, 0x42, 0x2c, + 0x6b, 0x2c, 0x9c, 0xb2, 0xf0, 0x29, 0x06, 0xa5, + 0xcd, 0x7e, 0xc7, 0x3a, 0x38, 0x98, 0x8a, 0xde, + 0x03, 0x29, 0x14, 0x8f, 0xf9 +}; +static const u8 key46[] __initconst = { + 0xac, 0xa6, 0x44, 0x4a, 0x0d, 0x42, 0x10, 0xbc, + 0xd3, 0xc9, 0x8e, 0x9e, 0x71, 0xa3, 0x1c, 0x14, + 0x9d, 0x65, 0x0d, 0x49, 0x4d, 0x8c, 0xec, 0x46, + 0xe1, 0x41, 0xcd, 0xf5, 0xfc, 0x82, 0x75, 0x34 +}; +enum { nonce46 = 0x25f85182df84dec5ULL }; + +static const u8 input47[] __initconst = { + 0xa1, 0xd2, 0xf2, 0x52, 0x2f, 0x79, 0x50, 0xb2, + 0x42, 0x29, 0x5b, 0x44, 0x20, 0xf9, 0xbd, 0x85, + 0xb7, 0x65, 0x77, 0x86, 0xce, 0x3e, 0x1c, 0xe4, + 0x70, 0x80, 0xdd, 0x72, 0x07, 0x48, 0x0f, 0x84, + 0x0d, 0xfd, 0x97, 0xc0, 0xb7, 0x48, 0x9b, 0xb4, + 0xec, 0xff, 0x73, 0x14, 0x99, 0xe4 +}; +static const u8 output47[] __initconst = { + 0xe5, 0x3c, 0x78, 0x66, 0x31, 0x1e, 0xd6, 0xc4, + 0x9e, 0x71, 0xb3, 0xd7, 0xd5, 0xad, 0x84, 0xf2, + 0x78, 0x61, 0x77, 0xf8, 0x31, 0xf0, 0x13, 0xad, + 0x66, 0xf5, 0x31, 0x7d, 0xeb, 0xdf, 0xaf, 0xcb, + 0xac, 0x28, 0x6c, 0xc2, 0x9e, 0xe7, 0x78, 0xa2, + 0xa2, 0x58, 0xce, 0x84, 0x76, 0x70 +}; +static const u8 key47[] __initconst = { + 0x05, 0x7f, 0xc0, 0x7f, 0x37, 0x20, 0x71, 0x02, + 0x3a, 0xe7, 0x20, 0x5a, 0x0a, 0x8f, 0x79, 0x5a, + 0xfe, 0xbb, 0x43, 0x4d, 0x2f, 0xcb, 0xf6, 0x9e, + 0xa2, 0x97, 0x00, 0xad, 0x0d, 0x51, 0x7e, 
0x17 +}; +enum { nonce47 = 0xae707c60f54de32bULL }; + +static const u8 input48[] __initconst = { + 0x80, 0x93, 0x77, 0x2e, 0x8d, 0xe8, 0xe6, 0xc1, + 0x27, 0xe6, 0xf2, 0x89, 0x5b, 0x33, 0x62, 0x18, + 0x80, 0x6e, 0x17, 0x22, 0x8e, 0x83, 0x31, 0x40, + 0x8f, 0xc9, 0x5c, 0x52, 0x6c, 0x0e, 0xa5, 0xe9, + 0x6c, 0x7f, 0xd4, 0x6a, 0x27, 0x56, 0x99, 0xce, + 0x8d, 0x37, 0x59, 0xaf, 0xc0, 0x0e, 0xe1 +}; +static const u8 output48[] __initconst = { + 0x02, 0xa4, 0x2e, 0x33, 0xb7, 0x7c, 0x2b, 0x9a, + 0x18, 0x5a, 0xba, 0x53, 0x38, 0xaf, 0x00, 0xeb, + 0xd8, 0x3d, 0x02, 0x77, 0x43, 0x45, 0x03, 0x91, + 0xe2, 0x5e, 0x4e, 0xeb, 0x50, 0xd5, 0x5b, 0xe0, + 0xf3, 0x33, 0xa7, 0xa2, 0xac, 0x07, 0x6f, 0xeb, + 0x3f, 0x6c, 0xcd, 0xf2, 0x6c, 0x61, 0x64 +}; +static const u8 key48[] __initconst = { + 0xf3, 0x79, 0xe7, 0xf8, 0x0e, 0x02, 0x05, 0x6b, + 0x83, 0x1a, 0xe7, 0x86, 0x6b, 0xe6, 0x8f, 0x3f, + 0xd3, 0xa3, 0xe4, 0x6e, 0x29, 0x06, 0xad, 0xbc, + 0xe8, 0x33, 0x56, 0x39, 0xdf, 0xb0, 0xe2, 0xfe +}; +enum { nonce48 = 0xd849b938c6569da0ULL }; + +static const u8 input49[] __initconst = { + 0x89, 0x3b, 0x88, 0x9e, 0x7b, 0x38, 0x16, 0x9f, + 0xa1, 0x28, 0xf6, 0xf5, 0x23, 0x74, 0x28, 0xb0, + 0xdf, 0x6c, 0x9e, 0x8a, 0x71, 0xaf, 0xed, 0x7a, + 0x39, 0x21, 0x57, 0x7d, 0x31, 0x6c, 0xee, 0x0d, + 0x11, 0x8d, 0x41, 0x9a, 0x5f, 0xb7, 0x27, 0x40, + 0x08, 0xad, 0xc6, 0xe0, 0x00, 0x43, 0x9e, 0xae +}; +static const u8 output49[] __initconst = { + 0x4d, 0xfd, 0xdb, 0x4c, 0x77, 0xc1, 0x05, 0x07, + 0x4d, 0x6d, 0x32, 0xcb, 0x2e, 0x0e, 0xff, 0x65, + 0xc9, 0x27, 0xeb, 0xa9, 0x46, 0x5b, 0xab, 0x06, + 0xe6, 0xb6, 0x5a, 0x1e, 0x00, 0xfb, 0xcf, 0xe4, + 0xb9, 0x71, 0x40, 0x10, 0xef, 0x12, 0x39, 0xf0, + 0xea, 0x40, 0xb8, 0x9a, 0xa2, 0x85, 0x38, 0x48 +}; +static const u8 key49[] __initconst = { + 0xe7, 0x10, 0x40, 0xd9, 0x66, 0xc0, 0xa8, 0x6d, + 0xa3, 0xcc, 0x8b, 0xdd, 0x93, 0xf2, 0x6e, 0xe0, + 0x90, 0x7f, 0xd0, 0xf4, 0x37, 0x0c, 0x8b, 0x9b, + 0x4c, 0x4d, 0xe6, 0xf2, 0x1f, 0xe9, 0x95, 0x24 +}; +enum { nonce49 = 
0xf269817bdae01bc0ULL }; + +static const u8 input50[] __initconst = { + 0xda, 0x5b, 0x60, 0xcd, 0xed, 0x58, 0x8e, 0x7f, + 0xae, 0xdd, 0xc8, 0x2e, 0x16, 0x90, 0xea, 0x4b, + 0x0c, 0x74, 0x14, 0x35, 0xeb, 0xee, 0x2c, 0xff, + 0x46, 0x99, 0x97, 0x6e, 0xae, 0xa7, 0x8e, 0x6e, + 0x38, 0xfe, 0x63, 0xe7, 0x51, 0xd9, 0xaa, 0xce, + 0x7b, 0x1e, 0x7e, 0x5d, 0xc0, 0xe8, 0x10, 0x06, + 0x14 +}; +static const u8 output50[] __initconst = { + 0xe4, 0xe5, 0x86, 0x1b, 0x66, 0x19, 0xac, 0x49, + 0x1c, 0xbd, 0xee, 0x03, 0xaf, 0x11, 0xfc, 0x1f, + 0x6a, 0xd2, 0x50, 0x5c, 0xea, 0x2c, 0xa5, 0x75, + 0xfd, 0xb7, 0x0e, 0x80, 0x8f, 0xed, 0x3f, 0x31, + 0x47, 0xac, 0x67, 0x43, 0xb8, 0x2e, 0xb4, 0x81, + 0x6d, 0xe4, 0x1e, 0xb7, 0x8b, 0x0c, 0x53, 0xa9, + 0x26 +}; +static const u8 key50[] __initconst = { + 0xd7, 0xb2, 0x04, 0x76, 0x30, 0xcc, 0x38, 0x45, + 0xef, 0xdb, 0xc5, 0x86, 0x08, 0x61, 0xf0, 0xee, + 0x6d, 0xd8, 0x22, 0x04, 0x8c, 0xfb, 0xcb, 0x37, + 0xa6, 0xfb, 0x95, 0x22, 0xe1, 0x87, 0xb7, 0x6f +}; +enum { nonce50 = 0x3b44d09c45607d38ULL }; + +static const u8 input51[] __initconst = { + 0xa9, 0x41, 0x02, 0x4b, 0xd7, 0xd5, 0xd1, 0xf1, + 0x21, 0x55, 0xb2, 0x75, 0x6d, 0x77, 0x1b, 0x86, + 0xa9, 0xc8, 0x90, 0xfd, 0xed, 0x4a, 0x7b, 0x6c, + 0xb2, 0x5f, 0x9b, 0x5f, 0x16, 0xa1, 0x54, 0xdb, + 0xd6, 0x3f, 0x6a, 0x7f, 0x2e, 0x51, 0x9d, 0x49, + 0x5b, 0xa5, 0x0e, 0xf9, 0xfb, 0x2a, 0x38, 0xff, + 0x20, 0x8c +}; +static const u8 output51[] __initconst = { + 0x18, 0xf7, 0x88, 0xc1, 0x72, 0xfd, 0x90, 0x4b, + 0xa9, 0x2d, 0xdb, 0x47, 0xb0, 0xa5, 0xc4, 0x37, + 0x01, 0x95, 0xc4, 0xb1, 0xab, 0xc5, 0x5b, 0xcd, + 0xe1, 0x97, 0x78, 0x13, 0xde, 0x6a, 0xff, 0x36, + 0xce, 0xa4, 0x67, 0xc5, 0x4a, 0x45, 0x2b, 0xd9, + 0xff, 0x8f, 0x06, 0x7c, 0x63, 0xbb, 0x83, 0x17, + 0xb4, 0x6b +}; +static const u8 key51[] __initconst = { + 0x82, 0x1a, 0x79, 0xab, 0x9a, 0xb5, 0x49, 0x6a, + 0x30, 0x6b, 0x99, 0x19, 0x11, 0xc7, 0xa2, 0xf4, + 0xca, 0x55, 0xb9, 0xdd, 0xe7, 0x2f, 0xe7, 0xc1, + 0xdd, 0x27, 0xad, 0x80, 0xf2, 0x56, 0xad, 0xf3 +}; +enum { 
nonce51 = 0xe93aff94ca71a4a6ULL }; + +static const u8 input52[] __initconst = { + 0x89, 0xdd, 0xf3, 0xfa, 0xb6, 0xc1, 0xaa, 0x9a, + 0xc8, 0xad, 0x6b, 0x00, 0xa1, 0x65, 0xea, 0x14, + 0x55, 0x54, 0x31, 0x8f, 0xf0, 0x03, 0x84, 0x51, + 0x17, 0x1e, 0x0a, 0x93, 0x6e, 0x79, 0x96, 0xa3, + 0x2a, 0x85, 0x9c, 0x89, 0xf8, 0xd1, 0xe2, 0x15, + 0x95, 0x05, 0xf4, 0x43, 0x4d, 0x6b, 0xf0, 0x71, + 0x3b, 0x3e, 0xba +}; +static const u8 output52[] __initconst = { + 0x0c, 0x42, 0x6a, 0xb3, 0x66, 0x63, 0x5d, 0x2c, + 0x9f, 0x3d, 0xa6, 0x6e, 0xc7, 0x5f, 0x79, 0x2f, + 0x50, 0xe3, 0xd6, 0x07, 0x56, 0xa4, 0x2b, 0x2d, + 0x8d, 0x10, 0xc0, 0x6c, 0xa2, 0xfc, 0x97, 0xec, + 0x3f, 0x5c, 0x8d, 0x59, 0xbe, 0x84, 0xf1, 0x3e, + 0x38, 0x47, 0x4f, 0x75, 0x25, 0x66, 0x88, 0x14, + 0x03, 0xdd, 0xde +}; +static const u8 key52[] __initconst = { + 0x4f, 0xb0, 0x27, 0xb6, 0xdd, 0x24, 0x0c, 0xdb, + 0x6b, 0x71, 0x2e, 0xac, 0xfc, 0x3f, 0xa6, 0x48, + 0x5d, 0xd5, 0xff, 0x53, 0xb5, 0x62, 0xf1, 0xe0, + 0x93, 0xfe, 0x39, 0x4c, 0x9f, 0x03, 0x11, 0xa7 +}; +enum { nonce52 = 0xed8becec3bdf6f25ULL }; + +static const u8 input53[] __initconst = { + 0x68, 0xd1, 0xc7, 0x74, 0x44, 0x1c, 0x84, 0xde, + 0x27, 0x27, 0x35, 0xf0, 0x18, 0x0b, 0x57, 0xaa, + 0xd0, 0x1a, 0xd3, 0x3b, 0x5e, 0x5c, 0x62, 0x93, + 0xd7, 0x6b, 0x84, 0x3b, 0x71, 0x83, 0x77, 0x01, + 0x3e, 0x59, 0x45, 0xf4, 0x77, 0x6c, 0x6b, 0xcb, + 0x88, 0x45, 0x09, 0x1d, 0xc6, 0x45, 0x6e, 0xdc, + 0x6e, 0x51, 0xb8, 0x28 +}; +static const u8 output53[] __initconst = { + 0xc5, 0x90, 0x96, 0x78, 0x02, 0xf5, 0xc4, 0x3c, + 0xde, 0xd4, 0xd4, 0xc6, 0xa7, 0xad, 0x12, 0x47, + 0x45, 0xce, 0xcd, 0x8c, 0x35, 0xcc, 0xa6, 0x9e, + 0x5a, 0xc6, 0x60, 0xbb, 0xe3, 0xed, 0xec, 0x68, + 0x3f, 0x64, 0xf7, 0x06, 0x63, 0x9c, 0x8c, 0xc8, + 0x05, 0x3a, 0xad, 0x32, 0x79, 0x8b, 0x45, 0x96, + 0x93, 0x73, 0x4c, 0xe0 +}; +static const u8 key53[] __initconst = { + 0x42, 0x4b, 0x20, 0x81, 0x49, 0x50, 0xe9, 0xc2, + 0x43, 0x69, 0x36, 0xe7, 0x68, 0xae, 0xd5, 0x7e, + 0x42, 0x1a, 0x1b, 0xb4, 0x06, 0x4d, 0xa7, 0x17, + 
0xb5, 0x31, 0xd6, 0x0c, 0xb0, 0x5c, 0x41, 0x0b +}; +enum { nonce53 = 0xf44ce1931fbda3d7ULL }; + +static const u8 input54[] __initconst = { + 0x7b, 0xf6, 0x8b, 0xae, 0xc0, 0xcb, 0x10, 0x8e, + 0xe8, 0xd8, 0x2e, 0x3b, 0x14, 0xba, 0xb4, 0xd2, + 0x58, 0x6b, 0x2c, 0xec, 0xc1, 0x81, 0x71, 0xb4, + 0xc6, 0xea, 0x08, 0xc5, 0xc9, 0x78, 0xdb, 0xa2, + 0xfa, 0x44, 0x50, 0x9b, 0xc8, 0x53, 0x8d, 0x45, + 0x42, 0xe7, 0x09, 0xc4, 0x29, 0xd8, 0x75, 0x02, + 0xbb, 0xb2, 0x78, 0xcf, 0xe7 +}; +static const u8 output54[] __initconst = { + 0xaf, 0x2c, 0x83, 0x26, 0x6e, 0x7f, 0xa6, 0xe9, + 0x03, 0x75, 0xfe, 0xfe, 0x87, 0x58, 0xcf, 0xb5, + 0xbc, 0x3c, 0x9d, 0xa1, 0x6e, 0x13, 0xf1, 0x0f, + 0x9e, 0xbc, 0xe0, 0x54, 0x24, 0x32, 0xce, 0x95, + 0xe6, 0xa5, 0x59, 0x3d, 0x24, 0x1d, 0x8f, 0xb1, + 0x74, 0x6c, 0x56, 0xe7, 0x96, 0xc1, 0x91, 0xc8, + 0x2d, 0x0e, 0xb7, 0x51, 0x10 +}; +static const u8 key54[] __initconst = { + 0x00, 0x68, 0x74, 0xdc, 0x30, 0x9e, 0xe3, 0x52, + 0xa9, 0xae, 0xb6, 0x7c, 0xa1, 0xdc, 0x12, 0x2d, + 0x98, 0x32, 0x7a, 0x77, 0xe1, 0xdd, 0xa3, 0x76, + 0x72, 0x34, 0x83, 0xd8, 0xb7, 0x69, 0xba, 0x77 +}; +enum { nonce54 = 0xbea57d79b798b63aULL }; + +static const u8 input55[] __initconst = { + 0xb5, 0xf4, 0x2f, 0xc1, 0x5e, 0x10, 0xa7, 0x4e, + 0x74, 0x3d, 0xa3, 0x96, 0xc0, 0x4d, 0x7b, 0x92, + 0x8f, 0xdb, 0x2d, 0x15, 0x52, 0x6a, 0x95, 0x5e, + 0x40, 0x81, 0x4f, 0x70, 0x73, 0xea, 0x84, 0x65, + 0x3d, 0x9a, 0x4e, 0x03, 0x95, 0xf8, 0x5d, 0x2f, + 0x07, 0x02, 0x13, 0x13, 0xdd, 0x82, 0xe6, 0x3b, + 0xe1, 0x5f, 0xb3, 0x37, 0x9b, 0x88 +}; +static const u8 output55[] __initconst = { + 0xc1, 0x88, 0xbd, 0x92, 0x77, 0xad, 0x7c, 0x5f, + 0xaf, 0xa8, 0x57, 0x0e, 0x40, 0x0a, 0xdc, 0x70, + 0xfb, 0xc6, 0x71, 0xfd, 0xc4, 0x74, 0x60, 0xcc, + 0xa0, 0x89, 0x8e, 0x99, 0xf0, 0x06, 0xa6, 0x7c, + 0x97, 0x42, 0x21, 0x81, 0x6a, 0x07, 0xe7, 0xb3, + 0xf7, 0xa5, 0x03, 0x71, 0x50, 0x05, 0x63, 0x17, + 0xa9, 0x46, 0x0b, 0xff, 0x30, 0x78 +}; +static const u8 key55[] __initconst = { + 0x19, 0x8f, 0xe7, 0xd7, 0x6b, 0x7f, 0x6f, 
0x69, + 0x86, 0x91, 0x0f, 0xa7, 0x4a, 0x69, 0x8e, 0x34, + 0xf3, 0xdb, 0xde, 0xaf, 0xf2, 0x66, 0x1d, 0x64, + 0x97, 0x0c, 0xcf, 0xfa, 0x33, 0x84, 0xfd, 0x0c +}; +enum { nonce55 = 0x80aa3d3e2c51ef06ULL }; + +static const u8 input56[] __initconst = { + 0x6b, 0xe9, 0x73, 0x42, 0x27, 0x5e, 0x12, 0xcd, + 0xaa, 0x45, 0x12, 0x8b, 0xb3, 0xe6, 0x54, 0x33, + 0x31, 0x7d, 0xe2, 0x25, 0xc6, 0x86, 0x47, 0x67, + 0x86, 0x83, 0xe4, 0x46, 0xb5, 0x8f, 0x2c, 0xbb, + 0xe4, 0xb8, 0x9f, 0xa2, 0xa4, 0xe8, 0x75, 0x96, + 0x92, 0x51, 0x51, 0xac, 0x8e, 0x2e, 0x6f, 0xfc, + 0xbd, 0x0d, 0xa3, 0x9f, 0x16, 0x55, 0x3e +}; +static const u8 output56[] __initconst = { + 0x42, 0x99, 0x73, 0x6c, 0xd9, 0x4b, 0x16, 0xe5, + 0x18, 0x63, 0x1a, 0xd9, 0x0e, 0xf1, 0x15, 0x2e, + 0x0f, 0x4b, 0xe4, 0x5f, 0xa0, 0x4d, 0xde, 0x9f, + 0xa7, 0x18, 0xc1, 0x0c, 0x0b, 0xae, 0x55, 0xe4, + 0x89, 0x18, 0xa4, 0x78, 0x9d, 0x25, 0x0d, 0xd5, + 0x94, 0x0f, 0xf9, 0x78, 0xa3, 0xa6, 0xe9, 0x9e, + 0x2c, 0x73, 0xf0, 0xf7, 0x35, 0xf3, 0x2b +}; +static const u8 key56[] __initconst = { + 0x7d, 0x12, 0xad, 0x51, 0xd5, 0x6f, 0x8f, 0x96, + 0xc0, 0x5d, 0x9a, 0xd1, 0x7e, 0x20, 0x98, 0x0e, + 0x3c, 0x0a, 0x67, 0x6b, 0x1b, 0x88, 0x69, 0xd4, + 0x07, 0x8c, 0xaf, 0x0f, 0x3a, 0x28, 0xe4, 0x5d +}; +enum { nonce56 = 0x70f4c372fb8b5984ULL }; + +static const u8 input57[] __initconst = { + 0x28, 0xa3, 0x06, 0xe8, 0xe7, 0x08, 0xb9, 0xef, + 0x0d, 0x63, 0x15, 0x99, 0xb2, 0x78, 0x7e, 0xaf, + 0x30, 0x50, 0xcf, 0xea, 0xc9, 0x91, 0x41, 0x2f, + 0x3b, 0x38, 0x70, 0xc4, 0x87, 0xb0, 0x3a, 0xee, + 0x4a, 0xea, 0xe3, 0x83, 0x68, 0x8b, 0xcf, 0xda, + 0x04, 0xa5, 0xbd, 0xb2, 0xde, 0x3c, 0x55, 0x13, + 0xfe, 0x96, 0xad, 0xc1, 0x61, 0x1b, 0x98, 0xde +}; +static const u8 output57[] __initconst = { + 0xf4, 0x44, 0xe9, 0xd2, 0x6d, 0xc2, 0x5a, 0xe9, + 0xfd, 0x7e, 0x41, 0x54, 0x3f, 0xf4, 0x12, 0xd8, + 0x55, 0x0d, 0x12, 0x9b, 0xd5, 0x2e, 0x95, 0xe5, + 0x77, 0x42, 0x3f, 0x2c, 0xfb, 0x28, 0x9d, 0x72, + 0x6d, 0x89, 0x82, 0x27, 0x64, 0x6f, 0x0d, 0x57, + 0xa1, 0x25, 0xa3, 0x6b, 0x88, 
0x9a, 0xac, 0x0c, + 0x76, 0x19, 0x90, 0xe2, 0x50, 0x5a, 0xf8, 0x12 +}; +static const u8 key57[] __initconst = { + 0x08, 0x26, 0xb8, 0xac, 0xf3, 0xa5, 0xc6, 0xa3, + 0x7f, 0x09, 0x87, 0xf5, 0x6c, 0x5a, 0x85, 0x6c, + 0x3d, 0xbd, 0xde, 0xd5, 0x87, 0xa3, 0x98, 0x7a, + 0xaa, 0x40, 0x3e, 0xf7, 0xff, 0x44, 0x5d, 0xee +}; +enum { nonce57 = 0xc03a6130bf06b089ULL }; + +static const u8 input58[] __initconst = { + 0x82, 0xa5, 0x38, 0x6f, 0xaa, 0xb4, 0xaf, 0xb2, + 0x42, 0x01, 0xa8, 0x39, 0x3f, 0x15, 0x51, 0xa8, + 0x11, 0x1b, 0x93, 0xca, 0x9c, 0xa0, 0x57, 0x68, + 0x8f, 0xdb, 0x68, 0x53, 0x51, 0x6d, 0x13, 0x22, + 0x12, 0x9b, 0xbd, 0x33, 0xa8, 0x52, 0x40, 0x57, + 0x80, 0x9b, 0x98, 0xef, 0x56, 0x70, 0x11, 0xfa, + 0x36, 0x69, 0x7d, 0x15, 0x48, 0xf9, 0x3b, 0xeb, + 0x42 +}; +static const u8 output58[] __initconst = { + 0xff, 0x3a, 0x74, 0xc3, 0x3e, 0x44, 0x64, 0x4d, + 0x0e, 0x5f, 0x9d, 0xa8, 0xdb, 0xbe, 0x12, 0xef, + 0xba, 0x56, 0x65, 0x50, 0x76, 0xaf, 0xa4, 0x4e, + 0x01, 0xc1, 0xd3, 0x31, 0x14, 0xe2, 0xbe, 0x7b, + 0xa5, 0x67, 0xb4, 0xe3, 0x68, 0x40, 0x9c, 0xb0, + 0xb1, 0x78, 0xef, 0x49, 0x03, 0x0f, 0x2d, 0x56, + 0xb4, 0x37, 0xdb, 0xbc, 0x2d, 0x68, 0x1c, 0x3c, + 0xf1 +}; +static const u8 key58[] __initconst = { + 0x7e, 0xf1, 0x7c, 0x20, 0x65, 0xed, 0xcd, 0xd7, + 0x57, 0xe8, 0xdb, 0x90, 0x87, 0xdb, 0x5f, 0x63, + 0x3d, 0xdd, 0xb8, 0x2b, 0x75, 0x8e, 0x04, 0xb5, + 0xf4, 0x12, 0x79, 0xa9, 0x4d, 0x42, 0x16, 0x7f +}; +enum { nonce58 = 0x92838183f80d2f7fULL }; + +static const u8 input59[] __initconst = { + 0x37, 0xf1, 0x9d, 0xdd, 0xd7, 0x08, 0x9f, 0x13, + 0xc5, 0x21, 0x82, 0x75, 0x08, 0x9e, 0x25, 0x16, + 0xb1, 0xd1, 0x71, 0x42, 0x28, 0x63, 0xac, 0x47, + 0x71, 0x54, 0xb1, 0xfc, 0x39, 0xf0, 0x61, 0x4f, + 0x7c, 0x6d, 0x4f, 0xc8, 0x33, 0xef, 0x7e, 0xc8, + 0xc0, 0x97, 0xfc, 0x1a, 0x61, 0xb4, 0x87, 0x6f, + 0xdd, 0x5a, 0x15, 0x7b, 0x1b, 0x95, 0x50, 0x94, + 0x1d, 0xba +}; +static const u8 output59[] __initconst = { + 0x73, 0x67, 0xc5, 0x07, 0xbb, 0x57, 0x79, 0xd5, + 0xc9, 0x04, 0xdd, 0x88, 0xf3, 0x86, 
0xe5, 0x70, + 0x49, 0x31, 0xe0, 0xcc, 0x3b, 0x1d, 0xdf, 0xb0, + 0xaf, 0xf4, 0x2d, 0xe0, 0x06, 0x10, 0x91, 0x8d, + 0x1c, 0xcf, 0x31, 0x0b, 0xf6, 0x73, 0xda, 0x1c, + 0xf0, 0x17, 0x52, 0x9e, 0x20, 0x2e, 0x9f, 0x8c, + 0xb3, 0x59, 0xce, 0xd4, 0xd3, 0xc1, 0x81, 0xe9, + 0x11, 0x36 +}; +static const u8 key59[] __initconst = { + 0xbd, 0x07, 0xd0, 0x53, 0x2c, 0xb3, 0xcc, 0x3f, + 0xc4, 0x95, 0xfd, 0xe7, 0x81, 0xb3, 0x29, 0x99, + 0x05, 0x45, 0xd6, 0x95, 0x25, 0x0b, 0x72, 0xd3, + 0xcd, 0xbb, 0x73, 0xf8, 0xfa, 0xc0, 0x9b, 0x7a +}; +enum { nonce59 = 0x4a0db819b0d519e2ULL }; + +static const u8 input60[] __initconst = { + 0x58, 0x4e, 0xdf, 0x94, 0x3c, 0x76, 0x0a, 0x79, + 0x47, 0xf1, 0xbe, 0x88, 0xd3, 0xba, 0x94, 0xd8, + 0xe2, 0x8f, 0xe3, 0x2f, 0x2f, 0x74, 0x82, 0x55, + 0xc3, 0xda, 0xe2, 0x4e, 0x2c, 0x8c, 0x45, 0x1d, + 0x72, 0x8f, 0x54, 0x41, 0xb5, 0xb7, 0x69, 0xe4, + 0xdc, 0xd2, 0x36, 0x21, 0x5c, 0x28, 0x52, 0xf7, + 0x98, 0x8e, 0x72, 0xa7, 0x6d, 0x57, 0xed, 0xdc, + 0x3c, 0xe6, 0x6a +}; +static const u8 output60[] __initconst = { + 0xda, 0xaf, 0xb5, 0xe3, 0x30, 0x65, 0x5c, 0xb1, + 0x48, 0x08, 0x43, 0x7b, 0x9e, 0xd2, 0x6a, 0x62, + 0x56, 0x7c, 0xad, 0xd9, 0xe5, 0xf6, 0x09, 0x71, + 0xcd, 0xe6, 0x05, 0x6b, 0x3f, 0x44, 0x3a, 0x5c, + 0xf6, 0xf8, 0xd7, 0xce, 0x7d, 0xd1, 0xe0, 0x4f, + 0x88, 0x15, 0x04, 0xd8, 0x20, 0xf0, 0x3e, 0xef, + 0xae, 0xa6, 0x27, 0xa3, 0x0e, 0xfc, 0x18, 0x90, + 0x33, 0xcd, 0xd3 +}; +static const u8 key60[] __initconst = { + 0xbf, 0xfd, 0x25, 0xb5, 0xb2, 0xfc, 0x78, 0x0c, + 0x8e, 0xb9, 0x57, 0x2f, 0x26, 0x4a, 0x7e, 0x71, + 0xcc, 0xf2, 0xe0, 0xfd, 0x24, 0x11, 0x20, 0x23, + 0x57, 0x00, 0xff, 0x80, 0x11, 0x0c, 0x1e, 0xff +}; +enum { nonce60 = 0xf18df56fdb7954adULL }; + +static const u8 input61[] __initconst = { + 0xb0, 0xf3, 0x06, 0xbc, 0x22, 0xae, 0x49, 0x40, + 0xae, 0xff, 0x1b, 0x31, 0xa7, 0x98, 0xab, 0x1d, + 0xe7, 0x40, 0x23, 0x18, 0x4f, 0xab, 0x8e, 0x93, + 0x82, 0xf4, 0x56, 0x61, 0xfd, 0x2b, 0xcf, 0xa7, + 0xc4, 0xb4, 0x0a, 0xf4, 0xcb, 0xc7, 0x8c, 0x40, + 0x57, 0xac, 
0x0b, 0x3e, 0x2a, 0x0a, 0x67, 0x83, + 0x50, 0xbf, 0xec, 0xb0, 0xc7, 0xf1, 0x32, 0x26, + 0x98, 0x80, 0x33, 0xb4 +}; +static const u8 output61[] __initconst = { + 0x9d, 0x23, 0x0e, 0xff, 0xcc, 0x7c, 0xd5, 0xcf, + 0x1a, 0xb8, 0x59, 0x1e, 0x92, 0xfd, 0x7f, 0xca, + 0xca, 0x3c, 0x18, 0x81, 0xde, 0xfa, 0x59, 0xc8, + 0x6f, 0x9c, 0x24, 0x3f, 0x3a, 0xe6, 0x0b, 0xb4, + 0x34, 0x48, 0x69, 0xfc, 0xb6, 0xea, 0xb2, 0xde, + 0x9f, 0xfd, 0x92, 0x36, 0x18, 0x98, 0x99, 0xaa, + 0x65, 0xe2, 0xea, 0xf4, 0xb1, 0x47, 0x8e, 0xb0, + 0xe7, 0xd4, 0x7a, 0x2c +}; +static const u8 key61[] __initconst = { + 0xd7, 0xfd, 0x9b, 0xbd, 0x8f, 0x65, 0x0d, 0x00, + 0xca, 0xa1, 0x6c, 0x85, 0x85, 0xa4, 0x6d, 0xf1, + 0xb1, 0x68, 0x0c, 0x8b, 0x5d, 0x37, 0x72, 0xd0, + 0xd8, 0xd2, 0x25, 0xab, 0x9f, 0x7b, 0x7d, 0x95 +}; +enum { nonce61 = 0xd82caf72a9c4864fULL }; + +static const u8 input62[] __initconst = { + 0x10, 0x77, 0xf3, 0x2f, 0xc2, 0x50, 0xd6, 0x0c, + 0xba, 0xa8, 0x8d, 0xce, 0x0d, 0x58, 0x9e, 0x87, + 0xb1, 0x59, 0x66, 0x0a, 0x4a, 0xb3, 0xd8, 0xca, + 0x0a, 0x6b, 0xf8, 0xc6, 0x2b, 0x3f, 0x8e, 0x09, + 0xe0, 0x0a, 0x15, 0x85, 0xfe, 0xaa, 0xc6, 0xbd, + 0x30, 0xef, 0xe4, 0x10, 0x78, 0x03, 0xc1, 0xc7, + 0x8a, 0xd9, 0xde, 0x0b, 0x51, 0x07, 0xc4, 0x7b, + 0xe2, 0x2e, 0x36, 0x3a, 0xc2 +}; +static const u8 output62[] __initconst = { + 0xa0, 0x0c, 0xfc, 0xc1, 0xf6, 0xaf, 0xc2, 0xb8, + 0x5c, 0xef, 0x6e, 0xf3, 0xce, 0x15, 0x48, 0x05, + 0xb5, 0x78, 0x49, 0x51, 0x1f, 0x9d, 0xf4, 0xbf, + 0x2f, 0x53, 0xa2, 0xd1, 0x15, 0x20, 0x82, 0x6b, + 0xd2, 0x22, 0x6c, 0x4e, 0x14, 0x87, 0xe3, 0xd7, + 0x49, 0x45, 0x84, 0xdb, 0x5f, 0x68, 0x60, 0xc4, + 0xb3, 0xe6, 0x3f, 0xd1, 0xfc, 0xa5, 0x73, 0xf3, + 0xfc, 0xbb, 0xbe, 0xc8, 0x9d +}; +static const u8 key62[] __initconst = { + 0x6e, 0xc9, 0xaf, 0xce, 0x35, 0xb9, 0x86, 0xd1, + 0xce, 0x5f, 0xd9, 0xbb, 0xd5, 0x1f, 0x7c, 0xcd, + 0xfe, 0x19, 0xaa, 0x3d, 0xea, 0x64, 0xc1, 0x28, + 0x40, 0xba, 0xa1, 0x28, 0xcd, 0x40, 0xb6, 0xf2 +}; +enum { nonce62 = 0xa1c0c265f900cde8ULL }; + +static const u8 input63[] 
__initconst = { + 0x7a, 0x70, 0x21, 0x2c, 0xef, 0xa6, 0x36, 0xd4, + 0xe0, 0xab, 0x8c, 0x25, 0x73, 0x34, 0xc8, 0x94, + 0x6c, 0x81, 0xcb, 0x19, 0x8d, 0x5a, 0x49, 0xaa, + 0x6f, 0xba, 0x83, 0x72, 0x02, 0x5e, 0xf5, 0x89, + 0xce, 0x79, 0x7e, 0x13, 0x3d, 0x5b, 0x98, 0x60, + 0x5d, 0xd9, 0xfb, 0x15, 0x93, 0x4c, 0xf3, 0x51, + 0x49, 0x55, 0xd1, 0x58, 0xdd, 0x7e, 0x6d, 0xfe, + 0xdd, 0x84, 0x23, 0x05, 0xba, 0xe9 +}; +static const u8 output63[] __initconst = { + 0x20, 0xb3, 0x5c, 0x03, 0x03, 0x78, 0x17, 0xfc, + 0x3b, 0x35, 0x30, 0x9a, 0x00, 0x18, 0xf5, 0xc5, + 0x06, 0x53, 0xf5, 0x04, 0x24, 0x9d, 0xd1, 0xb2, + 0xac, 0x5a, 0xb6, 0x2a, 0xa5, 0xda, 0x50, 0x00, + 0xec, 0xff, 0xa0, 0x7a, 0x14, 0x7b, 0xe4, 0x6b, + 0x63, 0xe8, 0x66, 0x86, 0x34, 0xfd, 0x74, 0x44, + 0xa2, 0x50, 0x97, 0x0d, 0xdc, 0xc3, 0x84, 0xf8, + 0x71, 0x02, 0x31, 0x95, 0xed, 0x54 +}; +static const u8 key63[] __initconst = { + 0x7d, 0x64, 0xb4, 0x12, 0x81, 0xe4, 0xe6, 0x8f, + 0xcc, 0xe7, 0xd1, 0x1f, 0x70, 0x20, 0xfd, 0xb8, + 0x3a, 0x7d, 0xa6, 0x53, 0x65, 0x30, 0x5d, 0xe3, + 0x1a, 0x44, 0xbe, 0x62, 0xed, 0x90, 0xc4, 0xd1 +}; +enum { nonce63 = 0xe8e849596c942276ULL }; + +static const u8 input64[] __initconst = { + 0x84, 0xf8, 0xda, 0x87, 0x23, 0x39, 0x60, 0xcf, + 0xc5, 0x50, 0x7e, 0xc5, 0x47, 0x29, 0x7c, 0x05, + 0xc2, 0xb4, 0xf4, 0xb2, 0xec, 0x5d, 0x48, 0x36, + 0xbf, 0xfc, 0x06, 0x8c, 0xf2, 0x0e, 0x88, 0xe7, + 0xc9, 0xc5, 0xa4, 0xa2, 0x83, 0x20, 0xa1, 0x6f, + 0x37, 0xe5, 0x2d, 0xa1, 0x72, 0xa1, 0x19, 0xef, + 0x05, 0x42, 0x08, 0xf2, 0x57, 0x47, 0x31, 0x1e, + 0x17, 0x76, 0x13, 0xd3, 0xcc, 0x75, 0x2c +}; +static const u8 output64[] __initconst = { + 0xcb, 0xec, 0x90, 0x88, 0xeb, 0x31, 0x69, 0x20, + 0xa6, 0xdc, 0xff, 0x76, 0x98, 0xb0, 0x24, 0x49, + 0x7b, 0x20, 0xd9, 0xd1, 0x1b, 0xe3, 0x61, 0xdc, + 0xcf, 0x51, 0xf6, 0x70, 0x72, 0x33, 0x28, 0x94, + 0xac, 0x73, 0x18, 0xcf, 0x93, 0xfd, 0xca, 0x08, + 0x0d, 0xa2, 0xb9, 0x57, 0x1e, 0x51, 0xb6, 0x07, + 0x5c, 0xc1, 0x13, 0x64, 0x1d, 0x18, 0x6f, 0xe6, + 0x0b, 0xb7, 0x14, 0x03, 0x43, 
0xb6, 0xaf +}; +static const u8 key64[] __initconst = { + 0xbf, 0x82, 0x65, 0xe4, 0x50, 0xf9, 0x5e, 0xea, + 0x28, 0x91, 0xd1, 0xd2, 0x17, 0x7c, 0x13, 0x7e, + 0xf5, 0xd5, 0x6b, 0x06, 0x1c, 0x20, 0xc2, 0x82, + 0xa1, 0x7a, 0xa2, 0x14, 0xa1, 0xb0, 0x54, 0x58 +}; +enum { nonce64 = 0xe57c5095aa5723c9ULL }; + +static const u8 input65[] __initconst = { + 0x1c, 0xfb, 0xd3, 0x3f, 0x85, 0xd7, 0xba, 0x7b, + 0xae, 0xb1, 0xa5, 0xd2, 0xe5, 0x40, 0xce, 0x4d, + 0x3e, 0xab, 0x17, 0x9d, 0x7d, 0x9f, 0x03, 0x98, + 0x3f, 0x9f, 0xc8, 0xdd, 0x36, 0x17, 0x43, 0x5c, + 0x34, 0xd1, 0x23, 0xe0, 0x77, 0xbf, 0x35, 0x5d, + 0x8f, 0xb1, 0xcb, 0x82, 0xbb, 0x39, 0x69, 0xd8, + 0x90, 0x45, 0x37, 0xfd, 0x98, 0x25, 0xf7, 0x5b, + 0xce, 0x06, 0x43, 0xba, 0x61, 0xa8, 0x47, 0xb9 +}; +static const u8 output65[] __initconst = { + 0x73, 0xa5, 0x68, 0xab, 0x8b, 0xa5, 0xc3, 0x7e, + 0x74, 0xf8, 0x9d, 0xf5, 0x93, 0x6e, 0xf2, 0x71, + 0x6d, 0xde, 0x82, 0xc5, 0x40, 0xa0, 0x46, 0xb3, + 0x9a, 0x78, 0xa8, 0xf7, 0xdf, 0xb1, 0xc3, 0xdd, + 0x8d, 0x90, 0x00, 0x68, 0x21, 0x48, 0xe8, 0xba, + 0x56, 0x9f, 0x8f, 0xe7, 0xa4, 0x4d, 0x36, 0x55, + 0xd0, 0x34, 0x99, 0xa6, 0x1c, 0x4c, 0xc1, 0xe2, + 0x65, 0x98, 0x14, 0x8e, 0x6a, 0x05, 0xb1, 0x2b +}; +static const u8 key65[] __initconst = { + 0xbd, 0x5c, 0x8a, 0xb0, 0x11, 0x29, 0xf3, 0x00, + 0x7a, 0x78, 0x32, 0x63, 0x34, 0x00, 0xe6, 0x7d, + 0x30, 0x54, 0xde, 0x37, 0xda, 0xc2, 0xc4, 0x3d, + 0x92, 0x6b, 0x4c, 0xc2, 0x92, 0xe9, 0x9e, 0x2a +}; +enum { nonce65 = 0xf654a3031de746f2ULL }; + +static const u8 input66[] __initconst = { + 0x4b, 0x27, 0x30, 0x8f, 0x28, 0xd8, 0x60, 0x46, + 0x39, 0x06, 0x49, 0xea, 0x1b, 0x71, 0x26, 0xe0, + 0x99, 0x2b, 0xd4, 0x8f, 0x64, 0x64, 0xcd, 0xac, + 0x1d, 0x78, 0x88, 0x90, 0xe1, 0x5c, 0x24, 0x4b, + 0xdc, 0x2d, 0xb7, 0xee, 0x3a, 0xe6, 0x86, 0x2c, + 0x21, 0xe4, 0x2b, 0xfc, 0xe8, 0x19, 0xca, 0x65, + 0xe7, 0xdd, 0x6f, 0x52, 0xb3, 0x11, 0xe1, 0xe2, + 0xbf, 0xe8, 0x70, 0xe3, 0x0d, 0x45, 0xb8, 0xa5, + 0x20, 0xb7, 0xb5, 0xaf, 0xff, 0x08, 0xcf, 0x23, + 0x65, 0xdf, 0x8d, 
0xc3, 0x31, 0xf3, 0x1e, 0x6a, + 0x58, 0x8d, 0xcc, 0x45, 0x16, 0x86, 0x1f, 0x31, + 0x5c, 0x27, 0xcd, 0xc8, 0x6b, 0x19, 0x1e, 0xec, + 0x44, 0x75, 0x63, 0x97, 0xfd, 0x79, 0xf6, 0x62, + 0xc5, 0xba, 0x17, 0xc7, 0xab, 0x8f, 0xbb, 0xed, + 0x85, 0x2a, 0x98, 0x79, 0x21, 0xec, 0x6e, 0x4d, + 0xdc, 0xfa, 0x72, 0x52, 0xba, 0xc8, 0x4c +}; +static const u8 output66[] __initconst = { + 0x76, 0x5b, 0x2c, 0xa7, 0x62, 0xb9, 0x08, 0x4a, + 0xc6, 0x4a, 0x92, 0xc3, 0xbb, 0x10, 0xb3, 0xee, + 0xff, 0xb9, 0x07, 0xc7, 0x27, 0xcb, 0x1e, 0xcf, + 0x58, 0x6f, 0xa1, 0x64, 0xe8, 0xf1, 0x4e, 0xe1, + 0xef, 0x18, 0x96, 0xab, 0x97, 0x28, 0xd1, 0x7c, + 0x71, 0x6c, 0xd1, 0xe2, 0xfa, 0xd9, 0x75, 0xcb, + 0xeb, 0xea, 0x0c, 0x86, 0x82, 0xd8, 0xf4, 0xcc, + 0xea, 0xa3, 0x00, 0xfa, 0x82, 0xd2, 0xcd, 0xcb, + 0xdb, 0x63, 0x28, 0xe2, 0x82, 0xe9, 0x01, 0xed, + 0x31, 0xe6, 0x71, 0x45, 0x08, 0x89, 0x8a, 0x23, + 0xa8, 0xb5, 0xc2, 0xe2, 0x9f, 0xe9, 0xb8, 0x9a, + 0xc4, 0x79, 0x6d, 0x71, 0x52, 0x61, 0x74, 0x6c, + 0x1b, 0xd7, 0x65, 0x6d, 0x03, 0xc4, 0x1a, 0xc0, + 0x50, 0xba, 0xd6, 0xc9, 0x43, 0x50, 0xbe, 0x09, + 0x09, 0x8a, 0xdb, 0xaa, 0x76, 0x4e, 0x3b, 0x61, + 0x3c, 0x7c, 0x44, 0xe7, 0xdb, 0x10, 0xa7 +}; +static const u8 key66[] __initconst = { + 0x88, 0xdf, 0xca, 0x68, 0xaf, 0x4f, 0xb3, 0xfd, + 0x6e, 0xa7, 0x95, 0x35, 0x8a, 0xe8, 0x37, 0xe8, + 0xc8, 0x55, 0xa2, 0x2a, 0x6d, 0x77, 0xf8, 0x93, + 0x7a, 0x41, 0xf3, 0x7b, 0x95, 0xdf, 0x89, 0xf5 +}; +enum { nonce66 = 0x1024b4fdd415cf82ULL }; + +static const u8 input67[] __initconst = { + 0xd4, 0x2e, 0xfa, 0x92, 0xe9, 0x29, 0x68, 0xb7, + 0x54, 0x2c, 0xf7, 0xa4, 0x2d, 0xb7, 0x50, 0xb5, + 0xc5, 0xb2, 0x9d, 0x17, 0x5e, 0x0a, 0xca, 0x37, + 0xbf, 0x60, 0xae, 0xd2, 0x98, 0xe9, 0xfa, 0x59, + 0x67, 0x62, 0xe6, 0x43, 0x0c, 0x77, 0x80, 0x82, + 0x33, 0x61, 0xa3, 0xff, 0xc1, 0xa0, 0x8f, 0x56, + 0xbc, 0xec, 0x65, 0x43, 0x88, 0xa5, 0xff, 0x51, + 0x64, 0x30, 0xee, 0x34, 0xb7, 0x5c, 0x28, 0x68, + 0xc3, 0x52, 0xd2, 0xac, 0x78, 0x2a, 0xa6, 0x10, + 0xb8, 0xb2, 0x4c, 0x80, 0x4f, 0x99, 0xb2, 0x36, 
+ 0x94, 0x8f, 0x66, 0xcb, 0xa1, 0x91, 0xed, 0x06, + 0x42, 0x6d, 0xc1, 0xae, 0x55, 0x93, 0xdd, 0x93, + 0x9e, 0x88, 0x34, 0x7f, 0x98, 0xeb, 0xbe, 0x61, + 0xf9, 0xa9, 0x0f, 0xd9, 0xc4, 0x87, 0xd5, 0xef, + 0xcc, 0x71, 0x8c, 0x0e, 0xce, 0xad, 0x02, 0xcf, + 0xa2, 0x61, 0xdf, 0xb1, 0xfe, 0x3b, 0xdc, 0xc0, + 0x58, 0xb5, 0x71, 0xa1, 0x83, 0xc9, 0xb4, 0xaf, + 0x9d, 0x54, 0x12, 0xcd, 0xea, 0x06, 0xd6, 0x4e, + 0xe5, 0x27, 0x0c, 0xc3, 0xbb, 0xa8, 0x0a, 0x81, + 0x75, 0xc3, 0xc9, 0xd4, 0x35, 0x3e, 0x53, 0x9f, + 0xaa, 0x20, 0xc0, 0x68, 0x39, 0x2c, 0x96, 0x39, + 0x53, 0x81, 0xda, 0x07, 0x0f, 0x44, 0xa5, 0x47, + 0x0e, 0xb3, 0x87, 0x0d, 0x1b, 0xc1, 0xe5, 0x41, + 0x35, 0x12, 0x58, 0x96, 0x69, 0x8a, 0x1a, 0xa3, + 0x9d, 0x3d, 0xd4, 0xb1, 0x8e, 0x1f, 0x96, 0x87, + 0xda, 0xd3, 0x19, 0xe2, 0xb1, 0x3a, 0x19, 0x74, + 0xa0, 0x00, 0x9f, 0x4d, 0xbc, 0xcb, 0x0c, 0xe9, + 0xec, 0x10, 0xdf, 0x2a, 0x88, 0xdc, 0x30, 0x51, + 0x46, 0x56, 0x53, 0x98, 0x6a, 0x26, 0x14, 0x05, + 0x54, 0x81, 0x55, 0x0b, 0x3c, 0x85, 0xdd, 0x33, + 0x81, 0x11, 0x29, 0x82, 0x46, 0x35, 0xe1, 0xdb, + 0x59, 0x7b +}; +static const u8 output67[] __initconst = { + 0x64, 0x6c, 0xda, 0x7f, 0xd4, 0xa9, 0x2a, 0x5e, + 0x22, 0xae, 0x8d, 0x67, 0xdb, 0xee, 0xfd, 0xd0, + 0x44, 0x80, 0x17, 0xb2, 0xe3, 0x87, 0xad, 0x57, + 0x15, 0xcb, 0x88, 0x64, 0xc0, 0xf1, 0x49, 0x3d, + 0xfa, 0xbe, 0xa8, 0x9f, 0x12, 0xc3, 0x57, 0x56, + 0x70, 0xa5, 0xc5, 0x6b, 0xf1, 0xab, 0xd5, 0xde, + 0x77, 0x92, 0x6a, 0x56, 0x03, 0xf5, 0x21, 0x0d, + 0xb6, 0xc4, 0xcc, 0x62, 0x44, 0x3f, 0xb1, 0xc1, + 0x61, 0x41, 0x90, 0xb2, 0xd5, 0xb8, 0xf3, 0x57, + 0xfb, 0xc2, 0x6b, 0x25, 0x58, 0xc8, 0x45, 0x20, + 0x72, 0x29, 0x6f, 0x9d, 0xb5, 0x81, 0x4d, 0x2b, + 0xb2, 0x89, 0x9e, 0x91, 0x53, 0x97, 0x1c, 0xd9, + 0x3d, 0x79, 0xdc, 0x14, 0xae, 0x01, 0x73, 0x75, + 0xf0, 0xca, 0xd5, 0xab, 0x62, 0x5c, 0x7a, 0x7d, + 0x3f, 0xfe, 0x22, 0x7d, 0xee, 0xe2, 0xcb, 0x76, + 0x55, 0xec, 0x06, 0xdd, 0x41, 0x47, 0x18, 0x62, + 0x1d, 0x57, 0xd0, 0xd6, 0xb6, 0x0f, 0x4b, 0xfc, + 0x79, 0x19, 0xf4, 0xd6, 0x37, 0x86, 
0x18, 0x1f, + 0x98, 0x0d, 0x9e, 0x15, 0x2d, 0xb6, 0x9a, 0x8a, + 0x8c, 0x80, 0x22, 0x2f, 0x82, 0xc4, 0xc7, 0x36, + 0xfa, 0xfa, 0x07, 0xbd, 0xc2, 0x2a, 0xe2, 0xea, + 0x93, 0xc8, 0xb2, 0x90, 0x33, 0xf2, 0xee, 0x4b, + 0x1b, 0xf4, 0x37, 0x92, 0x13, 0xbb, 0xe2, 0xce, + 0xe3, 0x03, 0xcf, 0x07, 0x94, 0xab, 0x9a, 0xc9, + 0xff, 0x83, 0x69, 0x3a, 0xda, 0x2c, 0xd0, 0x47, + 0x3d, 0x6c, 0x1a, 0x60, 0x68, 0x47, 0xb9, 0x36, + 0x52, 0xdd, 0x16, 0xef, 0x6c, 0xbf, 0x54, 0x11, + 0x72, 0x62, 0xce, 0x8c, 0x9d, 0x90, 0xa0, 0x25, + 0x06, 0x92, 0x3e, 0x12, 0x7e, 0x1a, 0x1d, 0xe5, + 0xa2, 0x71, 0xce, 0x1c, 0x4c, 0x6a, 0x7c, 0xdc, + 0x3d, 0xe3, 0x6e, 0x48, 0x9d, 0xb3, 0x64, 0x7d, + 0x78, 0x40 +}; +static const u8 key67[] __initconst = { + 0xa9, 0x20, 0x75, 0x89, 0x7e, 0x37, 0x85, 0x48, + 0xa3, 0xfb, 0x7b, 0xe8, 0x30, 0xa7, 0xe3, 0x6e, + 0xa6, 0xc1, 0x71, 0x17, 0xc1, 0x6c, 0x9b, 0xc2, + 0xde, 0xf0, 0xa7, 0x19, 0xec, 0xce, 0xc6, 0x53 +}; +enum { nonce67 = 0x4adc4d1f968c8a10ULL }; + +static const u8 input68[] __initconst = { + 0x99, 0xae, 0x72, 0xfb, 0x16, 0xe1, 0xf1, 0x59, + 0x43, 0x15, 0x4e, 0x33, 0xa0, 0x95, 0xe7, 0x6c, + 0x74, 0x24, 0x31, 0xca, 0x3b, 0x2e, 0xeb, 0xd7, + 0x11, 0xd8, 0xe0, 0x56, 0x92, 0x91, 0x61, 0x57, + 0xe2, 0x82, 0x9f, 0x8f, 0x37, 0xf5, 0x3d, 0x24, + 0x92, 0x9d, 0x87, 0x00, 0x8d, 0x89, 0xe0, 0x25, + 0x8b, 0xe4, 0x20, 0x5b, 0x8a, 0x26, 0x2c, 0x61, + 0x78, 0xb0, 0xa6, 0x3e, 0x82, 0x18, 0xcf, 0xdc, + 0x2d, 0x24, 0xdd, 0x81, 0x42, 0xc4, 0x95, 0xf0, + 0x48, 0x60, 0x71, 0xe3, 0xe3, 0xac, 0xec, 0xbe, + 0x98, 0x6b, 0x0c, 0xb5, 0x6a, 0xa9, 0xc8, 0x79, + 0x23, 0x2e, 0x38, 0x0b, 0x72, 0x88, 0x8c, 0xe7, + 0x71, 0x8b, 0x36, 0xe3, 0x58, 0x3d, 0x9c, 0xa0, + 0xa2, 0xea, 0xcf, 0x0c, 0x6a, 0x6c, 0x64, 0xdf, + 0x97, 0x21, 0x8f, 0x93, 0xfb, 0xba, 0xf3, 0x5a, + 0xd7, 0x8f, 0xa6, 0x37, 0x15, 0x50, 0x43, 0x02, + 0x46, 0x7f, 0x93, 0x46, 0x86, 0x31, 0xe2, 0xaa, + 0x24, 0xa8, 0x26, 0xae, 0xe6, 0xc0, 0x05, 0x73, + 0x0b, 0x4f, 0x7e, 0xed, 0x65, 0xeb, 0x56, 0x1e, + 0xb6, 0xb3, 0x0b, 0xc3, 0x0e, 0x31, 
0x95, 0xa9, + 0x18, 0x4d, 0xaf, 0x38, 0xd7, 0xec, 0xc6, 0x44, + 0x72, 0x77, 0x4e, 0x25, 0x4b, 0x25, 0xdd, 0x1e, + 0x8c, 0xa2, 0xdf, 0xf6, 0x2a, 0x97, 0x1a, 0x88, + 0x2c, 0x8a, 0x5d, 0xfe, 0xe8, 0xfb, 0x35, 0xe8, + 0x0f, 0x2b, 0x7a, 0x18, 0x69, 0x43, 0x31, 0x1d, + 0x38, 0x6a, 0x62, 0x95, 0x0f, 0x20, 0x4b, 0xbb, + 0x97, 0x3c, 0xe0, 0x64, 0x2f, 0x52, 0xc9, 0x2d, + 0x4d, 0x9d, 0x54, 0x04, 0x3d, 0xc9, 0xea, 0xeb, + 0xd0, 0x86, 0x52, 0xff, 0x42, 0xe1, 0x0d, 0x7a, + 0xad, 0x88, 0xf9, 0x9b, 0x1e, 0x5e, 0x12, 0x27, + 0x95, 0x3e, 0x0c, 0x2c, 0x13, 0x00, 0x6f, 0x8e, + 0x93, 0x69, 0x0e, 0x01, 0x8c, 0xc1, 0xfd, 0xb3 +}; +static const u8 output68[] __initconst = { + 0x26, 0x3e, 0xf2, 0xb1, 0xf5, 0xef, 0x81, 0xa4, + 0xb7, 0x42, 0xd4, 0x26, 0x18, 0x4b, 0xdd, 0x6a, + 0x47, 0x15, 0xcb, 0x0e, 0x57, 0xdb, 0xa7, 0x29, + 0x7e, 0x7b, 0x3f, 0x47, 0x89, 0x57, 0xab, 0xea, + 0x14, 0x7b, 0xcf, 0x37, 0xdb, 0x1c, 0xe1, 0x11, + 0x77, 0xae, 0x2e, 0x4c, 0xd2, 0x08, 0x3f, 0xa6, + 0x62, 0x86, 0xa6, 0xb2, 0x07, 0xd5, 0x3f, 0x9b, + 0xdc, 0xc8, 0x50, 0x4b, 0x7b, 0xb9, 0x06, 0xe6, + 0xeb, 0xac, 0x98, 0x8c, 0x36, 0x0c, 0x1e, 0xb2, + 0xc8, 0xfb, 0x24, 0x60, 0x2c, 0x08, 0x17, 0x26, + 0x5b, 0xc8, 0xc2, 0xdf, 0x9c, 0x73, 0x67, 0x4a, + 0xdb, 0xcf, 0xd5, 0x2c, 0x2b, 0xca, 0x24, 0xcc, + 0xdb, 0xc9, 0xa8, 0xf2, 0x5d, 0x67, 0xdf, 0x5c, + 0x62, 0x0b, 0x58, 0xc0, 0x83, 0xde, 0x8b, 0xf6, + 0x15, 0x0a, 0xd6, 0x32, 0xd8, 0xf5, 0xf2, 0x5f, + 0x33, 0xce, 0x7e, 0xab, 0x76, 0xcd, 0x14, 0x91, + 0xd8, 0x41, 0x90, 0x93, 0xa1, 0xaf, 0xf3, 0x45, + 0x6c, 0x1b, 0x25, 0xbd, 0x48, 0x51, 0x6d, 0x15, + 0x47, 0xe6, 0x23, 0x50, 0x32, 0x69, 0x1e, 0xb5, + 0x94, 0xd3, 0x97, 0xba, 0xd7, 0x37, 0x4a, 0xba, + 0xb9, 0xcd, 0xfb, 0x96, 0x9a, 0x90, 0xe0, 0x37, + 0xf8, 0xdf, 0x91, 0x6c, 0x62, 0x13, 0x19, 0x21, + 0x4b, 0xa9, 0xf1, 0x12, 0x66, 0xe2, 0x74, 0xd7, + 0x81, 0xa0, 0x74, 0x8d, 0x7e, 0x7e, 0xc9, 0xb1, + 0x69, 0x8f, 0xed, 0xb3, 0xf6, 0x97, 0xcd, 0x72, + 0x78, 0x93, 0xd3, 0x54, 0x6b, 0x43, 0xac, 0x29, + 0xb4, 0xbc, 0x7d, 0xa4, 0x26, 0x4b, 
0x7b, 0xab, + 0xd6, 0x67, 0x22, 0xff, 0x03, 0x92, 0xb6, 0xd4, + 0x96, 0x94, 0x5a, 0xe5, 0x02, 0x35, 0x77, 0xfa, + 0x3f, 0x54, 0x1d, 0xdd, 0x35, 0x39, 0xfe, 0x03, + 0xdd, 0x8e, 0x3c, 0x8c, 0xc2, 0x69, 0x2a, 0xb1, + 0xb7, 0xb3, 0xa1, 0x89, 0x84, 0xea, 0x16, 0xe2 +}; +static const u8 key68[] __initconst = { + 0xd2, 0x49, 0x7f, 0xd7, 0x49, 0x66, 0x0d, 0xb3, + 0x5a, 0x7e, 0x3c, 0xfc, 0x37, 0x83, 0x0e, 0xf7, + 0x96, 0xd8, 0xd6, 0x33, 0x79, 0x2b, 0x84, 0x53, + 0x06, 0xbc, 0x6c, 0x0a, 0x55, 0x84, 0xfe, 0xab +}; +enum { nonce68 = 0x6a6df7ff0a20de06ULL }; + +static const u8 input69[] __initconst = { + 0xf9, 0x18, 0x4c, 0xd2, 0x3f, 0xf7, 0x22, 0xd9, + 0x58, 0xb6, 0x3b, 0x38, 0x69, 0x79, 0xf4, 0x71, + 0x5f, 0x38, 0x52, 0x1f, 0x17, 0x6f, 0x6f, 0xd9, + 0x09, 0x2b, 0xfb, 0x67, 0xdc, 0xc9, 0xe8, 0x4a, + 0x70, 0x9f, 0x2e, 0x3c, 0x06, 0xe5, 0x12, 0x20, + 0x25, 0x29, 0xd0, 0xdc, 0x81, 0xc5, 0xc6, 0x0f, + 0xd2, 0xa8, 0x81, 0x15, 0x98, 0xb2, 0x71, 0x5a, + 0x9a, 0xe9, 0xfb, 0xaf, 0x0e, 0x5f, 0x8a, 0xf3, + 0x16, 0x4a, 0x47, 0xf2, 0x5c, 0xbf, 0xda, 0x52, + 0x9a, 0xa6, 0x36, 0xfd, 0xc6, 0xf7, 0x66, 0x00, + 0xcc, 0x6c, 0xd4, 0xb3, 0x07, 0x6d, 0xeb, 0xfe, + 0x92, 0x71, 0x25, 0xd0, 0xcf, 0x9c, 0xe8, 0x65, + 0x45, 0x10, 0xcf, 0x62, 0x74, 0x7d, 0xf2, 0x1b, + 0x57, 0xa0, 0xf1, 0x6b, 0xa4, 0xd5, 0xfa, 0x12, + 0x27, 0x5a, 0xf7, 0x99, 0xfc, 0xca, 0xf3, 0xb8, + 0x2c, 0x8b, 0xba, 0x28, 0x74, 0xde, 0x8f, 0x78, + 0xa2, 0x8c, 0xaf, 0x89, 0x4b, 0x05, 0xe2, 0xf3, + 0xf8, 0xd2, 0xef, 0xac, 0xa4, 0xc4, 0xe2, 0xe2, + 0x36, 0xbb, 0x5e, 0xae, 0xe6, 0x87, 0x3d, 0x88, + 0x9f, 0xb8, 0x11, 0xbb, 0xcf, 0x57, 0xce, 0xd0, + 0xba, 0x62, 0xf4, 0xf8, 0x9b, 0x95, 0x04, 0xc9, + 0xcf, 0x01, 0xe9, 0xf1, 0xc8, 0xc6, 0x22, 0xa4, + 0xf2, 0x8b, 0x2f, 0x24, 0x0a, 0xf5, 0x6e, 0xb7, + 0xd4, 0x2c, 0xb6, 0xf7, 0x5c, 0x97, 0x61, 0x0b, + 0xd9, 0xb5, 0x06, 0xcd, 0xed, 0x3e, 0x1f, 0xc5, + 0xb2, 0x6c, 0xa3, 0xea, 0xb8, 0xad, 0xa6, 0x42, + 0x88, 0x7a, 0x52, 0xd5, 0x64, 0xba, 0xb5, 0x20, + 0x10, 0xa0, 0x0f, 0x0d, 0xea, 0xef, 0x5a, 0x9b, + 
0x27, 0xb8, 0xca, 0x20, 0x19, 0x6d, 0xa8, 0xc4, + 0x46, 0x04, 0xb3, 0xe8, 0xf8, 0x66, 0x1b, 0x0a, + 0xce, 0x76, 0x5d, 0x59, 0x58, 0x05, 0xee, 0x3e, + 0x3c, 0x86, 0x5b, 0x49, 0x1c, 0x72, 0x18, 0x01, + 0x62, 0x92, 0x0f, 0x3e, 0xd1, 0x57, 0x5e, 0x20, + 0x7b, 0xfb, 0x4d, 0x3c, 0xc5, 0x35, 0x43, 0x2f, + 0xb0, 0xc5, 0x7c, 0xe4, 0xa2, 0x84, 0x13, 0x77 +}; +static const u8 output69[] __initconst = { + 0xbb, 0x4a, 0x7f, 0x7c, 0xd5, 0x2f, 0x89, 0x06, + 0xec, 0x20, 0xf1, 0x9a, 0x11, 0x09, 0x14, 0x2e, + 0x17, 0x50, 0xf9, 0xd5, 0xf5, 0x48, 0x7c, 0x7a, + 0x55, 0xc0, 0x57, 0x03, 0xe3, 0xc4, 0xb2, 0xb7, + 0x18, 0x47, 0x95, 0xde, 0xaf, 0x80, 0x06, 0x3c, + 0x5a, 0xf2, 0xc3, 0x53, 0xe3, 0x29, 0x92, 0xf8, + 0xff, 0x64, 0x85, 0xb9, 0xf7, 0xd3, 0x80, 0xd2, + 0x0c, 0x5d, 0x7b, 0x57, 0x0c, 0x51, 0x79, 0x86, + 0xf3, 0x20, 0xd2, 0xb8, 0x6e, 0x0c, 0x5a, 0xce, + 0xeb, 0x88, 0x02, 0x8b, 0x82, 0x1b, 0x7f, 0xf5, + 0xde, 0x7f, 0x48, 0x48, 0xdf, 0xa0, 0x55, 0xc6, + 0x0c, 0x22, 0xa1, 0x80, 0x8d, 0x3b, 0xcb, 0x40, + 0x2d, 0x3d, 0x0b, 0xf2, 0xe0, 0x22, 0x13, 0x99, + 0xe1, 0xa7, 0x27, 0x68, 0x31, 0xe1, 0x24, 0x5d, + 0xd2, 0xee, 0x16, 0xc1, 0xd7, 0xa8, 0x14, 0x19, + 0x23, 0x72, 0x67, 0x27, 0xdc, 0x5e, 0xb9, 0xc7, + 0xd8, 0xe3, 0x55, 0x50, 0x40, 0x98, 0x7b, 0xe7, + 0x34, 0x1c, 0x3b, 0x18, 0x14, 0xd8, 0x62, 0xc1, + 0x93, 0x84, 0xf3, 0x5b, 0xdd, 0x9e, 0x1f, 0x3b, + 0x0b, 0xbc, 0x4e, 0x5b, 0x79, 0xa3, 0xca, 0x74, + 0x2a, 0x98, 0xe8, 0x04, 0x39, 0xef, 0xc6, 0x76, + 0x6d, 0xee, 0x9f, 0x67, 0x5b, 0x59, 0x3a, 0xe5, + 0xf2, 0x3b, 0xca, 0x89, 0xe8, 0x9b, 0x03, 0x3d, + 0x11, 0xd2, 0x4a, 0x70, 0xaf, 0x88, 0xb0, 0x94, + 0x96, 0x26, 0xab, 0x3c, 0xc1, 0xb8, 0xe4, 0xe7, + 0x14, 0x61, 0x64, 0x3a, 0x61, 0x08, 0x0f, 0xa9, + 0xce, 0x64, 0xb2, 0x40, 0xf8, 0x20, 0x3a, 0xa9, + 0x31, 0xbd, 0x7e, 0x16, 0xca, 0xf5, 0x62, 0x0f, + 0x91, 0x9f, 0x8e, 0x1d, 0xa4, 0x77, 0xf3, 0x87, + 0x61, 0xe8, 0x14, 0xde, 0x18, 0x68, 0x4e, 0x9d, + 0x73, 0xcd, 0x8a, 0xe4, 0x80, 0x84, 0x23, 0xaa, + 0x9d, 0x64, 0x1c, 0x80, 0x41, 0xca, 0x82, 0x40, + 
0x94, 0x55, 0xe3, 0x28, 0xa1, 0x97, 0x71, 0xba, + 0xf2, 0x2c, 0x39, 0x62, 0x29, 0x56, 0xd0, 0xff, + 0xb2, 0x82, 0x20, 0x59, 0x1f, 0xc3, 0x64, 0x57 +}; +static const u8 key69[] __initconst = { + 0x19, 0x09, 0xe9, 0x7c, 0xd9, 0x02, 0x4a, 0x0c, + 0x52, 0x25, 0xad, 0x5c, 0x2e, 0x8d, 0x86, 0x10, + 0x85, 0x2b, 0xba, 0xa4, 0x44, 0x5b, 0x39, 0x3e, + 0x18, 0xaa, 0xce, 0x0e, 0xe2, 0x69, 0x3c, 0xcf +}; +enum { nonce69 = 0xdb925a1948f0f060ULL }; + +static const u8 input70[] __initconst = { + 0x10, 0xe7, 0x83, 0xcf, 0x42, 0x9f, 0xf2, 0x41, + 0xc7, 0xe4, 0xdb, 0xf9, 0xa3, 0x02, 0x1d, 0x8d, + 0x50, 0x81, 0x2c, 0x6b, 0x92, 0xe0, 0x4e, 0xea, + 0x26, 0x83, 0x2a, 0xd0, 0x31, 0xf1, 0x23, 0xf3, + 0x0e, 0x88, 0x14, 0x31, 0xf9, 0x01, 0x63, 0x59, + 0x21, 0xd1, 0x8b, 0xdd, 0x06, 0xd0, 0xc6, 0xab, + 0x91, 0x71, 0x82, 0x4d, 0xd4, 0x62, 0x37, 0x17, + 0xf9, 0x50, 0xf9, 0xb5, 0x74, 0xce, 0x39, 0x80, + 0x80, 0x78, 0xf8, 0xdc, 0x1c, 0xdb, 0x7c, 0x3d, + 0xd4, 0x86, 0x31, 0x00, 0x75, 0x7b, 0xd1, 0x42, + 0x9f, 0x1b, 0x97, 0x88, 0x0e, 0x14, 0x0e, 0x1e, + 0x7d, 0x7b, 0xc4, 0xd2, 0xf3, 0xc1, 0x6d, 0x17, + 0x5d, 0xc4, 0x75, 0x54, 0x0f, 0x38, 0x65, 0x89, + 0xd8, 0x7d, 0xab, 0xc9, 0xa7, 0x0a, 0x21, 0x0b, + 0x37, 0x12, 0x05, 0x07, 0xb5, 0x68, 0x32, 0x32, + 0xb9, 0xf8, 0x97, 0x17, 0x03, 0xed, 0x51, 0x8f, + 0x3d, 0x5a, 0xd0, 0x12, 0x01, 0x6e, 0x2e, 0x91, + 0x1c, 0xbe, 0x6b, 0xa3, 0xcc, 0x75, 0x62, 0x06, + 0x8e, 0x65, 0xbb, 0xe2, 0x29, 0x71, 0x4b, 0x89, + 0x6a, 0x9d, 0x85, 0x8c, 0x8c, 0xdf, 0x94, 0x95, + 0x23, 0x66, 0xf8, 0x92, 0xee, 0x56, 0xeb, 0xb3, + 0xeb, 0xd2, 0x4a, 0x3b, 0x77, 0x8a, 0x6e, 0xf6, + 0xca, 0xd2, 0x34, 0x00, 0xde, 0xbe, 0x1d, 0x7a, + 0x73, 0xef, 0x2b, 0x80, 0x56, 0x16, 0x29, 0xbf, + 0x6e, 0x33, 0xed, 0x0d, 0xe2, 0x02, 0x60, 0x74, + 0xe9, 0x0a, 0xbc, 0xd1, 0xc5, 0xe8, 0x53, 0x02, + 0x79, 0x0f, 0x25, 0x0c, 0xef, 0xab, 0xd3, 0xbc, + 0xb7, 0xfc, 0xf3, 0xb0, 0x34, 0xd1, 0x07, 0xd2, + 0x5a, 0x31, 0x1f, 0xec, 0x1f, 0x87, 0xed, 0xdd, + 0x6a, 0xc1, 0xe8, 0xb3, 0x25, 0x4c, 0xc6, 0x9b, + 0x91, 0x73, 
0xec, 0x06, 0x73, 0x9e, 0x57, 0x65, + 0x32, 0x75, 0x11, 0x74, 0x6e, 0xa4, 0x7d, 0x0d, + 0x74, 0x9f, 0x51, 0x10, 0x10, 0x47, 0xc9, 0x71, + 0x6e, 0x97, 0xae, 0x44, 0x41, 0xef, 0x98, 0x78, + 0xf4, 0xc5, 0xbd, 0x5e, 0x00, 0xe5, 0xfd, 0xe2, + 0xbe, 0x8c, 0xc2, 0xae, 0xc2, 0xee, 0x59, 0xf6, + 0xcb, 0x20, 0x54, 0x84, 0xc3, 0x31, 0x7e, 0x67, + 0x71, 0xb6, 0x76, 0xbe, 0x81, 0x8f, 0x82, 0xad, + 0x01, 0x8f, 0xc4, 0x00, 0x04, 0x3d, 0x8d, 0x34, + 0xaa, 0xea, 0xc0, 0xea, 0x91, 0x42, 0xb6, 0xb8, + 0x43, 0xf3, 0x17, 0xb2, 0x73, 0x64, 0x82, 0x97, + 0xd5, 0xc9, 0x07, 0x77, 0xb1, 0x26, 0xe2, 0x00, + 0x6a, 0xae, 0x70, 0x0b, 0xbe, 0xe6, 0xb8, 0x42, + 0x81, 0x55, 0xf7, 0xb8, 0x96, 0x41, 0x9d, 0xd4, + 0x2c, 0x27, 0x00, 0xcc, 0x91, 0x28, 0x22, 0xa4, + 0x7b, 0x42, 0x51, 0x9e, 0xd6, 0xec, 0xf3, 0x6b, + 0x00, 0xff, 0x5c, 0xa2, 0xac, 0x47, 0x33, 0x2d, + 0xf8, 0x11, 0x65, 0x5f, 0x4d, 0x79, 0x8b, 0x4f, + 0xad, 0xf0, 0x9d, 0xcd, 0xb9, 0x7b, 0x08, 0xf7, + 0x32, 0x51, 0xfa, 0x39, 0xaa, 0x78, 0x05, 0xb1, + 0xf3, 0x5d, 0xe8, 0x7c, 0x8e, 0x4f, 0xa2, 0xe0, + 0x98, 0x0c, 0xb2, 0xa7, 0xf0, 0x35, 0x8e, 0x70, + 0x7c, 0x82, 0xf3, 0x1b, 0x26, 0x28, 0x12, 0xe5, + 0x23, 0x57, 0xe4, 0xb4, 0x9b, 0x00, 0x39, 0x97, + 0xef, 0x7c, 0x46, 0x9b, 0x34, 0x6b, 0xe7, 0x0e, + 0xa3, 0x2a, 0x18, 0x11, 0x64, 0xc6, 0x7c, 0x8b, + 0x06, 0x02, 0xf5, 0x69, 0x76, 0xf9, 0xaa, 0x09, + 0x5f, 0x68, 0xf8, 0x4a, 0x79, 0x58, 0xec, 0x37, + 0xcf, 0x3a, 0xcc, 0x97, 0x70, 0x1d, 0x3e, 0x52, + 0x18, 0x0a, 0xad, 0x28, 0x5b, 0x3b, 0xe9, 0x03, + 0x84, 0xe9, 0x68, 0x50, 0xce, 0xc4, 0xbc, 0x3e, + 0x21, 0xad, 0x63, 0xfe, 0xc6, 0xfd, 0x6e, 0x69, + 0x84, 0xa9, 0x30, 0xb1, 0x7a, 0xc4, 0x31, 0x10, + 0xc1, 0x1f, 0x6e, 0xeb, 0xa5, 0xa6, 0x01 +}; +static const u8 output70[] __initconst = { + 0x0f, 0x93, 0x2a, 0x20, 0xb3, 0x87, 0x2d, 0xce, + 0xd1, 0x3b, 0x30, 0xfd, 0x06, 0x6d, 0x0a, 0xaa, + 0x3e, 0xc4, 0x29, 0x02, 0x8a, 0xde, 0xa6, 0x4b, + 0x45, 0x1b, 0x4f, 0x25, 0x59, 0xd5, 0x56, 0x6a, + 0x3b, 0x37, 0xbd, 0x3e, 0x47, 0x12, 0x2c, 0x4e, + 0x60, 0x5f, 0x05, 
0x75, 0x61, 0x23, 0x05, 0x74, + 0xcb, 0xfc, 0x5a, 0xb3, 0xac, 0x5c, 0x3d, 0xab, + 0x52, 0x5f, 0x05, 0xbc, 0x57, 0xc0, 0x7e, 0xcf, + 0x34, 0x5d, 0x7f, 0x41, 0xa3, 0x17, 0x78, 0xd5, + 0x9f, 0xec, 0x0f, 0x1e, 0xf9, 0xfe, 0xa3, 0xbd, + 0x28, 0xb0, 0xba, 0x4d, 0x84, 0xdb, 0xae, 0x8f, + 0x1d, 0x98, 0xb7, 0xdc, 0xf9, 0xad, 0x55, 0x9c, + 0x89, 0xfe, 0x9b, 0x9c, 0xa9, 0x89, 0xf6, 0x97, + 0x9c, 0x3f, 0x09, 0x3e, 0xc6, 0x02, 0xc2, 0x55, + 0x58, 0x09, 0x54, 0x66, 0xe4, 0x36, 0x81, 0x35, + 0xca, 0x88, 0x17, 0x89, 0x80, 0x24, 0x2b, 0x21, + 0x89, 0xee, 0x45, 0x5a, 0xe7, 0x1f, 0xd5, 0xa5, + 0x16, 0xa4, 0xda, 0x70, 0x7e, 0xe9, 0x4f, 0x24, + 0x61, 0x97, 0xab, 0xa0, 0xe0, 0xe7, 0xb8, 0x5c, + 0x0f, 0x25, 0x17, 0x37, 0x75, 0x12, 0xb5, 0x40, + 0xde, 0x1c, 0x0d, 0x8a, 0x77, 0x62, 0x3c, 0x86, + 0xd9, 0x70, 0x2e, 0x96, 0x30, 0xd2, 0x55, 0xb3, + 0x6b, 0xc3, 0xf2, 0x9c, 0x47, 0xf3, 0x3a, 0x24, + 0x52, 0xc6, 0x38, 0xd8, 0x22, 0xb3, 0x0c, 0xfd, + 0x2f, 0xa3, 0x3c, 0xb5, 0xe8, 0x26, 0xe1, 0xa3, + 0xad, 0xb0, 0x82, 0x17, 0xc1, 0x53, 0xb8, 0x34, + 0x48, 0xee, 0x39, 0xae, 0x51, 0x43, 0xec, 0x82, + 0xce, 0x87, 0xc6, 0x76, 0xb9, 0x76, 0xd3, 0x53, + 0xfe, 0x49, 0x24, 0x7d, 0x02, 0x42, 0x2b, 0x72, + 0xfb, 0xcb, 0xd8, 0x96, 0x02, 0xc6, 0x9a, 0x20, + 0xf3, 0x5a, 0x67, 0xe8, 0x13, 0xf8, 0xb2, 0xcb, + 0xa2, 0xec, 0x18, 0x20, 0x4a, 0xb0, 0x73, 0x53, + 0x21, 0xb0, 0x77, 0x53, 0xd8, 0x76, 0xa1, 0x30, + 0x17, 0x72, 0x2e, 0x33, 0x5f, 0x33, 0x6b, 0x28, + 0xfb, 0xb0, 0xf4, 0xec, 0x8e, 0xed, 0x20, 0x7d, + 0x57, 0x8c, 0x74, 0x28, 0x64, 0x8b, 0xeb, 0x59, + 0x38, 0x3f, 0xe7, 0x83, 0x2e, 0xe5, 0x64, 0x4d, + 0x5c, 0x1f, 0xe1, 0x3b, 0xd9, 0x84, 0xdb, 0xc9, + 0xec, 0xd8, 0xc1, 0x7c, 0x1f, 0x1b, 0x68, 0x35, + 0xc6, 0x34, 0x10, 0xef, 0x19, 0xc9, 0x0a, 0xd6, + 0x43, 0x7f, 0xa6, 0xcb, 0x9d, 0xf4, 0xf0, 0x16, + 0xb1, 0xb1, 0x96, 0x64, 0xec, 0x8d, 0x22, 0x4c, + 0x4b, 0xe8, 0x1a, 0xba, 0x6f, 0xb7, 0xfc, 0xa5, + 0x69, 0x3e, 0xad, 0x78, 0x79, 0x19, 0xb5, 0x04, + 0x69, 0xe5, 0x3f, 0xff, 0x60, 0x8c, 0xda, 0x0b, + 0x7b, 0xf7, 0xe7, 
0xe6, 0x29, 0x3a, 0x85, 0xba, + 0xb5, 0xb0, 0x35, 0xbd, 0x38, 0xce, 0x34, 0x5e, + 0xf2, 0xdc, 0xd1, 0x8f, 0xc3, 0x03, 0x24, 0xa2, + 0x03, 0xf7, 0x4e, 0x49, 0x5b, 0xcf, 0x6d, 0xb0, + 0xeb, 0xe3, 0x30, 0x28, 0xd5, 0x5b, 0x82, 0x5f, + 0xe4, 0x7c, 0x1e, 0xec, 0xd2, 0x39, 0xf9, 0x6f, + 0x2e, 0xb3, 0xcd, 0x01, 0xb1, 0x67, 0xaa, 0xea, + 0xaa, 0xb3, 0x63, 0xaf, 0xd9, 0xb2, 0x1f, 0xba, + 0x05, 0x20, 0xeb, 0x19, 0x32, 0xf0, 0x6c, 0x3f, + 0x40, 0xcc, 0x93, 0xb3, 0xd8, 0x25, 0xa6, 0xe4, + 0xce, 0xd7, 0x7e, 0x48, 0x99, 0x65, 0x7f, 0x86, + 0xc5, 0xd4, 0x79, 0x6b, 0xab, 0x43, 0xb8, 0x6b, + 0xf1, 0x2f, 0xea, 0x4c, 0x5e, 0xf0, 0x3b, 0xb4, + 0xb8, 0xb0, 0x94, 0x0c, 0x6b, 0xe7, 0x22, 0x93, + 0xaa, 0x01, 0xcb, 0xf1, 0x11, 0x60, 0xf6, 0x69, + 0xcf, 0x14, 0xde, 0xfb, 0x90, 0x05, 0x27, 0x0c, + 0x1a, 0x9e, 0xf0, 0xb4, 0xc6, 0xa1, 0xe8, 0xdd, + 0xd0, 0x4c, 0x25, 0x4f, 0x9c, 0xb7, 0xb1, 0xb0, + 0x21, 0xdb, 0x87, 0x09, 0x03, 0xf2, 0xb3 +}; +static const u8 key70[] __initconst = { + 0x3b, 0x5b, 0x59, 0x36, 0x44, 0xd1, 0xba, 0x71, + 0x55, 0x87, 0x4d, 0x62, 0x3d, 0xc2, 0xfc, 0xaa, + 0x3f, 0x4e, 0x1a, 0xe4, 0xca, 0x09, 0xfc, 0x6a, + 0xb2, 0xd6, 0x5d, 0x79, 0xf9, 0x1a, 0x91, 0xa7 +}; +enum { nonce70 = 0x3fd6786dd147a85ULL }; + +static const u8 input71[] __initconst = { + 0x18, 0x78, 0xd6, 0x79, 0xe4, 0x9a, 0x6c, 0x73, + 0x17, 0xd4, 0x05, 0x0f, 0x1e, 0x9f, 0xd9, 0x2b, + 0x86, 0x48, 0x7d, 0xf4, 0xd9, 0x1c, 0x76, 0xfc, + 0x8e, 0x22, 0x34, 0xe1, 0x48, 0x4a, 0x8d, 0x79, + 0xb7, 0xbb, 0x88, 0xab, 0x90, 0xde, 0xc5, 0xb4, + 0xb4, 0xe7, 0x85, 0x49, 0xda, 0x57, 0xeb, 0xc9, + 0xcd, 0x21, 0xfc, 0x45, 0x6e, 0x32, 0x67, 0xf2, + 0x4f, 0xa6, 0x54, 0xe5, 0x20, 0xed, 0xcf, 0xc6, + 0x62, 0x25, 0x8e, 0x00, 0xf8, 0x6b, 0xa2, 0x80, + 0xac, 0x88, 0xa6, 0x59, 0x27, 0x83, 0x95, 0x11, + 0x3f, 0x70, 0x5e, 0x3f, 0x11, 0xfb, 0x26, 0xbf, + 0xe1, 0x48, 0x75, 0xf9, 0x86, 0xbf, 0xa6, 0x5d, + 0x15, 0x61, 0x66, 0xbf, 0x78, 0x8f, 0x6b, 0x9b, + 0xda, 0x98, 0xb7, 0x19, 0xe2, 0xf2, 0xa3, 0x9c, + 0x7c, 0x6a, 0x9a, 0xd8, 0x3d, 0x4c, 
0x2c, 0xe1, + 0x09, 0xb4, 0x28, 0x82, 0x4e, 0xab, 0x0c, 0x75, + 0x63, 0xeb, 0xbc, 0xd0, 0x71, 0xa2, 0x73, 0x85, + 0xed, 0x53, 0x7a, 0x3f, 0x68, 0x9f, 0xd0, 0xa9, + 0x00, 0x5a, 0x9e, 0x80, 0x55, 0x00, 0xe6, 0xae, + 0x0c, 0x03, 0x40, 0xed, 0xfc, 0x68, 0x4a, 0xb7, + 0x1e, 0x09, 0x65, 0x30, 0x5a, 0x3d, 0x97, 0x4d, + 0x5e, 0x51, 0x8e, 0xda, 0xc3, 0x55, 0x8c, 0xfb, + 0xcf, 0x83, 0x05, 0x35, 0x0d, 0x08, 0x1b, 0xf3, + 0x3a, 0x57, 0x96, 0xac, 0x58, 0x8b, 0xfa, 0x00, + 0x49, 0x15, 0x78, 0xd2, 0x4b, 0xed, 0xb8, 0x59, + 0x78, 0x9b, 0x7f, 0xaa, 0xfc, 0xe7, 0x46, 0xdc, + 0x7b, 0x34, 0xd0, 0x34, 0xe5, 0x10, 0xff, 0x4d, + 0x5a, 0x4d, 0x60, 0xa7, 0x16, 0x54, 0xc4, 0xfd, + 0xca, 0x5d, 0x68, 0xc7, 0x4a, 0x01, 0x8d, 0x7f, + 0x74, 0x5d, 0xff, 0xb8, 0x37, 0x15, 0x62, 0xfa, + 0x44, 0x45, 0xcf, 0x77, 0x3b, 0x1d, 0xb2, 0xd2, + 0x0d, 0x42, 0x00, 0x39, 0x68, 0x1f, 0xcc, 0x89, + 0x73, 0x5d, 0xa9, 0x2e, 0xfd, 0x58, 0x62, 0xca, + 0x35, 0x8e, 0x70, 0x70, 0xaa, 0x6e, 0x14, 0xe9, + 0xa4, 0xe2, 0x10, 0x66, 0x71, 0xdc, 0x4c, 0xfc, + 0xa9, 0xdc, 0x8f, 0x57, 0x4d, 0xc5, 0xac, 0xd7, + 0xa9, 0xf3, 0xf3, 0xa1, 0xff, 0x62, 0xa0, 0x8f, + 0xe4, 0x96, 0x3e, 0xcb, 0x9f, 0x76, 0x42, 0x39, + 0x1f, 0x24, 0xfd, 0xfd, 0x79, 0xe8, 0x27, 0xdf, + 0xa8, 0xf6, 0x33, 0x8b, 0x31, 0x59, 0x69, 0xcf, + 0x6a, 0xef, 0x89, 0x4d, 0xa7, 0xf6, 0x7e, 0x97, + 0x14, 0xbd, 0xda, 0xdd, 0xb4, 0x84, 0x04, 0x24, + 0xe0, 0x17, 0xe1, 0x0f, 0x1f, 0x8a, 0x6a, 0x71, + 0x74, 0x41, 0xdc, 0x59, 0x5c, 0x8f, 0x01, 0x25, + 0x92, 0xf0, 0x2e, 0x15, 0x62, 0x71, 0x9a, 0x9f, + 0x87, 0xdf, 0x62, 0x49, 0x7f, 0x86, 0x62, 0xfc, + 0x20, 0x84, 0xd7, 0xe3, 0x3a, 0xd9, 0x37, 0x85, + 0xb7, 0x84, 0x5a, 0xf9, 0xed, 0x21, 0x32, 0x94, + 0x3e, 0x04, 0xe7, 0x8c, 0x46, 0x76, 0x21, 0x67, + 0xf6, 0x95, 0x64, 0x92, 0xb7, 0x15, 0xf6, 0xe3, + 0x41, 0x27, 0x9d, 0xd7, 0xe3, 0x79, 0x75, 0x92, + 0xd0, 0xc1, 0xf3, 0x40, 0x92, 0x08, 0xde, 0x90, + 0x22, 0x82, 0xb2, 0x69, 0xae, 0x1a, 0x35, 0x11, + 0x89, 0xc8, 0x06, 0x82, 0x95, 0x23, 0x44, 0x08, + 0x22, 0xf2, 0x71, 0x73, 0x1b, 0x88, 
0x11, 0xcf, + 0x1c, 0x7e, 0x8a, 0x2e, 0xdc, 0x79, 0x57, 0xce, + 0x1f, 0xe7, 0x6c, 0x07, 0xd8, 0x06, 0xbe, 0xec, + 0xa3, 0xcf, 0xf9, 0x68, 0xa5, 0xb8, 0xf0, 0xe3, + 0x3f, 0x01, 0x92, 0xda, 0xf1, 0xa0, 0x2d, 0x7b, + 0xab, 0x57, 0x58, 0x2a, 0xaf, 0xab, 0xbd, 0xf2, + 0xe5, 0xaf, 0x7e, 0x1f, 0x46, 0x24, 0x9e, 0x20, + 0x22, 0x0f, 0x84, 0x4c, 0xb7, 0xd8, 0x03, 0xe8, + 0x09, 0x73, 0x6c, 0xc6, 0x9b, 0x90, 0xe0, 0xdb, + 0xf2, 0x71, 0xba, 0xad, 0xb3, 0xec, 0xda, 0x7a +}; +static const u8 output71[] __initconst = { + 0x28, 0xc5, 0x9b, 0x92, 0xf9, 0x21, 0x4f, 0xbb, + 0xef, 0x3b, 0xf0, 0xf5, 0x3a, 0x6d, 0x7f, 0xd6, + 0x6a, 0x8d, 0xa1, 0x01, 0x5c, 0x62, 0x20, 0x8b, + 0x5b, 0x39, 0xd5, 0xd3, 0xc2, 0xf6, 0x9d, 0x5e, + 0xcc, 0xe1, 0xa2, 0x61, 0x16, 0xe2, 0xce, 0xe9, + 0x86, 0xd0, 0xfc, 0xce, 0x9a, 0x28, 0x27, 0xc4, + 0x0c, 0xb9, 0xaa, 0x8d, 0x48, 0xdb, 0xbf, 0x82, + 0x7d, 0xd0, 0x35, 0xc4, 0x06, 0x34, 0xb4, 0x19, + 0x51, 0x73, 0xf4, 0x7a, 0xf4, 0xfd, 0xe9, 0x1d, + 0xdc, 0x0f, 0x7e, 0xf7, 0x96, 0x03, 0xe3, 0xb1, + 0x2e, 0x22, 0x59, 0xb7, 0x6d, 0x1c, 0x97, 0x8c, + 0xd7, 0x31, 0x08, 0x26, 0x4c, 0x6d, 0xc6, 0x14, + 0xa5, 0xeb, 0x45, 0x6a, 0x88, 0xa3, 0xa2, 0x36, + 0xc4, 0x35, 0xb1, 0x5a, 0xa0, 0xad, 0xf7, 0x06, + 0x9b, 0x5d, 0xc1, 0x15, 0xc1, 0xce, 0x0a, 0xb0, + 0x57, 0x2e, 0x3f, 0x6f, 0x0d, 0x10, 0xd9, 0x11, + 0x2c, 0x9c, 0xad, 0x2d, 0xa5, 0x81, 0xfb, 0x4e, + 0x8f, 0xd5, 0x32, 0x4e, 0xaf, 0x5c, 0xc1, 0x86, + 0xde, 0x56, 0x5a, 0x33, 0x29, 0xf7, 0x67, 0xc6, + 0x37, 0x6f, 0xb2, 0x37, 0x4e, 0xd4, 0x69, 0x79, + 0xaf, 0xd5, 0x17, 0x79, 0xe0, 0xba, 0x62, 0xa3, + 0x68, 0xa4, 0x87, 0x93, 0x8d, 0x7e, 0x8f, 0xa3, + 0x9c, 0xef, 0xda, 0xe3, 0xa5, 0x1f, 0xcd, 0x30, + 0xa6, 0x55, 0xac, 0x4c, 0x69, 0x74, 0x02, 0xc7, + 0x5d, 0x95, 0x81, 0x4a, 0x68, 0x11, 0xd3, 0xa9, + 0x98, 0xb1, 0x0b, 0x0d, 0xae, 0x40, 0x86, 0x65, + 0xbf, 0xcc, 0x2d, 0xef, 0x57, 0xca, 0x1f, 0xe4, + 0x34, 0x4e, 0xa6, 0x5e, 0x82, 0x6e, 0x61, 0xad, + 0x0b, 0x3c, 0xf8, 0xeb, 0x01, 0x43, 0x7f, 0x87, + 0xa2, 0xa7, 0x6a, 0xe9, 0x62, 0x23, 
0x24, 0x61, + 0xf1, 0xf7, 0x36, 0xdb, 0x10, 0xe5, 0x57, 0x72, + 0x3a, 0xc2, 0xae, 0xcc, 0x75, 0xc7, 0x80, 0x05, + 0x0a, 0x5c, 0x4c, 0x95, 0xda, 0x02, 0x01, 0x14, + 0x06, 0x6b, 0x5c, 0x65, 0xc2, 0xb8, 0x4a, 0xd6, + 0xd3, 0xb4, 0xd8, 0x12, 0x52, 0xb5, 0x60, 0xd3, + 0x8e, 0x5f, 0x5c, 0x76, 0x33, 0x7a, 0x05, 0xe5, + 0xcb, 0xef, 0x4f, 0x89, 0xf1, 0xba, 0x32, 0x6f, + 0x33, 0xcd, 0x15, 0x8d, 0xa3, 0x0c, 0x3f, 0x63, + 0x11, 0xe7, 0x0e, 0xe0, 0x00, 0x01, 0xe9, 0xe8, + 0x8e, 0x36, 0x34, 0x8d, 0x96, 0xb5, 0x03, 0xcf, + 0x55, 0x62, 0x49, 0x7a, 0x34, 0x44, 0xa5, 0xee, + 0x8c, 0x46, 0x06, 0x22, 0xab, 0x1d, 0x53, 0x9c, + 0xa1, 0xf9, 0x67, 0x18, 0x57, 0x89, 0xf9, 0xc2, + 0xd1, 0x7e, 0xbe, 0x36, 0x40, 0xcb, 0xe9, 0x04, + 0xde, 0xb1, 0x3b, 0x29, 0x52, 0xc5, 0x9a, 0xb5, + 0xa2, 0x7c, 0x7b, 0xfe, 0xe5, 0x92, 0x73, 0xea, + 0xea, 0x7b, 0xba, 0x0a, 0x8c, 0x88, 0x15, 0xe6, + 0x53, 0xbf, 0x1c, 0x33, 0xf4, 0x9b, 0x9a, 0x5e, + 0x8d, 0xae, 0x60, 0xdc, 0xcb, 0x5d, 0xfa, 0xbe, + 0x06, 0xc3, 0x3f, 0x06, 0xe7, 0x00, 0x40, 0x7b, + 0xaa, 0x94, 0xfa, 0x6d, 0x1f, 0xe4, 0xc5, 0xa9, + 0x1b, 0x5f, 0x36, 0xea, 0x5a, 0xdd, 0xa5, 0x48, + 0x6a, 0x55, 0xd2, 0x47, 0x28, 0xbf, 0x96, 0xf1, + 0x9f, 0xb6, 0x11, 0x4b, 0xd3, 0x44, 0x7d, 0x48, + 0x41, 0x61, 0xdb, 0x12, 0xd4, 0xc2, 0x59, 0x82, + 0x4c, 0x47, 0x5c, 0x04, 0xf6, 0x7b, 0xd3, 0x92, + 0x2e, 0xe8, 0x40, 0xef, 0x15, 0x32, 0x97, 0xdc, + 0x35, 0x4c, 0x6e, 0xa4, 0x97, 0xe9, 0x24, 0xde, + 0x63, 0x8b, 0xb1, 0x6b, 0x48, 0xbb, 0x46, 0x1f, + 0x84, 0xd6, 0x17, 0xb0, 0x5a, 0x4a, 0x4e, 0xd5, + 0x31, 0xd7, 0xcf, 0xa0, 0x39, 0xc6, 0x2e, 0xfc, + 0xa6, 0xa3, 0xd3, 0x0f, 0xa4, 0x28, 0xac, 0xb2, + 0xf4, 0x48, 0x8d, 0x50, 0xa5, 0x1c, 0x44, 0x5d, + 0x6e, 0x38, 0xb7, 0x2b, 0x8a, 0x45, 0xa7, 0x3d +}; +static const u8 key71[] __initconst = { + 0x8b, 0x68, 0xc4, 0xb7, 0x0d, 0x81, 0xef, 0x52, + 0x1e, 0x05, 0x96, 0x72, 0x62, 0x89, 0x27, 0x83, + 0xd0, 0xc7, 0x33, 0x6d, 0xf2, 0xcc, 0x69, 0xf9, + 0x23, 0xae, 0x99, 0xb1, 0xd1, 0x05, 0x4e, 0x54 +}; +enum { nonce71 = 0x983f03656d64b5f6ULL 
}; + +static const u8 input72[] __initconst = { + 0x6b, 0x09, 0xc9, 0x57, 0x3d, 0x79, 0x04, 0x8c, + 0x65, 0xad, 0x4a, 0x0f, 0xa1, 0x31, 0x3a, 0xdd, + 0x14, 0x8e, 0xe8, 0xfe, 0xbf, 0x42, 0x87, 0x98, + 0x2e, 0x8d, 0x83, 0xa3, 0xf8, 0x55, 0x3d, 0x84, + 0x1e, 0x0e, 0x05, 0x4a, 0x38, 0x9e, 0xe7, 0xfe, + 0xd0, 0x4d, 0x79, 0x74, 0x3a, 0x0b, 0x9b, 0xe1, + 0xfd, 0x51, 0x84, 0x4e, 0xb2, 0x25, 0xe4, 0x64, + 0x4c, 0xda, 0xcf, 0x46, 0xec, 0xba, 0x12, 0xeb, + 0x5a, 0x33, 0x09, 0x6e, 0x78, 0x77, 0x8f, 0x30, + 0xb1, 0x7d, 0x3f, 0x60, 0x8c, 0xf2, 0x1d, 0x8e, + 0xb4, 0x70, 0xa2, 0x90, 0x7c, 0x79, 0x1a, 0x2c, + 0xf6, 0x28, 0x79, 0x7c, 0x53, 0xc5, 0xfa, 0xcc, + 0x65, 0x9b, 0xe1, 0x51, 0xd1, 0x7f, 0x1d, 0xc4, + 0xdb, 0xd4, 0xd9, 0x04, 0x61, 0x7d, 0xbe, 0x12, + 0xfc, 0xcd, 0xaf, 0xe4, 0x0f, 0x9c, 0x20, 0xb5, + 0x22, 0x40, 0x18, 0xda, 0xe4, 0xda, 0x8c, 0x2d, + 0x84, 0xe3, 0x5f, 0x53, 0x17, 0xed, 0x78, 0xdc, + 0x2f, 0xe8, 0x31, 0xc7, 0xe6, 0x39, 0x71, 0x40, + 0xb4, 0x0f, 0xc9, 0xa9, 0x7e, 0x78, 0x87, 0xc1, + 0x05, 0x78, 0xbb, 0x01, 0xf2, 0x8f, 0x33, 0xb0, + 0x6e, 0x84, 0xcd, 0x36, 0x33, 0x5c, 0x5b, 0x8e, + 0xf1, 0xac, 0x30, 0xfe, 0x33, 0xec, 0x08, 0xf3, + 0x7e, 0xf2, 0xf0, 0x4c, 0xf2, 0xad, 0xd8, 0xc1, + 0xd4, 0x4e, 0x87, 0x06, 0xd4, 0x75, 0xe7, 0xe3, + 0x09, 0xd3, 0x4d, 0xe3, 0x21, 0x32, 0xba, 0xb4, + 0x68, 0x68, 0xcb, 0x4c, 0xa3, 0x1e, 0xb3, 0x87, + 0x7b, 0xd3, 0x0c, 0x63, 0x37, 0x71, 0x79, 0xfb, + 0x58, 0x36, 0x57, 0x0f, 0x34, 0x1d, 0xc1, 0x42, + 0x02, 0x17, 0xe7, 0xed, 0xe8, 0xe7, 0x76, 0xcb, + 0x42, 0xc4, 0x4b, 0xe2, 0xb2, 0x5e, 0x42, 0xd5, + 0xec, 0x9d, 0xc1, 0x32, 0x71, 0xe4, 0xeb, 0x10, + 0x68, 0x1a, 0x6e, 0x99, 0x8e, 0x73, 0x12, 0x1f, + 0x97, 0x0c, 0x9e, 0xcd, 0x02, 0x3e, 0x4c, 0xa0, + 0xf2, 0x8d, 0xe5, 0x44, 0xca, 0x6d, 0xfe, 0x07, + 0xe3, 0xe8, 0x9b, 0x76, 0xc1, 0x6d, 0xb7, 0x6e, + 0x0d, 0x14, 0x00, 0x6f, 0x8a, 0xfd, 0x43, 0xc6, + 0x43, 0xa5, 0x9c, 0x02, 0x47, 0x10, 0xd4, 0xb4, + 0x9b, 0x55, 0x67, 0xc8, 0x7f, 0xc1, 0x8a, 0x1f, + 0x1e, 0xd1, 0xbc, 0x99, 0x5d, 0x50, 0x4f, 0x89, + 
0xf1, 0xe6, 0x5d, 0x91, 0x40, 0xdc, 0x20, 0x67, + 0x56, 0xc2, 0xef, 0xbd, 0x2c, 0xa2, 0x99, 0x38, + 0xe0, 0x45, 0xec, 0x44, 0x05, 0x52, 0x65, 0x11, + 0xfc, 0x3b, 0x19, 0xcb, 0x71, 0xc2, 0x8e, 0x0e, + 0x03, 0x2a, 0x03, 0x3b, 0x63, 0x06, 0x31, 0x9a, + 0xac, 0x53, 0x04, 0x14, 0xd4, 0x80, 0x9d, 0x6b, + 0x42, 0x7e, 0x7e, 0x4e, 0xdc, 0xc7, 0x01, 0x49, + 0x9f, 0xf5, 0x19, 0x86, 0x13, 0x28, 0x2b, 0xa6, + 0xa6, 0xbe, 0xa1, 0x7e, 0x71, 0x05, 0x00, 0xff, + 0x59, 0x2d, 0xb6, 0x63, 0xf0, 0x1e, 0x2e, 0x69, + 0x9b, 0x85, 0xf1, 0x1e, 0x8a, 0x64, 0x39, 0xab, + 0x00, 0x12, 0xe4, 0x33, 0x4b, 0xb5, 0xd8, 0xb3, + 0x6b, 0x5b, 0x8b, 0x5c, 0xd7, 0x6f, 0x23, 0xcf, + 0x3f, 0x2e, 0x5e, 0x47, 0xb9, 0xb8, 0x1f, 0xf0, + 0x1d, 0xda, 0xe7, 0x4f, 0x6e, 0xab, 0xc3, 0x36, + 0xb4, 0x74, 0x6b, 0xeb, 0xc7, 0x5d, 0x91, 0xe5, + 0xda, 0xf2, 0xc2, 0x11, 0x17, 0x48, 0xf8, 0x9c, + 0xc9, 0x8b, 0xc1, 0xa2, 0xf4, 0xcd, 0x16, 0xf8, + 0x27, 0xd9, 0x6c, 0x6f, 0xb5, 0x8f, 0x77, 0xca, + 0x1b, 0xd8, 0xef, 0x84, 0x68, 0x71, 0x53, 0xc1, + 0x43, 0x0f, 0x9f, 0x98, 0xae, 0x7e, 0x31, 0xd2, + 0x98, 0xfb, 0x20, 0xa2, 0xad, 0x00, 0x10, 0x83, + 0x00, 0x8b, 0xeb, 0x56, 0xd2, 0xc4, 0xcc, 0x7f, + 0x2f, 0x4e, 0xfa, 0x88, 0x13, 0xa4, 0x2c, 0xde, + 0x6b, 0x77, 0x86, 0x10, 0x6a, 0xab, 0x43, 0x0a, + 0x02 +}; +static const u8 output72[] __initconst = { + 0x42, 0x89, 0xa4, 0x80, 0xd2, 0xcb, 0x5f, 0x7f, + 0x2a, 0x1a, 0x23, 0x00, 0xa5, 0x6a, 0x95, 0xa3, + 0x9a, 0x41, 0xa1, 0xd0, 0x2d, 0x1e, 0xd6, 0x13, + 0x34, 0x40, 0x4e, 0x7f, 0x1a, 0xbe, 0xa0, 0x3d, + 0x33, 0x9c, 0x56, 0x2e, 0x89, 0x25, 0x45, 0xf9, + 0xf0, 0xba, 0x9c, 0x6d, 0xd1, 0xd1, 0xde, 0x51, + 0x47, 0x63, 0xc9, 0xbd, 0xfa, 0xa2, 0x9e, 0xad, + 0x6a, 0x7b, 0x21, 0x1a, 0x6c, 0x3e, 0xff, 0x46, + 0xbe, 0xf3, 0x35, 0x7a, 0x6e, 0xb3, 0xb9, 0xf7, + 0xda, 0x5e, 0xf0, 0x14, 0xb5, 0x70, 0xa4, 0x2b, + 0xdb, 0xbb, 0xc7, 0x31, 0x4b, 0x69, 0x5a, 0x83, + 0x70, 0xd9, 0x58, 0xd4, 0x33, 0x84, 0x23, 0xf0, + 0xae, 0xbb, 0x6d, 0x26, 0x7c, 0xc8, 0x30, 0xf7, + 0x24, 0xad, 0xbd, 0xe4, 0x2c, 0x38, 0x38, 
0xac, + 0xe1, 0x4a, 0x9b, 0xac, 0x33, 0x0e, 0x4a, 0xf4, + 0x93, 0xed, 0x07, 0x82, 0x81, 0x4f, 0x8f, 0xb1, + 0xdd, 0x73, 0xd5, 0x50, 0x6d, 0x44, 0x1e, 0xbe, + 0xa7, 0xcd, 0x17, 0x57, 0xd5, 0x3b, 0x62, 0x36, + 0xcf, 0x7d, 0xc8, 0xd8, 0xd1, 0x78, 0xd7, 0x85, + 0x46, 0x76, 0x5d, 0xcc, 0xfe, 0xe8, 0x94, 0xc5, + 0xad, 0xbc, 0x5e, 0xbc, 0x8d, 0x1d, 0xdf, 0x03, + 0xc9, 0x6b, 0x1b, 0x81, 0xd1, 0xb6, 0x5a, 0x24, + 0xe3, 0xdc, 0x3f, 0x20, 0xc9, 0x07, 0x73, 0x4c, + 0x43, 0x13, 0x87, 0x58, 0x34, 0x0d, 0x14, 0x63, + 0x0f, 0x6f, 0xad, 0x8d, 0xac, 0x7c, 0x67, 0x68, + 0xa3, 0x9d, 0x7f, 0x00, 0xdf, 0x28, 0xee, 0x67, + 0xf4, 0x5c, 0x26, 0xcb, 0xef, 0x56, 0x71, 0xc8, + 0xc6, 0x67, 0x5f, 0x38, 0xbb, 0xa0, 0xb1, 0x5c, + 0x1f, 0xb3, 0x08, 0xd9, 0x38, 0xcf, 0x74, 0x54, + 0xc6, 0xa4, 0xc4, 0xc0, 0x9f, 0xb3, 0xd0, 0xda, + 0x62, 0x67, 0x8b, 0x81, 0x33, 0xf0, 0xa9, 0x73, + 0xa4, 0xd1, 0x46, 0x88, 0x8d, 0x85, 0x12, 0x40, + 0xba, 0x1a, 0xcd, 0x82, 0xd8, 0x8d, 0xc4, 0x52, + 0xe7, 0x01, 0x94, 0x2e, 0x0e, 0xd0, 0xaf, 0xe7, + 0x2d, 0x3f, 0x3c, 0xaa, 0xf4, 0xf5, 0xa7, 0x01, + 0x4c, 0x14, 0xe2, 0xc2, 0x96, 0x76, 0xbe, 0x05, + 0xaa, 0x19, 0xb1, 0xbd, 0x95, 0xbb, 0x5a, 0xf9, + 0xa5, 0xa7, 0xe6, 0x16, 0x38, 0x34, 0xf7, 0x9d, + 0x19, 0x66, 0x16, 0x8e, 0x7f, 0x2b, 0x5a, 0xfb, + 0xb5, 0x29, 0x79, 0xbf, 0x52, 0xae, 0x30, 0x95, + 0x3f, 0x31, 0x33, 0x28, 0xde, 0xc5, 0x0d, 0x55, + 0x89, 0xec, 0x21, 0x11, 0x0f, 0x8b, 0xfe, 0x63, + 0x3a, 0xf1, 0x95, 0x5c, 0xcd, 0x50, 0xe4, 0x5d, + 0x8f, 0xa7, 0xc8, 0xca, 0x93, 0xa0, 0x67, 0x82, + 0x63, 0x5c, 0xd0, 0xed, 0xe7, 0x08, 0xc5, 0x60, + 0xf8, 0xb4, 0x47, 0xf0, 0x1a, 0x65, 0x4e, 0xa3, + 0x51, 0x68, 0xc7, 0x14, 0xa1, 0xd9, 0x39, 0x72, + 0xa8, 0x6f, 0x7c, 0x7e, 0xf6, 0x03, 0x0b, 0x25, + 0x9b, 0xf2, 0xca, 0x49, 0xae, 0x5b, 0xf8, 0x0f, + 0x71, 0x51, 0x01, 0xa6, 0x23, 0xa9, 0xdf, 0xd0, + 0x7a, 0x39, 0x19, 0xf5, 0xc5, 0x26, 0x44, 0x7b, + 0x0a, 0x4a, 0x41, 0xbf, 0xf2, 0x8e, 0x83, 0x50, + 0x91, 0x96, 0x72, 0x02, 0xf6, 0x80, 0xbf, 0x95, + 0x41, 0xac, 0xda, 0xb0, 0xba, 0xe3, 0x76, 
0xb1, + 0x9d, 0xff, 0x1f, 0x33, 0x02, 0x85, 0xfc, 0x2a, + 0x29, 0xe6, 0xe3, 0x9d, 0xd0, 0xef, 0xc2, 0xd6, + 0x9c, 0x4a, 0x62, 0xac, 0xcb, 0xea, 0x8b, 0xc3, + 0x08, 0x6e, 0x49, 0x09, 0x26, 0x19, 0xc1, 0x30, + 0xcc, 0x27, 0xaa, 0xc6, 0x45, 0x88, 0xbd, 0xae, + 0xd6, 0x79, 0xff, 0x4e, 0xfc, 0x66, 0x4d, 0x02, + 0xa5, 0xee, 0x8e, 0xa5, 0xb6, 0x15, 0x72, 0x24, + 0xb1, 0xbf, 0xbf, 0x64, 0xcf, 0xcc, 0x93, 0xe9, + 0xb6, 0xfd, 0xb4, 0xb6, 0x21, 0xb5, 0x48, 0x08, + 0x0f, 0x11, 0x65, 0xe1, 0x47, 0xee, 0x93, 0x29, + 0xad +}; +static const u8 key72[] __initconst = { + 0xb9, 0xa2, 0xfc, 0x59, 0x06, 0x3f, 0x77, 0xa5, + 0x66, 0xd0, 0x2b, 0x22, 0x74, 0x22, 0x4c, 0x1e, + 0x6a, 0x39, 0xdf, 0xe1, 0x0d, 0x4c, 0x64, 0x99, + 0x54, 0x8a, 0xba, 0x1d, 0x2c, 0x21, 0x5f, 0xc3 +}; +enum { nonce72 = 0x3d069308fa3db04bULL }; + +static const u8 input73[] __initconst = { + 0xe4, 0xdd, 0x36, 0xd4, 0xf5, 0x70, 0x51, 0x73, + 0x97, 0x1d, 0x45, 0x05, 0x92, 0xe7, 0xeb, 0xb7, + 0x09, 0x82, 0x6e, 0x25, 0x6c, 0x50, 0xf5, 0x40, + 0x19, 0xba, 0xbc, 0xf4, 0x39, 0x14, 0xc5, 0x15, + 0x83, 0x40, 0xbd, 0x26, 0xe0, 0xff, 0x3b, 0x22, + 0x7c, 0x7c, 0xd7, 0x0b, 0xe9, 0x25, 0x0c, 0x3d, + 0x92, 0x38, 0xbe, 0xe4, 0x22, 0x75, 0x65, 0xf1, + 0x03, 0x85, 0x34, 0x09, 0xb8, 0x77, 0xfb, 0x48, + 0xb1, 0x2e, 0x21, 0x67, 0x9b, 0x9d, 0xad, 0x18, + 0x82, 0x0d, 0x6b, 0xc3, 0xcf, 0x00, 0x61, 0x6e, + 0xda, 0xdc, 0xa7, 0x0b, 0x5c, 0x02, 0x1d, 0xa6, + 0x4e, 0x0d, 0x7f, 0x37, 0x01, 0x5a, 0x37, 0xf3, + 0x2b, 0xbf, 0xba, 0xe2, 0x1c, 0xb3, 0xa3, 0xbc, + 0x1c, 0x93, 0x1a, 0xb1, 0x71, 0xaf, 0xe2, 0xdd, + 0x17, 0xee, 0x53, 0xfa, 0xfb, 0x02, 0x40, 0x3e, + 0x03, 0xca, 0xe7, 0xc3, 0x51, 0x81, 0xcc, 0x8c, + 0xca, 0xcf, 0x4e, 0xc5, 0x78, 0x99, 0xfd, 0xbf, + 0xea, 0xab, 0x38, 0x81, 0xfc, 0xd1, 0x9e, 0x41, + 0x0b, 0x84, 0x25, 0xf1, 0x6b, 0x3c, 0xf5, 0x40, + 0x0d, 0xc4, 0x3e, 0xb3, 0x6a, 0xec, 0x6e, 0x75, + 0xdc, 0x9b, 0xdf, 0x08, 0x21, 0x16, 0xfb, 0x7a, + 0x8e, 0x19, 0x13, 0x02, 0xa7, 0xfc, 0x58, 0x21, + 0xc3, 0xb3, 0x59, 0x5a, 0x9c, 0xef, 0x38, 0xbd, 
+ 0x87, 0x55, 0xd7, 0x0d, 0x1f, 0x84, 0xdc, 0x98, + 0x22, 0xca, 0x87, 0x96, 0x71, 0x6d, 0x68, 0x00, + 0xcb, 0x4f, 0x2f, 0xc4, 0x64, 0x0c, 0xc1, 0x53, + 0x0c, 0x90, 0xe7, 0x3c, 0x88, 0xca, 0xc5, 0x85, + 0xa3, 0x2a, 0x96, 0x7c, 0x82, 0x6d, 0x45, 0xf5, + 0xb7, 0x8d, 0x17, 0x69, 0xd6, 0xcd, 0x3c, 0xd3, + 0xe7, 0x1c, 0xce, 0x93, 0x50, 0xd4, 0x59, 0xa2, + 0xd8, 0x8b, 0x72, 0x60, 0x5b, 0x25, 0x14, 0xcd, + 0x5a, 0xe8, 0x8c, 0xdb, 0x23, 0x8d, 0x2b, 0x59, + 0x12, 0x13, 0x10, 0x47, 0xa4, 0xc8, 0x3c, 0xc1, + 0x81, 0x89, 0x6c, 0x98, 0xec, 0x8f, 0x7b, 0x32, + 0xf2, 0x87, 0xd9, 0xa2, 0x0d, 0xc2, 0x08, 0xf9, + 0xd5, 0xf3, 0x91, 0xe7, 0xb3, 0x87, 0xa7, 0x0b, + 0x64, 0x8f, 0xb9, 0x55, 0x1c, 0x81, 0x96, 0x6c, + 0xa1, 0xc9, 0x6e, 0x3b, 0xcd, 0x17, 0x1b, 0xfc, + 0xa6, 0x05, 0xba, 0x4a, 0x7d, 0x03, 0x3c, 0x59, + 0xc8, 0xee, 0x50, 0xb2, 0x5b, 0xe1, 0x4d, 0x6a, + 0x1f, 0x09, 0xdc, 0xa2, 0x51, 0xd1, 0x93, 0x3a, + 0x5f, 0x72, 0x1d, 0x26, 0x14, 0x62, 0xa2, 0x41, + 0x3d, 0x08, 0x70, 0x7b, 0x27, 0x3d, 0xbc, 0xdf, + 0x15, 0xfa, 0xb9, 0x5f, 0xb5, 0x38, 0x84, 0x0b, + 0x58, 0x3d, 0xee, 0x3f, 0x32, 0x65, 0x6d, 0xd7, + 0xce, 0x97, 0x3c, 0x8d, 0xfb, 0x63, 0xb9, 0xb0, + 0xa8, 0x4a, 0x72, 0x99, 0x97, 0x58, 0xc8, 0xa7, + 0xf9, 0x4c, 0xae, 0xc1, 0x63, 0xb9, 0x57, 0x18, + 0x8a, 0xfa, 0xab, 0xe9, 0xf3, 0x67, 0xe6, 0xfd, + 0xd2, 0x9d, 0x5c, 0xa9, 0x8e, 0x11, 0x0a, 0xf4, + 0x4b, 0xf1, 0xec, 0x1a, 0xaf, 0x50, 0x5d, 0x16, + 0x13, 0x69, 0x2e, 0xbd, 0x0d, 0xe6, 0xf0, 0xb2, + 0xed, 0xb4, 0x4c, 0x59, 0x77, 0x37, 0x00, 0x0b, + 0xc7, 0xa7, 0x9e, 0x37, 0xf3, 0x60, 0x70, 0xef, + 0xf3, 0xc1, 0x74, 0x52, 0x87, 0xc6, 0xa1, 0x81, + 0xbd, 0x0a, 0x2c, 0x5d, 0x2c, 0x0c, 0x6a, 0x81, + 0xa1, 0xfe, 0x26, 0x78, 0x6c, 0x03, 0x06, 0x07, + 0x34, 0xaa, 0xd1, 0x1b, 0x40, 0x03, 0x39, 0x56, + 0xcf, 0x2a, 0x92, 0xc1, 0x4e, 0xdf, 0x29, 0x24, + 0x83, 0x22, 0x7a, 0xea, 0x67, 0x1e, 0xe7, 0x54, + 0x64, 0xd3, 0xbd, 0x3a, 0x5d, 0xae, 0xca, 0xf0, + 0x9c, 0xd6, 0x5a, 0x9a, 0x62, 0xc8, 0xc7, 0x83, + 0xf9, 0x89, 0xde, 0x2d, 0x53, 0x64, 0x61, 0xf7, 
+ 0xa3, 0xa7, 0x31, 0x38, 0xc6, 0x22, 0x9c, 0xb4, + 0x87, 0xe0 +}; +static const u8 output73[] __initconst = { + 0x34, 0xed, 0x05, 0xb0, 0x14, 0xbc, 0x8c, 0xcc, + 0x95, 0xbd, 0x99, 0x0f, 0xb1, 0x98, 0x17, 0x10, + 0xae, 0xe0, 0x08, 0x53, 0xa3, 0x69, 0xd2, 0xed, + 0x66, 0xdb, 0x2a, 0x34, 0x8d, 0x0c, 0x6e, 0xce, + 0x63, 0x69, 0xc9, 0xe4, 0x57, 0xc3, 0x0c, 0x8b, + 0xa6, 0x2c, 0xa7, 0xd2, 0x08, 0xff, 0x4f, 0xec, + 0x61, 0x8c, 0xee, 0x0d, 0xfa, 0x6b, 0xe0, 0xe8, + 0x71, 0xbc, 0x41, 0x46, 0xd7, 0x33, 0x1d, 0xc0, + 0xfd, 0xad, 0xca, 0x8b, 0x34, 0x56, 0xa4, 0x86, + 0x71, 0x62, 0xae, 0x5e, 0x3d, 0x2b, 0x66, 0x3e, + 0xae, 0xd8, 0xc0, 0xe1, 0x21, 0x3b, 0xca, 0xd2, + 0x6b, 0xa2, 0xb8, 0xc7, 0x98, 0x4a, 0xf3, 0xcf, + 0xb8, 0x62, 0xd8, 0x33, 0xe6, 0x80, 0xdb, 0x2f, + 0x0a, 0xaf, 0x90, 0x3c, 0xe1, 0xec, 0xe9, 0x21, + 0x29, 0x42, 0x9e, 0xa5, 0x50, 0xe9, 0x93, 0xd3, + 0x53, 0x1f, 0xac, 0x2a, 0x24, 0x07, 0xb8, 0xed, + 0xed, 0x38, 0x2c, 0xc4, 0xa1, 0x2b, 0x31, 0x5d, + 0x9c, 0x24, 0x7b, 0xbf, 0xd9, 0xbb, 0x4e, 0x87, + 0x8f, 0x32, 0x30, 0xf1, 0x11, 0x29, 0x54, 0x94, + 0x00, 0x95, 0x1d, 0x1d, 0x24, 0xc0, 0xd4, 0x34, + 0x49, 0x1d, 0xd5, 0xe3, 0xa6, 0xde, 0x8b, 0xbf, + 0x5a, 0x9f, 0x58, 0x5a, 0x9b, 0x70, 0xe5, 0x9b, + 0xb3, 0xdb, 0xe8, 0xb8, 0xca, 0x1b, 0x43, 0xe3, + 0xc6, 0x6f, 0x0a, 0xd6, 0x32, 0x11, 0xd4, 0x04, + 0xef, 0xa3, 0xe4, 0x3f, 0x12, 0xd8, 0xc1, 0x73, + 0x51, 0x87, 0x03, 0xbd, 0xba, 0x60, 0x79, 0xee, + 0x08, 0xcc, 0xf7, 0xc0, 0xaa, 0x4c, 0x33, 0xc4, + 0xc7, 0x09, 0xf5, 0x91, 0xcb, 0x74, 0x57, 0x08, + 0x1b, 0x90, 0xa9, 0x1b, 0x60, 0x02, 0xd2, 0x3f, + 0x7a, 0xbb, 0xfd, 0x78, 0xf0, 0x15, 0xf9, 0x29, + 0x82, 0x8f, 0xc4, 0xb2, 0x88, 0x1f, 0xbc, 0xcc, + 0x53, 0x27, 0x8b, 0x07, 0x5f, 0xfc, 0x91, 0x29, + 0x82, 0x80, 0x59, 0x0a, 0x3c, 0xea, 0xc4, 0x7e, + 0xad, 0xd2, 0x70, 0x46, 0xbd, 0x9e, 0x3b, 0x1c, + 0x8a, 0x62, 0xea, 0x69, 0xbd, 0xf6, 0x96, 0x15, + 0xb5, 0x57, 0xe8, 0x63, 0x5f, 0x65, 0x46, 0x84, + 0x58, 0x50, 0x87, 0x4b, 0x0e, 0x5b, 0x52, 0x90, + 0xb0, 0xae, 0x37, 0x0f, 0xdd, 0x7e, 
0xa2, 0xa0, + 0x8b, 0x78, 0xc8, 0x5a, 0x1f, 0x53, 0xdb, 0xc5, + 0xbf, 0x73, 0x20, 0xa9, 0x44, 0xfb, 0x1e, 0xc7, + 0x97, 0xb2, 0x3a, 0x5a, 0x17, 0xe6, 0x8b, 0x9b, + 0xe8, 0xf8, 0x2a, 0x01, 0x27, 0xa3, 0x71, 0x28, + 0xe3, 0x19, 0xc6, 0xaf, 0xf5, 0x3a, 0x26, 0xc0, + 0x5c, 0x69, 0x30, 0x78, 0x75, 0x27, 0xf2, 0x0c, + 0x22, 0x71, 0x65, 0xc6, 0x8e, 0x7b, 0x47, 0xe3, + 0x31, 0xaf, 0x7b, 0xc6, 0xc2, 0x55, 0x68, 0x81, + 0xaa, 0x1b, 0x21, 0x65, 0xfb, 0x18, 0x35, 0x45, + 0x36, 0x9a, 0x44, 0xba, 0x5c, 0xff, 0x06, 0xde, + 0x3a, 0xc8, 0x44, 0x0b, 0xaa, 0x8e, 0x34, 0xe2, + 0x84, 0xac, 0x18, 0xfe, 0x9b, 0xe1, 0x4f, 0xaa, + 0xb6, 0x90, 0x0b, 0x1c, 0x2c, 0xd9, 0x9a, 0x10, + 0x18, 0xf9, 0x49, 0x41, 0x42, 0x1b, 0xb5, 0xe1, + 0x26, 0xac, 0x2d, 0x38, 0x00, 0x00, 0xe4, 0xb4, + 0x50, 0x6f, 0x14, 0x18, 0xd6, 0x3d, 0x00, 0x59, + 0x3c, 0x45, 0xf3, 0x42, 0x13, 0x44, 0xb8, 0x57, + 0xd4, 0x43, 0x5c, 0x8a, 0x2a, 0xb4, 0xfc, 0x0a, + 0x25, 0x5a, 0xdc, 0x8f, 0x11, 0x0b, 0x11, 0x44, + 0xc7, 0x0e, 0x54, 0x8b, 0x22, 0x01, 0x7e, 0x67, + 0x2e, 0x15, 0x3a, 0xb9, 0xee, 0x84, 0x10, 0xd4, + 0x80, 0x57, 0xd7, 0x75, 0xcf, 0x8b, 0xcb, 0x03, + 0xc9, 0x92, 0x2b, 0x69, 0xd8, 0x5a, 0x9b, 0x06, + 0x85, 0x47, 0xaa, 0x4c, 0x28, 0xde, 0x49, 0x58, + 0xe6, 0x11, 0x1e, 0x5e, 0x64, 0x8e, 0x3b, 0xe0, + 0x40, 0x2e, 0xac, 0x96, 0x97, 0x15, 0x37, 0x1e, + 0x30, 0xdd +}; +static const u8 key73[] __initconst = { + 0x96, 0x06, 0x1e, 0xc1, 0x6d, 0xba, 0x49, 0x5b, + 0x65, 0x80, 0x79, 0xdd, 0xf3, 0x67, 0xa8, 0x6e, + 0x2d, 0x9c, 0x54, 0x46, 0xd8, 0x4a, 0xeb, 0x7e, + 0x23, 0x86, 0x51, 0xd8, 0x49, 0x49, 0x56, 0xe0 +}; +enum { nonce73 = 0xbefb83cb67e11ffdULL }; + +static const u8 input74[] __initconst = { + 0x47, 0x22, 0x70, 0xe5, 0x2f, 0x41, 0x18, 0x45, + 0x07, 0xd3, 0x6d, 0x32, 0x0d, 0x43, 0x92, 0x2b, + 0x9b, 0x65, 0x73, 0x13, 0x1a, 0x4f, 0x49, 0x8f, + 0xff, 0xf8, 0xcc, 0xae, 0x15, 0xab, 0x9d, 0x7d, + 0xee, 0x22, 0x5d, 0x8b, 0xde, 0x81, 0x5b, 0x81, + 0x83, 0x49, 0x35, 0x9b, 0xb4, 0xbc, 0x4e, 0x01, + 0xc2, 0x29, 0xa7, 0xf1, 0xca, 0x3a, 
0xce, 0x3f, + 0xf5, 0x31, 0x93, 0xa8, 0xe2, 0xc9, 0x7d, 0x03, + 0x26, 0xa4, 0xbc, 0xa8, 0x9c, 0xb9, 0x68, 0xf3, + 0xb3, 0x91, 0xe8, 0xe6, 0xc7, 0x2b, 0x1a, 0xce, + 0xd2, 0x41, 0x53, 0xbd, 0xa3, 0x2c, 0x54, 0x94, + 0x21, 0xa1, 0x40, 0xae, 0xc9, 0x0c, 0x11, 0x92, + 0xfd, 0x91, 0xa9, 0x40, 0xca, 0xde, 0x21, 0x4e, + 0x1e, 0x3d, 0xcc, 0x2c, 0x87, 0x11, 0xef, 0x46, + 0xed, 0x52, 0x03, 0x11, 0x19, 0x43, 0x25, 0xc7, + 0x0d, 0xc3, 0x37, 0x5f, 0xd3, 0x6f, 0x0c, 0x6a, + 0x45, 0x30, 0x88, 0xec, 0xf0, 0x21, 0xef, 0x1d, + 0x7b, 0x38, 0x63, 0x4b, 0x49, 0x0c, 0x72, 0xf6, + 0x4c, 0x40, 0xc3, 0xcc, 0x03, 0xa7, 0xae, 0xa8, + 0x8c, 0x37, 0x03, 0x1c, 0x11, 0xae, 0x0d, 0x1b, + 0x62, 0x97, 0x27, 0xfc, 0x56, 0x4b, 0xb7, 0xfd, + 0xbc, 0xfb, 0x0e, 0xfc, 0x61, 0xad, 0xc6, 0xb5, + 0x9c, 0x8c, 0xc6, 0x38, 0x27, 0x91, 0x29, 0x3d, + 0x29, 0xc8, 0x37, 0xc9, 0x96, 0x69, 0xe3, 0xdc, + 0x3e, 0x61, 0x35, 0x9b, 0x99, 0x4f, 0xb9, 0x4e, + 0x5a, 0x29, 0x1c, 0x2e, 0xcf, 0x16, 0xcb, 0x69, + 0x87, 0xe4, 0x1a, 0xc4, 0x6e, 0x78, 0x43, 0x00, + 0x03, 0xb2, 0x8b, 0x03, 0xd0, 0xb4, 0xf1, 0xd2, + 0x7d, 0x2d, 0x7e, 0xfc, 0x19, 0x66, 0x5b, 0xa3, + 0x60, 0x3f, 0x9d, 0xbd, 0xfa, 0x3e, 0xca, 0x7b, + 0x26, 0x08, 0x19, 0x16, 0x93, 0x5d, 0x83, 0xfd, + 0xf9, 0x21, 0xc6, 0x31, 0x34, 0x6f, 0x0c, 0xaa, + 0x28, 0xf9, 0x18, 0xa2, 0xc4, 0x78, 0x3b, 0x56, + 0xc0, 0x88, 0x16, 0xba, 0x22, 0x2c, 0x07, 0x2f, + 0x70, 0xd0, 0xb0, 0x46, 0x35, 0xc7, 0x14, 0xdc, + 0xbb, 0x56, 0x23, 0x1e, 0x36, 0x36, 0x2d, 0x73, + 0x78, 0xc7, 0xce, 0xf3, 0x58, 0xf7, 0x58, 0xb5, + 0x51, 0xff, 0x33, 0x86, 0x0e, 0x3b, 0x39, 0xfb, + 0x1a, 0xfd, 0xf8, 0x8b, 0x09, 0x33, 0x1b, 0x83, + 0xf2, 0xe6, 0x38, 0x37, 0xef, 0x47, 0x84, 0xd9, + 0x82, 0x77, 0x2b, 0x82, 0xcc, 0xf9, 0xee, 0x94, + 0x71, 0x78, 0x81, 0xc8, 0x4d, 0x91, 0xd7, 0x35, + 0x29, 0x31, 0x30, 0x5c, 0x4a, 0x23, 0x23, 0xb1, + 0x38, 0x6b, 0xac, 0x22, 0x3f, 0x80, 0xc7, 0xe0, + 0x7d, 0xfa, 0x76, 0x47, 0xd4, 0x6f, 0x93, 0xa0, + 0xa0, 0x93, 0x5d, 0x68, 0xf7, 0x43, 0x25, 0x8f, + 0x1b, 0xc7, 0x87, 0xea, 0x59, 0x0c, 
0xa2, 0xfa, + 0xdb, 0x2f, 0x72, 0x43, 0xcf, 0x90, 0xf1, 0xd6, + 0x58, 0xf3, 0x17, 0x6a, 0xdf, 0xb3, 0x4e, 0x0e, + 0x38, 0x24, 0x48, 0x1f, 0xb7, 0x01, 0xec, 0x81, + 0xb1, 0x87, 0x5b, 0xec, 0x9c, 0x11, 0x1a, 0xff, + 0xa5, 0xca, 0x5a, 0x63, 0x31, 0xb2, 0xe4, 0xc6, + 0x3c, 0x1d, 0xaf, 0x27, 0xb2, 0xd4, 0x19, 0xa2, + 0xcc, 0x04, 0x92, 0x42, 0xd2, 0xc1, 0x8c, 0x3b, + 0xce, 0xf5, 0x74, 0xc1, 0x81, 0xf8, 0x20, 0x23, + 0x6f, 0x20, 0x6d, 0x78, 0x36, 0x72, 0x2c, 0x52, + 0xdf, 0x5e, 0xe8, 0x75, 0xce, 0x1c, 0x49, 0x9d, + 0x93, 0x6f, 0x65, 0xeb, 0xb1, 0xbd, 0x8e, 0x5e, + 0xe5, 0x89, 0xc4, 0x8a, 0x81, 0x3d, 0x9a, 0xa7, + 0x11, 0x82, 0x8e, 0x38, 0x5b, 0x5b, 0xca, 0x7d, + 0x4b, 0x72, 0xc2, 0x9c, 0x30, 0x5e, 0x7f, 0xc0, + 0x6f, 0x91, 0xd5, 0x67, 0x8c, 0x3e, 0xae, 0xda, + 0x2b, 0x3c, 0x53, 0xcc, 0x50, 0x97, 0x36, 0x0b, + 0x79, 0xd6, 0x73, 0x6e, 0x7d, 0x42, 0x56, 0xe1, + 0xaa, 0xfc, 0xb3, 0xa7, 0xc8, 0x01, 0xaa, 0xc1, + 0xfc, 0x5c, 0x72, 0x8e, 0x63, 0xa8, 0x46, 0x18, + 0xee, 0x11, 0xe7, 0x30, 0x09, 0x83, 0x6c, 0xd9, + 0xf4, 0x7a, 0x7b, 0xb5, 0x1f, 0x6d, 0xc7, 0xbc, + 0xcb, 0x55, 0xea, 0x40, 0x58, 0x7a, 0x00, 0x00, + 0x90, 0x60, 0xc5, 0x64, 0x69, 0x05, 0x99, 0xd2, + 0x49, 0x62, 0x4f, 0xcb, 0x97, 0xdf, 0xdd, 0x6b, + 0x60, 0x75, 0xe2, 0xe0, 0x6f, 0x76, 0xd0, 0x37, + 0x67, 0x0a, 0xcf, 0xff, 0xc8, 0x61, 0x84, 0x14, + 0x80, 0x7c, 0x1d, 0x31, 0x8d, 0x90, 0xde, 0x0b, + 0x1c, 0x74, 0x9f, 0x82, 0x96, 0x80, 0xda, 0xaf, + 0x8d, 0x99, 0x86, 0x9f, 0x24, 0x99, 0x28, 0x3e, + 0xe0, 0xa3, 0xc3, 0x90, 0x2d, 0x14, 0x65, 0x1e, + 0x3b, 0xb9, 0xba, 0x13, 0xa5, 0x77, 0x73, 0x63, + 0x9a, 0x06, 0x3d, 0xa9, 0x28, 0x9b, 0xba, 0x25, + 0x61, 0xc9, 0xcd, 0xcf, 0x7a, 0x4d, 0x96, 0x09, + 0xcb, 0xca, 0x03, 0x9c, 0x54, 0x34, 0x31, 0x85, + 0xa0, 0x3d, 0xe5, 0xbc, 0xa5, 0x5f, 0x1b, 0xd3, + 0x10, 0x63, 0x74, 0x9d, 0x01, 0x92, 0x88, 0xf0, + 0x27, 0x9c, 0x28, 0xd9, 0xfd, 0xe2, 0x4e, 0x01, + 0x8d, 0x61, 0x79, 0x60, 0x61, 0x5b, 0x76, 0xab, + 0x06, 0xd3, 0x44, 0x87, 0x43, 0x52, 0xcd, 0x06, + 0x68, 0x1e, 0x2d, 0xc5, 0xb0, 0x07, 
0x25, 0xdf, + 0x0a, 0x50, 0xd7, 0xd9, 0x08, 0x53, 0x65, 0xf1, + 0x0c, 0x2c, 0xde, 0x3f, 0x9d, 0x03, 0x1f, 0xe1, + 0x49, 0x43, 0x3c, 0x83, 0x81, 0x37, 0xf8, 0xa2, + 0x0b, 0xf9, 0x61, 0x1c, 0xc1, 0xdb, 0x79, 0xbc, + 0x64, 0xce, 0x06, 0x4e, 0x87, 0x89, 0x62, 0x73, + 0x51, 0xbc, 0xa4, 0x32, 0xd4, 0x18, 0x62, 0xab, + 0x65, 0x7e, 0xad, 0x1e, 0x91, 0xa3, 0xfa, 0x2d, + 0x58, 0x9e, 0x2a, 0xe9, 0x74, 0x44, 0x64, 0x11, + 0xe6, 0xb6, 0xb3, 0x00, 0x7e, 0xa3, 0x16, 0xef, + 0x72 +}; +static const u8 output74[] __initconst = { + 0xf5, 0xca, 0x45, 0x65, 0x50, 0x35, 0x47, 0x67, + 0x6f, 0x4f, 0x67, 0xff, 0x34, 0xd9, 0xc3, 0x37, + 0x2a, 0x26, 0xb0, 0x4f, 0x08, 0x1e, 0x45, 0x13, + 0xc7, 0x2c, 0x14, 0x75, 0x33, 0xd8, 0x8e, 0x1e, + 0x1b, 0x11, 0x0d, 0x97, 0x04, 0x33, 0x8a, 0xe4, + 0xd8, 0x8d, 0x0e, 0x12, 0x8d, 0xdb, 0x6e, 0x02, + 0xfa, 0xe5, 0xbd, 0x3a, 0xb5, 0x28, 0x07, 0x7d, + 0x20, 0xf0, 0x12, 0x64, 0x83, 0x2f, 0x59, 0x79, + 0x17, 0x88, 0x3c, 0x2d, 0x08, 0x2f, 0x55, 0xda, + 0xcc, 0x02, 0x3a, 0x82, 0xcd, 0x03, 0x94, 0xdf, + 0xdf, 0xab, 0x8a, 0x13, 0xf5, 0xe6, 0x74, 0xdf, + 0x7b, 0xe2, 0xab, 0x34, 0xbc, 0x00, 0x85, 0xbf, + 0x5a, 0x48, 0xc8, 0xff, 0x8d, 0x6c, 0x27, 0x48, + 0x19, 0x2d, 0x08, 0xfa, 0x82, 0x62, 0x39, 0x55, + 0x32, 0x11, 0xa8, 0xd7, 0xb9, 0x08, 0x2c, 0xd6, + 0x7a, 0xd9, 0x83, 0x9f, 0x9b, 0xfb, 0xec, 0x3a, + 0xd1, 0x08, 0xc7, 0xad, 0xdc, 0x98, 0x4c, 0xbc, + 0x98, 0xeb, 0x36, 0xb0, 0x39, 0xf4, 0x3a, 0xd6, + 0x53, 0x02, 0xa0, 0xa9, 0x73, 0xa1, 0xca, 0xef, + 0xd8, 0xd2, 0xec, 0x0e, 0xf8, 0xf5, 0xac, 0x8d, + 0x34, 0x41, 0x06, 0xa8, 0xc6, 0xc3, 0x31, 0xbc, + 0xe5, 0xcc, 0x7e, 0x72, 0x63, 0x59, 0x3e, 0x63, + 0xc2, 0x8d, 0x2b, 0xd5, 0xb9, 0xfd, 0x1e, 0x31, + 0x69, 0x32, 0x05, 0xd6, 0xde, 0xc9, 0xe6, 0x4c, + 0xac, 0x68, 0xf7, 0x1f, 0x9d, 0xcd, 0x0e, 0xa2, + 0x15, 0x3d, 0xd6, 0x47, 0x99, 0xab, 0x08, 0x5f, + 0x28, 0xc3, 0x4c, 0xc2, 0xd5, 0xdd, 0x10, 0xb7, + 0xbd, 0xdb, 0x9b, 0xcf, 0x85, 0x27, 0x29, 0x76, + 0x98, 0xeb, 0xad, 0x31, 0x64, 0xe7, 0xfb, 0x61, + 0xe0, 0xd8, 0x1a, 0xa6, 0xe2, 
0xe7, 0x43, 0x42, + 0x77, 0xc9, 0x82, 0x00, 0xac, 0x85, 0xe0, 0xa2, + 0xd4, 0x62, 0xe3, 0xb7, 0x17, 0x6e, 0xb2, 0x9e, + 0x21, 0x58, 0x73, 0xa9, 0x53, 0x2d, 0x3c, 0xe1, + 0xdd, 0xd6, 0x6e, 0x92, 0xf2, 0x1d, 0xc2, 0x22, + 0x5f, 0x9a, 0x7e, 0xd0, 0x52, 0xbf, 0x54, 0x19, + 0xd7, 0x80, 0x63, 0x3e, 0xd0, 0x08, 0x2d, 0x37, + 0x0c, 0x15, 0xf7, 0xde, 0xab, 0x2b, 0xe3, 0x16, + 0x21, 0x3a, 0xee, 0xa5, 0xdc, 0xdf, 0xde, 0xa3, + 0x69, 0xcb, 0xfd, 0x92, 0x89, 0x75, 0xcf, 0xc9, + 0x8a, 0xa4, 0xc8, 0xdd, 0xcc, 0x21, 0xe6, 0xfe, + 0x9e, 0x43, 0x76, 0xb2, 0x45, 0x22, 0xb9, 0xb5, + 0xac, 0x7e, 0x3d, 0x26, 0xb0, 0x53, 0xc8, 0xab, + 0xfd, 0xea, 0x2c, 0xd1, 0x44, 0xc5, 0x60, 0x1b, + 0x8a, 0x99, 0x0d, 0xa5, 0x0e, 0x67, 0x6e, 0x3a, + 0x96, 0x55, 0xec, 0xe8, 0xcc, 0xbe, 0x49, 0xd9, + 0xf2, 0x72, 0x9f, 0x30, 0x21, 0x97, 0x57, 0x19, + 0xbe, 0x5e, 0x33, 0x0c, 0xee, 0xc0, 0x72, 0x0d, + 0x2e, 0xd1, 0xe1, 0x52, 0xc2, 0xea, 0x41, 0xbb, + 0xe1, 0x6d, 0xd4, 0x17, 0xa9, 0x8d, 0x89, 0xa9, + 0xd6, 0x4b, 0xc6, 0x4c, 0xf2, 0x88, 0x97, 0x54, + 0x3f, 0x4f, 0x57, 0xb7, 0x37, 0xf0, 0x2c, 0x11, + 0x15, 0x56, 0xdb, 0x28, 0xb5, 0x16, 0x84, 0x66, + 0xce, 0x45, 0x3f, 0x61, 0x75, 0xb6, 0xbe, 0x00, + 0xd1, 0xe4, 0xf5, 0x27, 0x54, 0x7f, 0xc2, 0xf1, + 0xb3, 0x32, 0x9a, 0xe8, 0x07, 0x02, 0xf3, 0xdb, + 0xa9, 0xd1, 0xc2, 0xdf, 0xee, 0xad, 0xe5, 0x8a, + 0x3c, 0xfa, 0x67, 0xec, 0x6b, 0xa4, 0x08, 0xfe, + 0xba, 0x5a, 0x58, 0x0b, 0x78, 0x11, 0x91, 0x76, + 0xe3, 0x1a, 0x28, 0x54, 0x5e, 0xbd, 0x71, 0x1b, + 0x8b, 0xdc, 0x6c, 0xf4, 0x6f, 0xd7, 0xf4, 0xf3, + 0xe1, 0x03, 0xa4, 0x3c, 0x8d, 0x91, 0x2e, 0xba, + 0x5f, 0x7f, 0x8c, 0xaf, 0x69, 0x89, 0x29, 0x0a, + 0x5b, 0x25, 0x13, 0xc4, 0x2e, 0x16, 0xc2, 0x15, + 0x07, 0x5d, 0x58, 0x33, 0x7c, 0xe0, 0xf0, 0x55, + 0x5f, 0xbf, 0x5e, 0xf0, 0x71, 0x48, 0x8f, 0xf7, + 0x48, 0xb3, 0xf7, 0x0d, 0xa1, 0xd0, 0x63, 0xb1, + 0xad, 0xae, 0xb5, 0xb0, 0x5f, 0x71, 0xaf, 0x24, + 0x8b, 0xb9, 0x1c, 0x44, 0xd2, 0x1a, 0x53, 0xd1, + 0xd5, 0xb4, 0xa9, 0xff, 0x88, 0x73, 0xb5, 0xaa, + 0x15, 0x32, 0x5f, 0x59, 0x9d, 
0x2e, 0xb5, 0xcb, + 0xde, 0x21, 0x2e, 0xe9, 0x35, 0xed, 0xfd, 0x0f, + 0xb6, 0xbb, 0xe6, 0x4b, 0x16, 0xf1, 0x45, 0x1e, + 0xb4, 0x84, 0xe9, 0x58, 0x1c, 0x0c, 0x95, 0xc0, + 0xcf, 0x49, 0x8b, 0x59, 0xa1, 0x78, 0xe6, 0x80, + 0x12, 0x49, 0x7a, 0xd4, 0x66, 0x62, 0xdf, 0x9c, + 0x18, 0xc8, 0x8c, 0xda, 0xc1, 0xa6, 0xbc, 0x65, + 0x28, 0xd2, 0xa4, 0xe8, 0xf1, 0x35, 0xdb, 0x5a, + 0x75, 0x1f, 0x73, 0x60, 0xec, 0xa8, 0xda, 0x5a, + 0x43, 0x15, 0x83, 0x9b, 0xe7, 0xb1, 0xa6, 0x81, + 0xbb, 0xef, 0xf3, 0x8f, 0x0f, 0xd3, 0x79, 0xa2, + 0xe5, 0xaa, 0x42, 0xef, 0xa0, 0x13, 0x4e, 0x91, + 0x2d, 0xcb, 0x61, 0x7a, 0x9a, 0x33, 0x14, 0x50, + 0x77, 0x4a, 0xd0, 0x91, 0x48, 0xe0, 0x0c, 0xe0, + 0x11, 0xcb, 0xdf, 0xb0, 0xce, 0x06, 0xd2, 0x79, + 0x4d, 0x69, 0xb9, 0xc9, 0x36, 0x74, 0x8f, 0x81, + 0x72, 0x73, 0xf3, 0x17, 0xb7, 0x13, 0xcb, 0x5b, + 0xd2, 0x5c, 0x33, 0x61, 0xb7, 0x61, 0x79, 0xb0, + 0xc0, 0x4d, 0xa1, 0xc7, 0x5d, 0x98, 0xc9, 0xe1, + 0x98, 0xbd, 0x78, 0x5a, 0x2c, 0x64, 0x53, 0xaf, + 0xaf, 0x66, 0x51, 0x47, 0xe4, 0x48, 0x66, 0x8b, + 0x07, 0x52, 0xa3, 0x03, 0x93, 0x28, 0xad, 0xcc, + 0xa3, 0x86, 0xad, 0x63, 0x04, 0x35, 0x6c, 0x49, + 0xd5, 0x28, 0x0e, 0x00, 0x47, 0xf4, 0xd4, 0x32, + 0x27, 0x19, 0xb3, 0x29, 0xe7, 0xbc, 0xbb, 0xce, + 0x3e, 0x3e, 0xd5, 0x67, 0x20, 0xe4, 0x0b, 0x75, + 0x95, 0x24, 0xe0, 0x6c, 0xb6, 0x29, 0x0c, 0x14, + 0xfd +}; +static const u8 key74[] __initconst = { + 0xf0, 0x41, 0x5b, 0x00, 0x56, 0xc4, 0xac, 0xf6, + 0xa2, 0x4c, 0x33, 0x41, 0x16, 0x09, 0x1b, 0x8e, + 0x4d, 0xe8, 0x8c, 0xd9, 0x48, 0xab, 0x3e, 0x60, + 0xcb, 0x49, 0x3e, 0xaf, 0x2b, 0x8b, 0xc8, 0xf0 +}; +enum { nonce74 = 0xcbdb0ffd0e923384ULL }; + +static const struct chacha20_testvec chacha20_testvecs[] __initconst = { + { input01, output01, key01, nonce01, sizeof(input01) }, + { input02, output02, key02, nonce02, sizeof(input02) }, + { input03, output03, key03, nonce03, sizeof(input03) }, + { input04, output04, key04, nonce04, sizeof(input04) }, + { input05, output05, key05, nonce05, sizeof(input05) }, + { input06, 
output06, key06, nonce06, sizeof(input06) }, + { input07, output07, key07, nonce07, sizeof(input07) }, + { input08, output08, key08, nonce08, sizeof(input08) }, + { input09, output09, key09, nonce09, sizeof(input09) }, + { input10, output10, key10, nonce10, sizeof(input10) }, + { input11, output11, key11, nonce11, sizeof(input11) }, + { input12, output12, key12, nonce12, sizeof(input12) }, + { input13, output13, key13, nonce13, sizeof(input13) }, + { input14, output14, key14, nonce14, sizeof(input14) }, + { input15, output15, key15, nonce15, sizeof(input15) }, + { input16, output16, key16, nonce16, sizeof(input16) }, + { input17, output17, key17, nonce17, sizeof(input17) }, + { input18, output18, key18, nonce18, sizeof(input18) }, + { input19, output19, key19, nonce19, sizeof(input19) }, + { input20, output20, key20, nonce20, sizeof(input20) }, + { input21, output21, key21, nonce21, sizeof(input21) }, + { input22, output22, key22, nonce22, sizeof(input22) }, + { input23, output23, key23, nonce23, sizeof(input23) }, + { input24, output24, key24, nonce24, sizeof(input24) }, + { input25, output25, key25, nonce25, sizeof(input25) }, + { input26, output26, key26, nonce26, sizeof(input26) }, + { input27, output27, key27, nonce27, sizeof(input27) }, + { input28, output28, key28, nonce28, sizeof(input28) }, + { input29, output29, key29, nonce29, sizeof(input29) }, + { input30, output30, key30, nonce30, sizeof(input30) }, + { input31, output31, key31, nonce31, sizeof(input31) }, + { input32, output32, key32, nonce32, sizeof(input32) }, + { input33, output33, key33, nonce33, sizeof(input33) }, + { input34, output34, key34, nonce34, sizeof(input34) }, + { input35, output35, key35, nonce35, sizeof(input35) }, + { input36, output36, key36, nonce36, sizeof(input36) }, + { input37, output37, key37, nonce37, sizeof(input37) }, + { input38, output38, key38, nonce38, sizeof(input38) }, + { input39, output39, key39, nonce39, sizeof(input39) }, + { input40, output40, key40, nonce40, 
sizeof(input40) }, + { input41, output41, key41, nonce41, sizeof(input41) }, + { input42, output42, key42, nonce42, sizeof(input42) }, + { input43, output43, key43, nonce43, sizeof(input43) }, + { input44, output44, key44, nonce44, sizeof(input44) }, + { input45, output45, key45, nonce45, sizeof(input45) }, + { input46, output46, key46, nonce46, sizeof(input46) }, + { input47, output47, key47, nonce47, sizeof(input47) }, + { input48, output48, key48, nonce48, sizeof(input48) }, + { input49, output49, key49, nonce49, sizeof(input49) }, + { input50, output50, key50, nonce50, sizeof(input50) }, + { input51, output51, key51, nonce51, sizeof(input51) }, + { input52, output52, key52, nonce52, sizeof(input52) }, + { input53, output53, key53, nonce53, sizeof(input53) }, + { input54, output54, key54, nonce54, sizeof(input54) }, + { input55, output55, key55, nonce55, sizeof(input55) }, + { input56, output56, key56, nonce56, sizeof(input56) }, + { input57, output57, key57, nonce57, sizeof(input57) }, + { input58, output58, key58, nonce58, sizeof(input58) }, + { input59, output59, key59, nonce59, sizeof(input59) }, + { input60, output60, key60, nonce60, sizeof(input60) }, + { input61, output61, key61, nonce61, sizeof(input61) }, + { input62, output62, key62, nonce62, sizeof(input62) }, + { input63, output63, key63, nonce63, sizeof(input63) }, + { input64, output64, key64, nonce64, sizeof(input64) }, + { input65, output65, key65, nonce65, sizeof(input65) }, + { input66, output66, key66, nonce66, sizeof(input66) }, + { input67, output67, key67, nonce67, sizeof(input67) }, + { input68, output68, key68, nonce68, sizeof(input68) }, + { input69, output69, key69, nonce69, sizeof(input69) }, + { input70, output70, key70, nonce70, sizeof(input70) }, + { input71, output71, key71, nonce71, sizeof(input71) }, + { input72, output72, key72, nonce72, sizeof(input72) }, + { input73, output73, key73, nonce73, sizeof(input73) }, + { input74, output74, key74, nonce74, sizeof(input74) } +}; + 
+static const struct hchacha20_testvec hchacha20_testvecs[] __initconst = {{ + .key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .nonce = { 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x4a, + 0x00, 0x00, 0x00, 0x00, 0x31, 0x41, 0x59, 0x27 }, + .output = { 0x82, 0x41, 0x3b, 0x42, 0x27, 0xb2, 0x7b, 0xfe, + 0xd3, 0x0e, 0x42, 0x50, 0x8a, 0x87, 0x7d, 0x73, + 0xa0, 0xf9, 0xe4, 0xd5, 0x8a, 0x74, 0xa8, 0x53, + 0xc1, 0x2e, 0xc4, 0x13, 0x26, 0xd3, 0xec, 0xdc } +}}; + +static bool __init chacha20_selftest(void) +{ + enum { + MAXIMUM_TEST_BUFFER_LEN = 1UL << 10, + OUTRAGEOUSLY_HUGE_BUFFER_LEN = PAGE_SIZE * 35 + 17 /* 143k */ + }; + size_t i, j, k; + u32 derived_key[CHACHA20_KEY_WORDS]; + u8 *offset_input = NULL, *computed_output = NULL, *massive_input = NULL; + u8 offset_key[CHACHA20_KEY_SIZE + 1] + __aligned(__alignof__(unsigned long)); + struct chacha20_ctx state; + bool success = true; + simd_context_t simd_context; + + offset_input = kmalloc(MAXIMUM_TEST_BUFFER_LEN + 1, GFP_KERNEL); + computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN + 1, GFP_KERNEL); + massive_input = vzalloc(OUTRAGEOUSLY_HUGE_BUFFER_LEN); + if (!computed_output || !offset_input || !massive_input) { + pr_err("chacha20 self-test malloc: FAIL\n"); + success = false; + goto out; + } + + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20_testvecs); ++i) { + /* Boring case */ + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + chacha20_init(&state, chacha20_testvecs[i].key, + chacha20_testvecs[i].nonce); + chacha20(&state, computed_output, chacha20_testvecs[i].input, + chacha20_testvecs[i].ilen, &simd_context); + if (memcmp(computed_output, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu: FAIL\n", i + 1); + success = false; + } + for (k = 
chacha20_testvecs[i].ilen; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + + /* Unaligned case */ + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + memcpy(offset_input + 1, chacha20_testvecs[i].input, + chacha20_testvecs[i].ilen); + memcpy(offset_key + 1, chacha20_testvecs[i].key, + CHACHA20_KEY_SIZE); + chacha20_init(&state, offset_key + 1, chacha20_testvecs[i].nonce); + chacha20(&state, computed_output + 1, offset_input + 1, + chacha20_testvecs[i].ilen, &simd_context); + if (memcmp(computed_output + 1, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu (unaligned): FAIL\n", + i + 1); + success = false; + } + if (computed_output[0]) { + pr_err("chacha20 self-test %zu (unaligned, zero check): FAIL\n", + i + 1); + success = false; + } + for (k = chacha20_testvecs[i].ilen + 1; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (unaligned, zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + + /* Chunked case */ + if (chacha20_testvecs[i].ilen <= CHACHA20_BLOCK_SIZE) + goto next_test; + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + chacha20_init(&state, chacha20_testvecs[i].key, + chacha20_testvecs[i].nonce); + chacha20(&state, computed_output, chacha20_testvecs[i].input, + CHACHA20_BLOCK_SIZE, &simd_context); + chacha20(&state, computed_output + CHACHA20_BLOCK_SIZE, + chacha20_testvecs[i].input + CHACHA20_BLOCK_SIZE, + chacha20_testvecs[i].ilen - CHACHA20_BLOCK_SIZE, + &simd_context); + if (memcmp(computed_output, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu (chunked): FAIL\n", + i + 1); + success = false; + } + for (k = chacha20_testvecs[i].ilen; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + 
if (computed_output[k]) { + pr_err("chacha20 self-test %zu (chunked, zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + +next_test: + /* Sliding unaligned case */ + if (chacha20_testvecs[i].ilen > CHACHA20_BLOCK_SIZE + 1 || + !chacha20_testvecs[i].ilen) + continue; + for (j = 1; j < CHACHA20_BLOCK_SIZE; ++j) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + memcpy(offset_input + j, chacha20_testvecs[i].input, + chacha20_testvecs[i].ilen); + chacha20_init(&state, chacha20_testvecs[i].key, + chacha20_testvecs[i].nonce); + chacha20(&state, computed_output + j, offset_input + j, + chacha20_testvecs[i].ilen, &simd_context); + if (memcmp(computed_output + j, + chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu (unaligned, slide %zu): FAIL\n", + i + 1, j); + success = false; + } + for (k = j; k < j; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (unaligned, slide %zu, zero check): FAIL\n", + i + 1, j); + success = false; + break; + } + } + for (k = chacha20_testvecs[i].ilen + j; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (unaligned, slide %zu, zero check): FAIL\n", + i + 1, j); + success = false; + break; + } + } + } + } + for (i = 0; i < ARRAY_SIZE(hchacha20_testvecs); ++i) { + memset(&derived_key, 0, sizeof(derived_key)); + hchacha20(derived_key, hchacha20_testvecs[i].nonce, + hchacha20_testvecs[i].key, &simd_context); + cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key)); + if (memcmp(derived_key, hchacha20_testvecs[i].output, + CHACHA20_KEY_SIZE)) { + pr_err("hchacha20 self-test %zu: FAIL\n", i + 1); + success = false; + } + } + memset(&state, 0, sizeof(state)); + chacha20_init(&state, chacha20_testvecs[0].key, + chacha20_testvecs[0].nonce); + chacha20(&state, massive_input, massive_input, + OUTRAGEOUSLY_HUGE_BUFFER_LEN, &simd_context); + chacha20_init(&state, 
chacha20_testvecs[0].key, + chacha20_testvecs[0].nonce); + chacha20(&state, massive_input, massive_input, + OUTRAGEOUSLY_HUGE_BUFFER_LEN, DONT_USE_SIMD); + for (k = 0; k < OUTRAGEOUSLY_HUGE_BUFFER_LEN; ++k) { + if (massive_input[k]) { + pr_err("chacha20 self-test massive: FAIL\n"); + success = false; + break; + } + } + + simd_put(&simd_context); + +out: + kfree(offset_input); + kfree(computed_output); + vfree(massive_input); + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c b/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c new file mode 100644 index 000000000000..c58ac6e69d54 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c @@ -0,0 +1,9076 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +struct chacha20poly1305_testvec { + const u8 *input, *output, *assoc, *nonce, *key; + size_t ilen, alen, nlen; + bool failure; +}; + +/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539 + * 2.8.2. After they are generated by reference implementations. And the final + * marked ones are taken from wycheproof, but we only do these for the encrypt + * side, because mostly we're stressing the primitives rather than the actual + * chapoly construction. This also requires adding a 96-bit nonce construction, + * just for the purpose of the tests. 
+ */ + +static const u8 enc_input001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 enc_output001[] __initconst = { + 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, + 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, + 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, + 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2, + 0x4c, 0x6c, 0xfc, 0x18, 0x75, 
0x5d, 0x43, 0xee, + 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, + 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, + 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf, + 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, + 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, + 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, + 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, + 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, + 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, + 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, + 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, + 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, + 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, + 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, + 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, + 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, + 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, + 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, + 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, + 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, + 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, + 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, + 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, + 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, + 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, + 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, + 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, + 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, + 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, + 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, + 0x38 +}; +static const u8 enc_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 enc_nonce001[] __initconst = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 +}; +static const u8 enc_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 
0xc0 +}; + +static const u8 enc_input002[] __initconst = { }; +static const u8 enc_output002[] __initconst = { + 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, + 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 +}; +static const u8 enc_assoc002[] __initconst = { }; +static const u8 enc_nonce002[] __initconst = { + 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e +}; +static const u8 enc_key002[] __initconst = { + 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, + 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, + 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, + 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 +}; + +static const u8 enc_input003[] __initconst = { }; +static const u8 enc_output003[] __initconst = { + 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, + 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 +}; +static const u8 enc_assoc003[] __initconst = { + 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b +}; +static const u8 enc_nonce003[] __initconst = { + 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d +}; +static const u8 enc_key003[] __initconst = { + 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, + 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, + 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, + 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d +}; + +static const u8 enc_input004[] __initconst = { + 0xa4 +}; +static const u8 enc_output004[] __initconst = { + 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, + 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, + 0x89 +}; +static const u8 enc_assoc004[] __initconst = { + 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 +}; +static const u8 enc_nonce004[] __initconst = { + 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 +}; +static const u8 enc_key004[] __initconst = { + 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, + 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, + 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, + 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e +}; + +static const u8 enc_input005[] __initconst = { + 0x2d 
+}; +static const u8 enc_output005[] __initconst = { + 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, + 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c, + 0xac +}; +static const u8 enc_assoc005[] __initconst = { }; +static const u8 enc_nonce005[] __initconst = { + 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 +}; +static const u8 enc_key005[] __initconst = { + 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, + 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, + 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, + 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 +}; + +static const u8 enc_input006[] __initconst = { + 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, + 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, + 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, + 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, + 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, + 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, + 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, + 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, + 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, + 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e, + 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, + 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, + 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, + 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, + 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, + 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, + 0x8f +}; +static const u8 enc_output006[] __initconst = { + 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, + 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, + 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, + 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, + 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, + 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, + 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc, + 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, + 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5, + 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, + 0xbd, 0xbb, 0xe7, 
0xca, 0xa6, 0x6c, 0x6b, 0x93, + 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, + 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, + 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, + 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, + 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, + 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, + 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, + 0xeb +}; +static const u8 enc_assoc006[] __initconst = { + 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b +}; +static const u8 enc_nonce006[] __initconst = { + 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c +}; +static const u8 enc_key006[] __initconst = { + 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, + 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, + 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, + 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 +}; + +static const u8 enc_input007[] __initconst = { + 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, + 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, + 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, + 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, + 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, + 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, + 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, + 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, + 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, + 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, + 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, + 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, + 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, + 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, + 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, + 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, + 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, + 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80, + 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, + 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, + 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, + 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, + 0x7b, 0x3e, 
0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, + 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, + 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, + 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, + 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, + 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, + 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3, + 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, + 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, + 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 +}; +static const u8 enc_output007[] __initconst = { + 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, + 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, + 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c, + 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, + 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, + 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, + 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, + 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, + 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, + 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, + 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, + 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, + 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, + 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, + 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, + 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, + 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, + 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, + 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, + 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, + 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, + 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, + 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, + 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e, + 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, + 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, + 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, + 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, + 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, + 0xfe, 
0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, + 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, + 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, + 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, + 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 +}; +static const u8 enc_assoc007[] __initconst = { }; +static const u8 enc_nonce007[] __initconst = { + 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 +}; +static const u8 enc_key007[] __initconst = { + 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, + 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, + 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, + 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 +}; + +static const u8 enc_input008[] __initconst = { + 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10, + 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, + 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, + 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, + 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, + 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, + 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, + 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, + 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, + 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, + 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, + 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, + 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, + 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, + 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, + 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, + 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, + 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, + 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, + 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10, + 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, + 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, + 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, + 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, + 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, + 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, + 
0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, + 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, + 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, + 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, + 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17, + 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, + 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, + 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, + 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, + 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, + 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, + 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, + 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, + 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed, + 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2, + 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, + 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, + 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, + 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, + 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, + 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, + 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, + 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, + 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, + 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, + 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, + 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, + 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, + 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, + 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, + 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, + 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, + 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30, + 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, + 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, + 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, + 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, + 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 +}; +static const u8 enc_output008[] __initconst = { + 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, 
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, + 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, + 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, + 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, + 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, + 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, + 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, + 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, + 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, + 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, + 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, + 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, + 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, + 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, + 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, + 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, + 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, + 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, + 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, + 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, + 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, + 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, + 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, + 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, + 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, + 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, + 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c, + 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, + 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, + 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, + 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, + 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82, + 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, + 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, + 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, + 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c, + 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, + 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, + 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, + 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, 
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, + 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, + 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, + 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, + 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, + 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, + 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, + 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, + 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, + 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, + 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, + 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, + 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, + 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, + 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, + 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, + 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, + 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, + 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, + 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, + 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, + 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, + 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, + 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, + 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 +}; +static const u8 enc_assoc008[] __initconst = { }; +static const u8 enc_nonce008[] __initconst = { + 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 +}; +static const u8 enc_key008[] __initconst = { + 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, + 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0, + 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, + 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba +}; + +static const u8 enc_input009[] __initconst = { + 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, + 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, + 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, + 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, + 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, + 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 
0x85, + 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, + 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, + 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, + 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, + 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, + 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, + 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, + 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, + 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, + 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, + 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab, + 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, + 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, + 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, + 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, + 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, + 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, + 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, + 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, + 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, + 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, + 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, + 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, + 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, + 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, + 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, + 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62, + 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, + 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, + 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, + 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, + 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, + 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, + 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, + 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, + 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, + 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, + 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, + 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, + 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 
0xb6, + 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, + 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, + 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, + 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, + 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, + 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, + 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, + 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, + 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, + 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5, + 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, + 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, + 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, + 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, + 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, + 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, + 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, + 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, + 0x65 +}; +static const u8 enc_output009[] __initconst = { + 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, + 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, + 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, + 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, + 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, + 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe, + 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, + 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, + 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, + 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09, + 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, + 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, + 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, + 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, + 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, + 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, + 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, + 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, + 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, + 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, + 0x6e, 0xd7, 0x72, 0x12, 0x3b, 
0x2d, 0x24, 0xe9, + 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, + 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, + 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, + 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, + 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, + 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, + 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, + 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, + 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, + 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, + 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, + 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, + 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, + 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, + 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, + 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, + 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, + 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, + 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, + 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, + 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, + 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, + 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, + 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e, + 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, + 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, + 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, + 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, + 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, + 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, + 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, + 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, + 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, + 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, + 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, + 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, + 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, + 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, + 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, + 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 
0x0a, 0xbc, 0xa2, + 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, + 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, + 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, + 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, + 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, + 0xae +}; +static const u8 enc_assoc009[] __initconst = { + 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, + 0xef +}; +static const u8 enc_nonce009[] __initconst = { + 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 +}; +static const u8 enc_key009[] __initconst = { + 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, + 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, + 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, + 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b +}; + +static const u8 enc_input010[] __initconst = { + 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, + 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, + 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, + 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, + 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, + 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, + 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4, + 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, + 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, + 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, + 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, + 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b, + 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, + 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, + 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca, + 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, + 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, + 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, + 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, + 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, + 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, + 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, + 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, + 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, + 0x0a, 
0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, + 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, + 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, + 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, + 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, + 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, + 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, + 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, + 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f, + 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, + 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, + 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, + 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, + 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, + 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, + 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, + 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, + 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, + 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce, + 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, + 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, + 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa, + 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, + 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, + 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, + 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, + 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e, + 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, + 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, + 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, + 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e, + 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, + 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, + 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, + 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, + 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, + 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, + 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, + 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, + 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, + 0x28, 
0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, + 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, + 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, + 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, + 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, + 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, + 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, + 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, + 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, + 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, + 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, + 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, + 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, + 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, + 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, + 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, + 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, + 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, + 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, + 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, + 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf, + 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, + 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, + 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, + 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, + 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71, + 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, + 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, + 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, + 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, + 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a, + 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, + 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, + 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, + 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, + 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, + 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, + 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, + 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, + 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f, + 0x71, 
0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, + 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, + 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, + 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, + 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, + 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, + 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, + 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, + 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, + 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a, + 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, + 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, + 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, + 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, + 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, + 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, + 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, + 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, + 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, + 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d, + 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, + 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, + 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, + 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f +}; +static const u8 enc_output010[] __initconst = { + 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, + 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, + 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, + 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, + 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, + 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, + 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, + 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, + 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, + 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, + 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, + 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, + 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, + 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, + 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, + 
0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, + 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, + 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, + 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, + 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, + 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, + 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, + 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, + 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, + 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, + 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19, + 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, + 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, + 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, + 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, + 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, + 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a, + 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, + 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, + 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, + 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, + 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, + 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, + 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, + 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, + 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, + 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, + 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, + 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, + 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, + 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae, + 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, + 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, + 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, + 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, + 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, + 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, + 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, + 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, + 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, + 
0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, + 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, + 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, + 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, + 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, + 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, + 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, + 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, + 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, + 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, + 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe, + 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, + 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, + 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, + 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, + 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, + 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, + 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, + 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, + 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, + 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, + 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, + 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, + 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, + 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, + 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, + 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, + 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, + 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, + 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, + 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, + 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, + 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, + 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, + 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, + 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, + 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, + 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, + 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, + 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, + 
0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, + 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, + 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, + 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56, + 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, + 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, + 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, + 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27, + 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, + 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, + 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43, + 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, + 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, + 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, + 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, + 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, + 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, + 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, + 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, + 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, + 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, + 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1, + 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd, + 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, + 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, + 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, + 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, + 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, + 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, + 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, + 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, + 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, + 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, + 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, + 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 +}; +static const u8 enc_assoc010[] __initconst = { + 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, + 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 +}; +static const u8 enc_nonce010[] __initconst = { + 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 
+}; +static const u8 enc_key010[] __initconst = { + 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44, + 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, + 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, + 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 +}; + +static const u8 enc_input011[] __initconst = { + 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, + 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, + 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, + 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, + 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, + 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, + 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, + 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, + 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, + 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, + 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, + 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, + 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, + 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, + 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, + 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7, + 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, + 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, + 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, + 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, + 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, + 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, + 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, + 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, + 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, + 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, + 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, + 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, + 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, + 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, + 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10, + 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, + 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, + 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 
0xa0, + 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, + 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, + 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, + 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, + 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, + 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, + 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, + 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, + 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, + 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, + 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, + 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, + 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d, + 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, + 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, + 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, + 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, + 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, + 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, + 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, + 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20, + 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, + 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, + 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, + 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, + 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, + 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, + 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, + 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, + 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, + 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, + 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, + 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, + 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, + 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, + 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, + 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35, + 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, + 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, + 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 
0x01, + 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, + 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, + 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, + 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, + 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, + 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, + 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, + 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, + 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, + 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, + 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, + 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, + 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, + 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, + 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, + 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, + 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, + 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, + 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, + 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e, + 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, + 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, + 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, + 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, + 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, + 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, + 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, + 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, + 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, + 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, + 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, + 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, + 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, + 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, + 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, + 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, + 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd, + 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, + 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, + 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 
0x69, + 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, + 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, + 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, + 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57, + 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, + 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, + 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, + 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, + 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, + 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, + 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, + 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, + 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, + 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, + 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, + 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, + 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, + 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc, + 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0, + 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, + 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, + 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, + 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, + 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, + 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, + 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, + 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, + 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, + 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, + 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, + 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, + 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, + 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, + 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, + 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, + 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, + 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde, + 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, + 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, + 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 
0x9f, + 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, + 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, + 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, + 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, + 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, + 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, + 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, + 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, + 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, + 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, + 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, + 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, + 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, + 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, + 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, + 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, + 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76, + 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5, + 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, + 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, + 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, + 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, + 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, + 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, + 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, + 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, + 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, + 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, + 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, + 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, + 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, + 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, + 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, + 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, + 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6, + 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, + 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc, + 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, + 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, + 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 
0x76, + 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, + 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, + 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, + 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, + 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, + 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, + 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, + 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, + 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, + 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, + 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, + 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, + 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, + 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, + 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, + 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9, + 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, + 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, + 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, + 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, + 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, + 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, + 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, + 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, + 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, + 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, + 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, + 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, + 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, + 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, + 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, + 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, + 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, + 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, + 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, + 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, + 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f, + 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, + 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, + 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 
0x7d, + 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, + 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, + 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, + 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, + 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, + 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, + 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, + 0x10, 0x1e, 0xbf, 0xec, 0xa8 +}; +static const u8 enc_output011[] __initconst = { + 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, + 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, + 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, + 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, + 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, + 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, + 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, + 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd, + 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, + 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, + 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, + 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, + 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, + 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, + 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, + 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, + 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, + 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, + 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, + 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, + 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, + 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, + 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, + 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91, + 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, + 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, + 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, + 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, + 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, + 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, + 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, + 0xda, 
0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, + 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, + 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, + 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, + 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, + 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, + 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, + 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, + 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, + 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, + 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, + 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, + 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, + 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, + 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94, + 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1, + 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, + 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, + 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, + 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, + 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, + 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, + 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, + 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, + 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, + 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, + 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, + 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, + 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, + 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, + 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, + 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, + 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, + 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, + 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, + 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, + 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, + 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, + 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, + 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, + 0xa6, 
0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, + 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, + 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, + 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, + 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, + 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, + 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, + 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, + 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, + 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, + 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, + 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, + 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, + 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, + 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b, + 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, + 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, + 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, + 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, + 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, + 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, + 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, + 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, + 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, + 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, + 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, + 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, + 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, + 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, + 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, + 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, + 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, + 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, + 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, + 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, + 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, + 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, + 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, + 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, + 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, + 0x64, 
0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, + 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, + 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, + 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, + 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, + 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6, + 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, + 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, + 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, + 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, + 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, + 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, + 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, + 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35, + 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, + 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, + 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, + 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, + 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, + 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, + 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, + 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, + 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, + 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, + 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, + 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, + 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, + 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, + 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, + 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, + 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, + 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, + 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, + 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, + 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, + 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, + 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, + 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, + 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, + 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, + 0xf8, 
0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, + 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, + 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, + 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, + 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, + 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, + 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, + 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, + 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, + 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, + 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, + 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, + 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, + 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, + 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, + 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, + 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, + 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, + 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, + 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, + 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, + 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, + 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, + 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, + 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, + 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, + 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, + 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, + 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, + 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91, + 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, + 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, + 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, + 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, + 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, + 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, + 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e, + 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, + 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, + 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, + 0xc2, 
0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, + 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, + 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, + 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, + 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, + 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, + 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, + 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, + 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, + 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, + 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, + 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, + 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, + 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, + 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, + 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, + 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, + 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, + 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, + 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, + 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, + 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, + 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, + 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, + 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, + 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, + 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, + 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, + 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23, + 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, + 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, + 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, + 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, + 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, + 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, + 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, + 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, + 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, + 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, + 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, + 0xed, 
0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, + 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, + 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, + 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, + 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, + 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, + 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, + 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, + 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, + 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, + 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, + 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, + 0x2b, 0xdf, 0xcd, 0xf9, 0x3c +}; +static const u8 enc_assoc011[] __initconst = { + 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 +}; +static const u8 enc_nonce011[] __initconst = { + 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa +}; +static const u8 enc_key011[] __initconst = { + 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, + 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, + 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, + 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 +}; + +static const u8 enc_input012[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 
0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 
0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 
0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 
0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 
0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 
0xef, 0x49, 0x89, 0x5e, + 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 enc_output012[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 
0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 
0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 
0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 
0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 
0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 
0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 
0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd6 +}; +static const u8 enc_assoc012[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 enc_nonce012[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 enc_key012[] __initconst = { + 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +/* wycheproof - rfc7539 */ +static const u8 enc_input013[] __initconst = { + 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, + 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63, + 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, + 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f, + 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, + 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, + 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73, + 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f, + 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, + 
0x74, 0x2e +}; +static const u8 enc_output013[] __initconst = { + 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb, + 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2, + 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe, + 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6, + 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12, + 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b, + 0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29, + 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36, + 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c, + 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58, + 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94, + 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc, + 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d, + 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b, + 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09, + 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60, + 0x06, 0x91 +}; +static const u8 enc_assoc013[] __initconst = { + 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7 +}; +static const u8 enc_nonce013[] __initconst = { + 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47 +}; +static const u8 enc_key013[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input014[] __initconst = { }; +static const u8 enc_output014[] __initconst = { + 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5, + 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d +}; +static const u8 enc_assoc014[] __initconst = { }; +static const u8 enc_nonce014[] __initconst = { + 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1, + 0xea, 0x12, 0x37, 0x9d +}; +static const u8 enc_key014[] __initconst = { + 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96, + 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0, + 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08, + 0x42, 
0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0 +}; + +/* wycheproof - misc */ +static const u8 enc_input015[] __initconst = { }; +static const u8 enc_output015[] __initconst = { + 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b, + 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09 +}; +static const u8 enc_assoc015[] __initconst = { + 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10 +}; +static const u8 enc_nonce015[] __initconst = { + 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16, + 0xa3, 0xc6, 0xf6, 0x89 +}; +static const u8 enc_key015[] __initconst = { + 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb, + 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22, + 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63, + 0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42 +}; + +/* wycheproof - misc */ +static const u8 enc_input016[] __initconst = { + 0x2a +}; +static const u8 enc_output016[] __initconst = { + 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80, + 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75, + 0x22 +}; +static const u8 enc_assoc016[] __initconst = { }; +static const u8 enc_nonce016[] __initconst = { + 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd, + 0xee, 0xab, 0x60, 0xf1 +}; +static const u8 enc_key016[] __initconst = { + 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50, + 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa, + 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a, + 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73 +}; + +/* wycheproof - misc */ +static const u8 enc_input017[] __initconst = { + 0x51 +}; +static const u8 enc_output017[] __initconst = { + 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7, + 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72, + 0xb9 +}; +static const u8 enc_assoc017[] __initconst = { + 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53 +}; +static const u8 enc_nonce017[] __initconst = { + 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2, + 0x78, 0x2f, 0x44, 0x03 +}; +static const u8 enc_key017[] __initconst = { + 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5, + 
0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f, + 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5, + 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee +}; + +/* wycheproof - misc */ +static const u8 enc_input018[] __initconst = { + 0x5c, 0x60 +}; +static const u8 enc_output018[] __initconst = { + 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39, + 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22, + 0xe5, 0xe2 +}; +static const u8 enc_assoc018[] __initconst = { }; +static const u8 enc_nonce018[] __initconst = { + 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34, + 0x7e, 0x03, 0xf2, 0xdb +}; +static const u8 enc_key018[] __initconst = { + 0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89, + 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e, + 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f, + 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00 +}; + +/* wycheproof - misc */ +static const u8 enc_input019[] __initconst = { + 0xdd, 0xf2 +}; +static const u8 enc_output019[] __initconst = { + 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec, + 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24, + 0x37, 0xa2 +}; +static const u8 enc_assoc019[] __initconst = { + 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf +}; +static const u8 enc_nonce019[] __initconst = { + 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90, + 0xb6, 0x04, 0x0a, 0xc6 +}; +static const u8 enc_key019[] __initconst = { + 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48, + 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff, + 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44, + 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58 +}; + +/* wycheproof - misc */ +static const u8 enc_input020[] __initconst = { + 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31 +}; +static const u8 enc_output020[] __initconst = { + 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed, + 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28, + 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42 +}; +static const u8 enc_assoc020[] __initconst = { }; +static const u8 enc_nonce020[] __initconst = { + 0x3c, 
0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59, + 0x6d, 0xc5, 0x5b, 0xb7 +}; +static const u8 enc_key020[] __initconst = { + 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f, + 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f, + 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3, + 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7 +}; + +/* wycheproof - misc */ +static const u8 enc_input021[] __initconst = { + 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90 +}; +static const u8 enc_output021[] __initconst = { + 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18, + 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5, + 0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba +}; +static const u8 enc_assoc021[] __initconst = { + 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53 +}; +static const u8 enc_nonce021[] __initconst = { + 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98, + 0xde, 0x94, 0x83, 0x96 +}; +static const u8 enc_key021[] __initconst = { + 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5, + 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4, + 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda, + 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f +}; + +/* wycheproof - misc */ +static const u8 enc_input022[] __initconst = { + 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed +}; +static const u8 enc_output022[] __initconst = { + 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17, + 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93, + 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21 +}; +static const u8 enc_assoc022[] __initconst = { }; +static const u8 enc_nonce022[] __initconst = { + 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2, + 0x36, 0x18, 0x23, 0xd3 +}; +static const u8 enc_key022[] __initconst = { + 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf, + 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d, + 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7, + 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52 +}; + +/* wycheproof - misc */ +static const u8 enc_input023[] __initconst = { + 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf +}; 
+static const u8 enc_output023[] __initconst = { + 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0, + 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0, + 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18 +}; +static const u8 enc_assoc023[] __initconst = { + 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d +}; +static const u8 enc_nonce023[] __initconst = { + 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33, + 0x66, 0x48, 0x4a, 0x78 +}; +static const u8 enc_key023[] __initconst = { + 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54, + 0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a, + 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe, + 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd +}; + +/* wycheproof - misc */ +static const u8 enc_input024[] __initconst = { + 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c, + 0x36, 0x8d, 0x14, 0xe0 +}; +static const u8 enc_output024[] __initconst = { + 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a, + 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27, + 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10, + 0x6d, 0xcb, 0x29, 0xb4 +}; +static const u8 enc_assoc024[] __initconst = { }; +static const u8 enc_nonce024[] __initconst = { + 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e, + 0x07, 0x53, 0x86, 0x56 +}; +static const u8 enc_key024[] __initconst = { + 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0, + 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39, + 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5, + 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe +}; + +/* wycheproof - misc */ +static const u8 enc_input025[] __initconst = { + 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7, + 0x59, 0xb1, 0xa0, 0xda +}; +static const u8 enc_output025[] __initconst = { + 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38, + 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c, + 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7, + 0xcf, 0x05, 0x07, 0x2f +}; +static const u8 enc_assoc025[] __initconst = { + 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9 +}; +static const u8 
enc_nonce025[] __initconst = { + 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d, + 0xd9, 0x06, 0xe9, 0xce +}; +static const u8 enc_key025[] __initconst = { + 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40, + 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70, + 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e, + 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57 +}; + +/* wycheproof - misc */ +static const u8 enc_input026[] __initconst = { + 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac, + 0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07 +}; +static const u8 enc_output026[] __initconst = { + 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf, + 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a, + 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79, + 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2 +}; +static const u8 enc_assoc026[] __initconst = { }; +static const u8 enc_nonce026[] __initconst = { + 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda, + 0xd4, 0x61, 0xd2, 0x3c +}; +static const u8 enc_key026[] __initconst = { + 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda, + 0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77, + 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0, + 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb +}; + +/* wycheproof - misc */ +static const u8 enc_input027[] __initconst = { + 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f, + 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73 +}; +static const u8 enc_output027[] __initconst = { + 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81, + 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe, + 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9, + 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17 +}; +static const u8 enc_assoc027[] __initconst = { + 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12 +}; +static const u8 enc_nonce027[] __initconst = { + 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14, + 0xc5, 0x03, 0x5b, 0x6a +}; +static const u8 enc_key027[] __initconst = { + 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f, + 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38, + 
0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b, + 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d +}; + +/* wycheproof - misc */ +static const u8 enc_input028[] __initconst = { + 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0, + 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88 +}; +static const u8 enc_output028[] __initconst = { + 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4, + 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13, + 0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a, + 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c +}; +static const u8 enc_assoc028[] __initconst = { }; +static const u8 enc_nonce028[] __initconst = { + 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8, + 0x47, 0x40, 0xad, 0x9b +}; +static const u8 enc_key028[] __initconst = { + 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7, + 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7, + 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63, + 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a +}; + +/* wycheproof - misc */ +static const u8 enc_input029[] __initconst = { + 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09, + 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6 +}; +static const u8 enc_output029[] __initconst = { + 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3, + 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c, + 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc, + 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d +}; +static const u8 enc_assoc029[] __initconst = { + 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff +}; +static const u8 enc_nonce029[] __initconst = { + 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80, + 0x07, 0x1f, 0x52, 0x66 +}; +static const u8 enc_key029[] __initconst = { + 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8, + 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14, + 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d, + 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e +}; + +/* wycheproof - misc */ +static const u8 enc_input030[] __initconst = { + 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15, + 0x91, 
0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e, + 0x1f +}; +static const u8 enc_output030[] __initconst = { + 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd, + 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74, + 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80, + 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08, + 0x13 +}; +static const u8 enc_assoc030[] __initconst = { }; +static const u8 enc_nonce030[] __initconst = { + 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42, + 0xdc, 0x03, 0x44, 0x5d +}; +static const u8 enc_key030[] __initconst = { + 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21, + 0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e, + 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b, + 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11 +}; + +/* wycheproof - misc */ +static const u8 enc_input031[] __initconst = { + 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88, + 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c, + 0x9c +}; +static const u8 enc_output031[] __initconst = { + 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b, + 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e, + 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38, + 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c, + 0xeb +}; +static const u8 enc_assoc031[] __initconst = { + 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79 +}; +static const u8 enc_nonce031[] __initconst = { + 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10, + 0x63, 0x3d, 0x99, 0x3d +}; +static const u8 enc_key031[] __initconst = { + 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7, + 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f, + 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82, + 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70 +}; + +/* wycheproof - misc */ +static const u8 enc_input032[] __initconst = { + 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66, + 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7, + 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11 +}; +static const u8 enc_output032[] __initconst = { + 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d, + 0x9d, 
0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0, + 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64, + 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d, + 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a +}; +static const u8 enc_assoc032[] __initconst = { }; +static const u8 enc_nonce032[] __initconst = { + 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76, + 0x2c, 0x65, 0xf3, 0x1b +}; +static const u8 enc_key032[] __initconst = { + 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87, + 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f, + 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2, + 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7 +}; + +/* wycheproof - misc */ +static const u8 enc_input033[] __initconst = { + 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d, + 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c, + 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93 +}; +static const u8 enc_output033[] __initconst = { + 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69, + 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4, + 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00, + 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63, + 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65 +}; +static const u8 enc_assoc033[] __initconst = { + 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08 +}; +static const u8 enc_nonce033[] __initconst = { + 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd, + 0x78, 0x34, 0xed, 0x55 +}; +static const u8 enc_key033[] __initconst = { + 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed, + 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda, + 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d, + 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3 +}; + +/* wycheproof - misc */ +static const u8 enc_input034[] __initconst = { + 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f, + 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c, + 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26, + 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29 +}; +static const u8 enc_output034[] __initconst = { + 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab, 
+ 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb, + 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1, + 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56, + 0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f, + 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93 +}; +static const u8 enc_assoc034[] __initconst = { }; +static const u8 enc_nonce034[] __initconst = { + 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31, + 0x37, 0x1a, 0x6f, 0xd2 +}; +static const u8 enc_key034[] __initconst = { + 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e, + 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2, + 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15, + 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71 +}; + +/* wycheproof - misc */ +static const u8 enc_input035[] __initconst = { + 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2, + 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f, + 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56, + 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb +}; +static const u8 enc_output035[] __initconst = { + 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20, + 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5, + 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e, + 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53, + 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d, + 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f +}; +static const u8 enc_assoc035[] __initconst = { + 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48 +}; +static const u8 enc_nonce035[] __initconst = { + 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8, + 0xb8, 0x1a, 0x1f, 0x8b +}; +static const u8 enc_key035[] __initconst = { + 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f, + 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40, + 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4, + 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4 +}; + +/* wycheproof - misc */ +static const u8 enc_input036[] __initconst = { + 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a, + 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb, + 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce, + 
0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78 +}; +static const u8 enc_output036[] __initconst = { + 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76, + 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e, + 0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50, + 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21, + 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33, + 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea +}; +static const u8 enc_assoc036[] __initconst = { }; +static const u8 enc_nonce036[] __initconst = { + 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2, + 0x2b, 0x7e, 0x6e, 0x6a +}; +static const u8 enc_key036[] __initconst = { + 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c, + 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb, + 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9, + 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3 +}; + +/* wycheproof - misc */ +static const u8 enc_input037[] __initconst = { + 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14, + 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3, + 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79, + 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e +}; +static const u8 enc_output037[] __initconst = { + 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b, + 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2, + 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29, + 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4, + 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d, + 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32 +}; +static const u8 enc_assoc037[] __initconst = { + 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59 +}; +static const u8 enc_nonce037[] __initconst = { + 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5, + 0x34, 0x0d, 0xd1, 0xb8 +}; +static const u8 enc_key037[] __initconst = { + 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4, + 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f, + 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd, + 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45 +}; + +/* wycheproof - misc */ +static const u8 enc_input038[] 
__initconst = { + 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4, + 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e, + 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11, + 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04, + 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46, + 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a +}; +static const u8 enc_output038[] __initconst = { + 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6, + 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00, + 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6, + 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27, + 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24, + 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f, + 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08, + 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9 +}; +static const u8 enc_assoc038[] __initconst = { }; +static const u8 enc_nonce038[] __initconst = { + 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9, + 0x0b, 0xef, 0x55, 0xd2 +}; +static const u8 enc_key038[] __initconst = { + 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51, + 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63, + 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3, + 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64 +}; + +/* wycheproof - misc */ +static const u8 enc_input039[] __initconst = { + 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74, + 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3, + 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d, + 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59, + 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c, + 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9 +}; +static const u8 enc_output039[] __initconst = { + 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec, + 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04, + 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e, + 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d, + 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3, + 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4, + 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42, + 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3 
+}; +static const u8 enc_assoc039[] __initconst = { + 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84 +}; +static const u8 enc_nonce039[] __initconst = { + 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b, + 0x31, 0xcd, 0x4d, 0x95 +}; +static const u8 enc_key039[] __initconst = { + 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c, + 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25, + 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4, + 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac +}; + +/* wycheproof - misc */ +static const u8 enc_input040[] __initconst = { + 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e, + 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd, + 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f, + 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f, + 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88, + 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76, + 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d, + 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82 +}; +static const u8 enc_output040[] __initconst = { + 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5, + 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8, + 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c, + 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8, + 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc, + 0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33, + 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd, + 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50, + 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56, + 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13 +}; +static const u8 enc_assoc040[] __initconst = { }; +static const u8 enc_nonce040[] __initconst = { + 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28, + 0x23, 0xcc, 0x06, 0x5b +}; +static const u8 enc_key040[] __initconst = { + 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0, + 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8, + 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30, + 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01 +}; + +/* wycheproof - misc */ +static const u8 enc_input041[] 
__initconst = { + 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88, + 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d, + 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19, + 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44, + 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec, + 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d, + 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c, + 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17 +}; +static const u8 enc_output041[] __initconst = { + 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16, + 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1, + 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8, + 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97, + 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84, + 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3, + 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20, + 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e, + 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14, + 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec +}; +static const u8 enc_assoc041[] __initconst = { + 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2 +}; +static const u8 enc_nonce041[] __initconst = { + 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d, + 0xe4, 0xeb, 0xb1, 0x9c +}; +static const u8 enc_key041[] __initconst = { + 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9, + 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2, + 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5, + 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e +}; + +/* wycheproof - misc */ +static const u8 enc_input042[] __initconst = { + 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba, + 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58, + 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06, + 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64, + 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5, + 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74, + 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61, + 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e, + 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6, + 0xd3, 0xab, 0x49, 0xe3, 
0x91, 0xda, 0x29, 0xcd, + 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4, + 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5, + 0x92 +}; +static const u8 enc_output042[] __initconst = { + 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97, + 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf, + 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98, + 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5, + 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45, + 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10, + 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28, + 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe, + 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c, + 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a, + 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2, + 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3, + 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b, + 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b, + 0xb0 +}; +static const u8 enc_assoc042[] __initconst = { }; +static const u8 enc_nonce042[] __initconst = { + 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03, + 0xac, 0xde, 0x27, 0x99 +}; +static const u8 enc_key042[] __initconst = { + 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28, + 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d, + 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a, + 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01 +}; + +/* wycheproof - misc */ +static const u8 enc_input043[] __initconst = { + 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff, + 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc, + 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc, + 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5, + 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd, + 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55, + 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85, + 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b, + 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3, + 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad, + 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92, + 0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31, + 
0x76 +}; +static const u8 enc_output043[] __initconst = { + 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5, + 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06, + 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e, + 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae, + 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37, + 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8, + 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3, + 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d, + 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70, + 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07, + 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6, + 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54, + 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5, + 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac, + 0xf6 +}; +static const u8 enc_assoc043[] __initconst = { + 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30 +}; +static const u8 enc_nonce043[] __initconst = { + 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63, + 0xe5, 0x22, 0x94, 0x60 +}; +static const u8 enc_key043[] __initconst = { + 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c, + 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4, + 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b, + 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e +}; + +/* wycheproof - misc */ +static const u8 enc_input044[] __initconst = { + 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68, + 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2 +}; +static const u8 enc_output044[] __initconst = { + 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6, + 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b, + 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02, + 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4 +}; +static const u8 enc_assoc044[] __initconst = { + 0x02 +}; +static const u8 enc_nonce044[] __initconst = { + 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21, + 0x02, 0xd5, 0x06, 0x56 +}; +static const u8 enc_key044[] __initconst = { + 0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32, + 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 
0x18, 0x50, + 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0, + 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c +}; + +/* wycheproof - misc */ +static const u8 enc_input045[] __initconst = { + 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44, + 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8 +}; +static const u8 enc_output045[] __initconst = { + 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1, + 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60, + 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1, + 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58 +}; +static const u8 enc_assoc045[] __initconst = { + 0xb6, 0x48 +}; +static const u8 enc_nonce045[] __initconst = { + 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9, + 0x5b, 0x3a, 0xa7, 0x13 +}; +static const u8 enc_key045[] __initconst = { + 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41, + 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0, + 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44, + 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc +}; + +/* wycheproof - misc */ +static const u8 enc_input046[] __initconst = { + 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23, + 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47 +}; +static const u8 enc_output046[] __initconst = { + 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda, + 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1, + 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b, + 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8 +}; +static const u8 enc_assoc046[] __initconst = { + 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd, + 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0 +}; +static const u8 enc_nonce046[] __initconst = { + 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b, + 0xa4, 0x65, 0x96, 0xdf +}; +static const u8 enc_key046[] __initconst = { + 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08, + 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96, + 0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89, + 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f +}; + +/* wycheproof - misc */ +static const u8 enc_input047[] 
__initconst = { + 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf, + 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2 +}; +static const u8 enc_output047[] __initconst = { + 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb, + 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95, + 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34, + 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f +}; +static const u8 enc_assoc047[] __initconst = { + 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07, + 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b +}; +static const u8 enc_nonce047[] __initconst = { + 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6, + 0x80, 0x92, 0x66, 0xd9 +}; +static const u8 enc_key047[] __initconst = { + 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91, + 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23, + 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6, + 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16 +}; + +/* wycheproof - misc */ +static const u8 enc_input048[] __initconst = { + 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32, + 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3 +}; +static const u8 enc_output048[] __initconst = { + 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b, + 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68, + 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84, + 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3 +}; +static const u8 enc_assoc048[] __initconst = { + 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab, + 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c, + 0x0e +}; +static const u8 enc_nonce048[] __initconst = { + 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40, + 0xfc, 0x10, 0x68, 0xc3 +}; +static const u8 enc_key048[] __initconst = { + 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b, + 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97, + 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b, + 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57 +}; + +/* wycheproof - misc */ +static const u8 enc_input049[] __initconst = { + 0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2, + 0xf8, 
0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6 +}; +static const u8 enc_output049[] __initconst = { + 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87, + 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11, + 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e, + 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6 +}; +static const u8 enc_assoc049[] __initconst = { + 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8, + 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94, + 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5, + 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda +}; +static const u8 enc_nonce049[] __initconst = { + 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf, + 0xad, 0x14, 0xd5, 0x3e +}; +static const u8 enc_key049[] __initconst = { + 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d, + 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc, + 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47, + 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0 +}; + +/* wycheproof - misc */ +static const u8 enc_input050[] __initconst = { + 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18, + 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c +}; +static const u8 enc_output050[] __initconst = { + 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6, + 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca, + 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b, + 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04 +}; +static const u8 enc_assoc050[] __initconst = { + 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22, + 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07, + 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90, + 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37 +}; +static const u8 enc_nonce050[] __initconst = { + 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28, + 0x6a, 0x7b, 0x76, 0x51 +}; +static const u8 enc_key050[] __initconst = { + 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1, + 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c, + 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a, + 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30 +}; + +/* wycheproof - misc */ 
+static const u8 enc_input051[] __initconst = { + 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59, + 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6 +}; +static const u8 enc_output051[] __initconst = { + 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70, + 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd, + 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4, + 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43 +}; +static const u8 enc_assoc051[] __initconst = { + 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92, + 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57, + 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75, + 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88, + 0x73 +}; +static const u8 enc_nonce051[] __initconst = { + 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0, + 0xa8, 0xfa, 0x89, 0x49 +}; +static const u8 enc_key051[] __initconst = { + 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27, + 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74, + 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c, + 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99 +}; + +/* wycheproof - misc */ +static const u8 enc_input052[] __initconst = { + 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3, + 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed +}; +static const u8 enc_output052[] __initconst = { + 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc, + 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b, + 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed, + 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32 +}; +static const u8 enc_assoc052[] __initconst = { + 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a, + 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14, + 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae, + 0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c, + 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92, + 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61 +}; +static const u8 enc_nonce052[] __initconst = { + 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73, + 0x0e, 0xc3, 0x5d, 0x12 +}; +static const u8 enc_key052[] __initconst = { + 0x9f, 0x14, 
0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5, + 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf, + 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c, + 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4 +}; + +/* wycheproof - misc */ +static const u8 enc_input053[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe +}; +static const u8 enc_output053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7, + 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07 +}; +static const u8 enc_assoc053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key053[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input054[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, + 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, + 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, + 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, + 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd +}; +static const u8 enc_output054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2, + 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce +}; +static const u8 enc_assoc054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key054[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input055[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, + 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, + 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, + 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, + 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd, + 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa, + 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02, + 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80, + 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4, + 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7, + 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed, + 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38, + 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7 +}; +static const u8 enc_output055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3, + 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e +}; +static const u8 enc_assoc055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key055[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input056[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output056[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27, + 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6 +}; +static const u8 enc_assoc056[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce056[] 
__initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key056[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input057[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, + 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, + 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, + 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, + 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42 +}; +static const u8 enc_output057[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b, + 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68 +}; +static const u8 enc_assoc057[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce057[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key057[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input058[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 
0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, + 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, + 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, + 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, + 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42, + 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05, + 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd, + 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f, + 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b, + 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38, + 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12, + 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7, + 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output058[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91, + 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62 +}; +static const u8 enc_assoc058[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce058[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key058[] __initconst = { + 0x80, 0x81, 0x82, 
0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input059[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e +}; +static const u8 enc_output059[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75, + 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4 +}; +static const u8 enc_assoc059[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce059[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key059[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input060[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, + 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, + 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, + 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, + 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d +}; +static const u8 enc_output060[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba, + 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a +}; +static const u8 enc_assoc060[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce060[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key060[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input061[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, + 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, + 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, + 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, + 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d, + 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a, + 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82, + 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00, + 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44, + 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47, + 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d, + 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8, + 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47 +}; +static const u8 enc_output061[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 
0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f, + 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a +}; +static const u8 enc_assoc061[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce061[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key061[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input062[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1 +}; +static const u8 enc_output062[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59, + 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19 +}; +static const u8 enc_assoc062[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce062[] 
__initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key062[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input063[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, + 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, + 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, + 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, + 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2 +}; +static const u8 enc_output063[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2, + 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c +}; +static const u8 enc_assoc063[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce063[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key063[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input064[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 
0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, + 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, + 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, + 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, + 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2, + 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85, + 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d, + 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff, + 0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb, + 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8, + 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92, + 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47, + 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8 +}; +static const u8 enc_output064[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5, + 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06 +}; +static const u8 enc_assoc064[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce064[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key064[] __initconst = { + 0x80, 0x81, 0x82, 
0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input065[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output065[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf, + 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67 +}; +static const u8 enc_assoc065[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce065[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key065[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input066[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, + 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, + 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, + 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, + 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42 +}; +static const u8 enc_output066[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89, + 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90 +}; +static const u8 enc_assoc066[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce066[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key066[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input067[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, + 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, + 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, + 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, + 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42, + 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05, + 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd, + 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f, + 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b, + 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38, + 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12, + 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7, + 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output067[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 
0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94, + 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22 +}; +static const u8 enc_assoc067[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce067[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key067[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input068[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output068[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9, + 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d +}; +static const u8 enc_assoc068[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce068[] 
__initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key068[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input069[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, + 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, + 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, + 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a, + 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42 +}; +static const u8 enc_output069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde, + 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82 +}; +static const u8 enc_assoc069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key069[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input070[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 
0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, + 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, + 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, + 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a, + 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42, + 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05, + 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd, + 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f, + 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b, + 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38, + 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12, + 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7, + 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3, + 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71 +}; +static const u8 enc_assoc070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key070[] __initconst = { + 0x80, 0x81, 0x82, 
0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input071[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe +}; +static const u8 enc_output071[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5, + 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20 +}; +static const u8 enc_assoc071[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce071[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key071[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input072[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, + 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, + 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, + 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, + 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd +}; +static const u8 enc_output072[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 
0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e, + 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4 +}; +static const u8 enc_assoc072[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce072[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key072[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input073[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, + 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, + 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, + 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, + 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd, + 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa, + 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02, + 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80, + 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4, + 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7, + 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed, + 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38, + 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7 +}; +static const u8 enc_output073[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 
0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51, + 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f +}; +static const u8 enc_assoc073[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce073[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key073[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input074[] __initconst = { + 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51, + 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69, + 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f, + 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90, + 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0, + 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac, + 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2, + 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5, + 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b, + 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49, + 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16, + 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58, + 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73, + 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3, + 0x93, 0x76, 0x71, 
0xa0, 0x62, 0x9b, 0xd9, 0x5c, + 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a +}; +static const u8 enc_output074[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85, + 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca +}; +static const u8 enc_assoc074[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce074[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x00, 0x02, 0x50, 0x6e +}; +static const u8 enc_key074[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input075[] __initconst = { + 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 
0x52, 0x75, + 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56, + 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2, + 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed, + 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f, + 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f, + 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0, + 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8, + 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32, + 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8, + 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b, + 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b, + 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd, + 0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f, + 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1, + 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94 +}; +static const u8 enc_output075[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7, + 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5 +}; +static const u8 enc_assoc075[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce075[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x00, 0x03, 0x18, 0xa5 +}; +static const u8 enc_key075[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input076[] __initconst = { + 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85, + 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02, + 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09, + 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8, + 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd, + 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85, + 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39, + 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae, + 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed, + 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4, + 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c, + 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33, + 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19, + 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11, + 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9, + 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5 +}; +static const u8 enc_output076[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d, + 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63 +}; +static const u8 enc_assoc076[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce076[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0 +}; +static const u8 enc_key076[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input077[] __initconst = { + 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae, + 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16, + 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7, + 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61, + 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2, + 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf, + 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf, + 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48, + 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8, + 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa, + 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87, + 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32, + 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1, + 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b, 
+ 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c, + 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f +}; +static const u8 enc_output077[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4, + 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa +}; +static const u8 enc_assoc077[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce077[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66 +}; +static const u8 enc_key077[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input078[] __initconst = { + 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 
0xef, + 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5, + 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c, + 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d, + 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf, + 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6, + 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe, + 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5, + 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62, + 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c, + 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f, + 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2, + 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33, + 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e, + 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9, + 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62 +}; +static const u8 enc_output078[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7, + 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d +}; +static const u8 enc_assoc078[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce078[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90 +}; +static const u8 enc_key078[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input079[] __initconst = { + 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9, + 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6, + 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c, + 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82, + 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21, + 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12, + 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c, + 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d, + 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32, + 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84, + 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3, + 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24, + 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94, + 0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53, + 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71, + 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd +}; +static const u8 enc_output079[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2, + 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e +}; +static const u8 enc_assoc079[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce079[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a +}; +static const u8 enc_key079[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input080[] __initconst = { + 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5, + 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9, + 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b, + 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc, + 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38, + 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27, + 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83, + 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b, + 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda, + 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4, + 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc, + 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b, + 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4, + 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00, + 0x18, 0x01, 0xce, 0x33, 
0xc4, 0xe4, 0xa7, 0x7d, + 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1 +}; +static const u8 enc_output080[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a, + 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02 +}; +static const u8 enc_assoc080[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce080[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40 +}; +static const u8 enc_key080[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input081[] __initconst = { + 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92, + 0x15, 0xf5, 0x03, 
0xdf, 0x9c, 0xe6, 0x3c, 0x5c, + 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9, + 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06, + 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a, + 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa, + 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07, + 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6, + 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06, + 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8, + 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5, + 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5, + 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f, + 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86, + 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23, + 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1 +}; +static const u8 enc_output081[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba, + 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0 +}; +static const u8 enc_assoc081[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce081[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35 +}; +static const u8 enc_key081[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input082[] __initconst = { + 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb, + 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49, + 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16, + 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d, + 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9, + 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e, + 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d, + 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc, + 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6, + 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca, + 0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83, + 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08, + 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95, + 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66, + 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83, + 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07 +}; +static const u8 enc_output082[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42, + 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae +}; +static const u8 enc_assoc082[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce082[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5 +}; +static const u8 enc_key082[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input083[] __initconst = { + 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58, + 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84, + 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5, + 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf, + 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39, + 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03, + 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f, + 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa, + 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08, + 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59, + 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56, + 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9, + 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43, + 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa, + 0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba, + 
0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a +}; +static const u8 enc_output083[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b, + 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19 +}; +static const u8 enc_assoc083[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce083[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4 +}; +static const u8 enc_key083[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input084[] __initconst = { + 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18, + 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 
0x2b, + 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99, + 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c, + 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde, + 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2, + 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1, + 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8, + 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b, + 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f, + 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77, + 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8, + 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c, + 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf, + 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97, + 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee +}; +static const u8 enc_output084[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab, + 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87 +}; +static const u8 enc_assoc084[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce084[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8 +}; +static const u8 enc_key084[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input085[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6, + 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed, + 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7, + 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37, + 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a, + 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b +}; +static const u8 enc_output085[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68, + 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43 +}; +static const u8 enc_assoc085[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce085[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key085[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input086[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output086[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f +}; +static const u8 enc_assoc086[] __initconst = { + 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1, + 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00 +}; +static const u8 enc_nonce086[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key086[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input087[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output087[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc087[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x24, 0x7e, 
0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f, + 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58 +}; +static const u8 enc_nonce087[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key087[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input088[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output088[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_assoc088[] __initconst = { + 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f, + 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00 +}; +static const u8 enc_nonce088[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key088[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 
0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input089[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output089[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_assoc089[] __initconst = { + 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae, + 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02 +}; +static const u8 enc_nonce089[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key089[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input090[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output090[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_assoc090[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe, + 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3 +}; +static const u8 enc_nonce090[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key090[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input091[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 
enc_output091[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc091[] __initconst = { + 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30, + 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01 +}; +static const u8 enc_nonce091[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key091[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input092[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output092[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc092[] __initconst = { + 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11, + 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01 +}; +static const u8 enc_nonce092[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key092[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input093[] __initconst = { + 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d, + 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44, + 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3, + 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b, + 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb, + 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c +}; +static const u8 enc_output093[] __initconst = { + 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14, + 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7, + 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, + 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, + 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, + 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, + 
0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77, + 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a +}; +static const u8 enc_assoc093[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce093[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key093[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input094[] __initconst = { + 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90, + 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45, + 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef, + 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09, + 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20, + 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10 +}; +static const u8 enc_output094[] __initconst = { + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66, + 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02, + 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, + 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, + 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, + 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, + 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6, + 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc +}; 
+static const u8 enc_assoc094[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce094[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key094[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input095[] __initconst = { + 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b, + 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46, + 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6, + 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a, + 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79, + 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13 +}; +static const u8 enc_output095[] __initconst = { + 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad, + 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01, + 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, + 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, + 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, + 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, + 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02, + 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0 +}; +static const u8 enc_assoc095[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 
enc_nonce095[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key095[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input096[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60, + 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c, + 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67, + 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94, + 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94, + 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04 +}; +static const u8 enc_output096[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96, + 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b, + 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, + 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, + 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, + 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, + 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73, + 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8 +}; +static const u8 enc_assoc096[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce096[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key096[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input097[] __initconst = { + 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 
0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6, + 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01, + 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce, + 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f +}; +static const u8 enc_output097[] __initconst = { + 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00, + 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, + 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, + 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, + 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b, + 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26 +}; +static const u8 enc_assoc097[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce097[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key097[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input098[] __initconst = { + 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62, + 0x9e, 0xfe, 0xff, 
0x78, 0xd7, 0xd8, 0x6b, 0x45, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0, + 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f, + 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1 +}; +static const u8 enc_output098[] __initconst = { + 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94, + 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, + 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, + 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, + 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e, + 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b +}; +static const u8 enc_assoc098[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce098[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key098[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input099[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12, + 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98, + 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95, + 0x9a, 0x72, 0x91, 0xb7, 
0xa5, 0xaf, 0x13, 0x48, + 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66, + 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8 +}; +static const u8 enc_output099[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4, + 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf, + 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, + 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, + 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, + 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, + 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56, + 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1 +}; +static const u8 enc_assoc099[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce099[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key099[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input100[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70, + 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd, + 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77, + 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75, + 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84, + 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5 +}; +static const u8 enc_output100[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86, + 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa, + 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42, + 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, + 0x73, 0xb0, 0xe7, 0x21, 0x70, 
0xec, 0x90, 0x42, + 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, + 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59, + 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6 +}; +static const u8 enc_assoc100[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce100[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key100[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input101[] __initconst = { + 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c, + 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde, + 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92, + 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f, + 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea, + 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88 +}; +static const u8 enc_output101[] __initconst = { + 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5, + 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d, + 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, + 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, + 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, + 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, + 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65, + 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05 +}; +static const u8 enc_assoc101[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce101[] __initconst = { + 0x00, 
0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key101[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input102[] __initconst = { + 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0, + 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d, + 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73, + 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b, + 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b, + 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c +}; +static const u8 enc_output102[] __initconst = { + 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39, + 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e, + 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, + 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, + 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, + 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, + 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56, + 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04 +}; +static const u8 enc_assoc102[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce102[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key102[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* 
wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input103[] __initconst = { + 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d, + 0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce, + 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c, + 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a, + 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14, + 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d +}; +static const u8 enc_output103[] __initconst = { + 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94, + 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d, + 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, + 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, + 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, + 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, + 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a, + 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08 +}; +static const u8 enc_assoc103[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce103[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key103[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input104[] __initconst = { + 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac, + 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b, + 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 
0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9, + 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98, + 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1, + 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f +}; +static const u8 enc_output104[] __initconst = { + 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35, + 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, + 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, + 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, + 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d, + 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d +}; +static const u8 enc_assoc104[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce104[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key104[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input105[] __initconst = { + 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5, + 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99, + 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e, + 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a, + 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46, + 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d +}; 
+static const u8 enc_output105[] __initconst = { + 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c, + 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, + 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, + 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, + 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88, + 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd +}; +static const u8 enc_assoc105[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce105[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key105[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input106[] __initconst = { + 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06, + 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5, + 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68, + 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a, + 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10, + 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d +}; +static const u8 enc_output106[] __initconst = { + 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f, + 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76, + 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, + 0xa9, 
0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, + 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, + 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, + 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88, + 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a +}; +static const u8 enc_assoc106[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce106[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key106[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input107[] __initconst = { + 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71, + 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44, + 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c, + 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a, + 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3, + 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13 +}; +static const u8 enc_output107[] __initconst = { + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87, + 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03, + 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, + 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, + 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, + 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, + 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02, + 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88 +}; +static const u8 enc_assoc107[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce107[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key107[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input108[] __initconst = { + 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43, + 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6, + 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11, + 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99, + 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69, + 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e +}; +static const u8 enc_output108[] __initconst = { + 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda, + 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15, + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, + 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, + 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, + 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96, + 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a +}; +static const u8 enc_assoc108[] __initconst = { + 0xff, 0xff, 0xff, 
0xff +}; +static const u8 enc_nonce108[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key108[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input109[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a, + 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb, + 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96, + 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f, + 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59, + 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16 +}; +static const u8 enc_output109[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac, + 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c, + 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, + 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, + 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, + 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, + 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1, + 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb +}; +static const u8 enc_assoc109[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce109[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 
+}; +static const u8 enc_key109[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input110[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5, + 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9, + 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b, + 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b, + 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4, + 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12 +}; +static const u8 enc_output110[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43, + 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, + 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, + 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, + 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6, + 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe +}; +static const u8 enc_assoc110[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce110[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key110[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 
0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input111[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda, + 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55, + 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5, + 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f, + 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a, + 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16 +}; +static const u8 enc_output111[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c, + 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12, + 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, + 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, + 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, + 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, + 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4, + 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9 +}; +static const u8 enc_assoc111[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce111[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key111[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 
0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input112[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38, + 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a, + 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65, + 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a, + 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa, + 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13 +}; +static const u8 enc_output112[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce, + 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, + 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, + 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, + 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7, + 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce +}; +static const u8 enc_assoc112[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce112[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key112[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ 
+static const u8 enc_input113[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86, + 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f, + 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1, + 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f, + 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e, + 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16 +}; +static const u8 enc_output113[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70, + 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, + 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, + 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, + 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa, + 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3 +}; +static const u8 enc_assoc113[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce113[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key113[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input114[] __initconst = { + 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73, + 0x74, 
0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc, + 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a, + 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f, + 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2, + 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88 +}; +static const u8 enc_output114[] __initconst = { + 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea, + 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f, + 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf, + 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, + 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf, + 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, + 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69, + 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1 +}; +static const u8 enc_assoc114[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce114[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key114[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input115[] __initconst = { + 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb, + 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68, + 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33, + 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98, + 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 
0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b, + 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f +}; +static const u8 enc_output115[] __initconst = { + 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72, + 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb, + 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, + 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, + 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, + 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, + 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74, + 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05 +}; +static const u8 enc_assoc115[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce115[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key115[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input116[] __initconst = { + 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd, + 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44, + 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea, + 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a, + 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25, + 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13 +}; +static const u8 enc_output116[] __initconst = { + 0xd5, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b, + 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, + 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, + 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, + 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50, + 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2 +}; +static const u8 enc_assoc116[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce116[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key116[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input117[] __initconst = { + 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d, + 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47, + 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06, + 0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08, + 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9, + 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11 +}; +static const u8 enc_output117[] __initconst = { + 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf2, 0x17, 0xae, 0x33, 
0x49, 0xb6, 0xb5, 0xbb, + 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, + 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, + 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, + 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53, + 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7 +}; +static const u8 enc_assoc117[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce117[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key117[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input118[] __initconst = { + 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03, + 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43, + 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a, + 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f, + 0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85, + 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16 +}; +static const u8 enc_output118[] __initconst = { + 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5, + 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04, + 0xf7, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, + 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, + 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, + 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, + 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0, + 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1 +}; +static const u8 enc_assoc118[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce118[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key118[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +static const struct chacha20poly1305_testvec +chacha20poly1305_enc_vectors[] __initconst = { + { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001, + sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) }, + { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002, + sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) }, + { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003, + sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) }, + { enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004, + sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) }, + { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005, + sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) }, + { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006, + sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) }, + { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007, + sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) }, + { 
enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008, + sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) }, + { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009, + sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) }, + { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010, + sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) }, + { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011, + sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) }, + { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012, + sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) }, + { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013, + sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) }, + { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014, + sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) }, + { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015, + sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) }, + { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016, + sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) }, + { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017, + sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) }, + { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018, + sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) }, + { enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019, + sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) }, + { enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020, + sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) }, + { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021, + sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) }, + 
{ enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022, + sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) }, + { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023, + sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) }, + { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024, + sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) }, + { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025, + sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) }, + { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026, + sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) }, + { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027, + sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) }, + { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028, + sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) }, + { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029, + sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) }, + { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030, + sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) }, + { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031, + sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) }, + { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032, + sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) }, + { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033, + sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) }, + { enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034, + sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) }, + { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035, + sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) }, 
+ { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036, + sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) }, + { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037, + sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) }, + { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038, + sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) }, + { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039, + sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) }, + { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040, + sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) }, + { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041, + sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) }, + { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042, + sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) }, + { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043, + sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) }, + { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044, + sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) }, + { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045, + sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) }, + { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046, + sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) }, + { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047, + sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) }, + { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048, + sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) }, + { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049, + sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) 
}, + { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050, + sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) }, + { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051, + sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) }, + { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052, + sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) }, + { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053, + sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) }, + { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054, + sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) }, + { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055, + sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) }, + { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056, + sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) }, + { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057, + sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) }, + { enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058, + sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) }, + { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059, + sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) }, + { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060, + sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) }, + { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061, + sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) }, + { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062, + sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) }, + { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063, + sizeof(enc_input063), sizeof(enc_assoc063), 
sizeof(enc_nonce063) }, + { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064, + sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) }, + { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065, + sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) }, + { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066, + sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) }, + { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067, + sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) }, + { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068, + sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) }, + { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069, + sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) }, + { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070, + sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) }, + { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071, + sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) }, + { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072, + sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) }, + { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073, + sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) }, + { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074, + sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) }, + { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075, + sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) }, + { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076, + sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) }, + { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077, + sizeof(enc_input077), 
sizeof(enc_assoc077), sizeof(enc_nonce077) }, + { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078, + sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) }, + { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079, + sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) }, + { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080, + sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) }, + { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081, + sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) }, + { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082, + sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) }, + { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083, + sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) }, + { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084, + sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) }, + { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085, + sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) }, + { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086, + sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) }, + { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087, + sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) }, + { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088, + sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) }, + { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089, + sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) }, + { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090, + sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) }, + { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091, + 
sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) }, + { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092, + sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) }, + { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093, + sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) }, + { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094, + sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) }, + { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095, + sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) }, + { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096, + sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) }, + { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097, + sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) }, + { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098, + sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) }, + { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099, + sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) }, + { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100, + sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) }, + { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101, + sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) }, + { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102, + sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) }, + { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103, + sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) }, + { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104, + sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) }, + { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105, 
+ sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) }, + { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106, + sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) }, + { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107, + sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) }, + { enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108, + sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) }, + { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109, + sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) }, + { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110, + sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) }, + { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111, + sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) }, + { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112, + sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) }, + { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113, + sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) }, + { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114, + sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) }, + { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115, + sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) }, + { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116, + sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) }, + { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117, + sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) }, + { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118, + sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) } +}; + +static const u8 dec_input001[] __initconst = { + 0x64, 0xa0, 
0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, + 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, + 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, + 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2, + 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee, + 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, + 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, + 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf, + 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, + 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, + 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, + 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, + 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, + 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, + 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, + 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, + 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, + 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, + 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, + 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, + 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, + 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, + 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, + 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, + 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, + 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, + 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, + 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, + 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, + 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, + 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, + 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, + 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, + 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, + 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, + 0x38 +}; +static const u8 dec_output001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 
0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 dec_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 dec_nonce001[] __initconst = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 +}; +static const u8 dec_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const u8 dec_input002[] __initconst = { + 0xea, 0xe0, 
0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, + 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 +}; +static const u8 dec_output002[] __initconst = { }; +static const u8 dec_assoc002[] __initconst = { }; +static const u8 dec_nonce002[] __initconst = { + 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e +}; +static const u8 dec_key002[] __initconst = { + 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, + 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, + 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, + 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 +}; + +static const u8 dec_input003[] __initconst = { + 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, + 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 +}; +static const u8 dec_output003[] __initconst = { }; +static const u8 dec_assoc003[] __initconst = { + 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b +}; +static const u8 dec_nonce003[] __initconst = { + 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d +}; +static const u8 dec_key003[] __initconst = { + 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, + 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, + 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, + 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d +}; + +static const u8 dec_input004[] __initconst = { + 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, + 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, + 0x89 +}; +static const u8 dec_output004[] __initconst = { + 0xa4 +}; +static const u8 dec_assoc004[] __initconst = { + 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 +}; +static const u8 dec_nonce004[] __initconst = { + 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 +}; +static const u8 dec_key004[] __initconst = { + 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, + 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, + 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, + 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e +}; + +static const u8 dec_input005[] __initconst = { + 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, + 0x6c, 0x5d, 0x84, 0x44, 
0x39, 0x81, 0xc1, 0x9c, + 0xac +}; +static const u8 dec_output005[] __initconst = { + 0x2d +}; +static const u8 dec_assoc005[] __initconst = { }; +static const u8 dec_nonce005[] __initconst = { + 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 +}; +static const u8 dec_key005[] __initconst = { + 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, + 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, + 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, + 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 +}; + +static const u8 dec_input006[] __initconst = { + 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, + 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, + 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, + 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, + 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, + 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, + 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc, + 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, + 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5, + 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, + 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93, + 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, + 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, + 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, + 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, + 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, + 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, + 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, + 0xeb +}; +static const u8 dec_output006[] __initconst = { + 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, + 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, + 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, + 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, + 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, + 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, + 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, + 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, + 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, + 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 
0x6e, + 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, + 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, + 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, + 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, + 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, + 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, + 0x8f +}; +static const u8 dec_assoc006[] __initconst = { + 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b +}; +static const u8 dec_nonce006[] __initconst = { + 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c +}; +static const u8 dec_key006[] __initconst = { + 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, + 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, + 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, + 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 +}; + +static const u8 dec_input007[] __initconst = { + 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, + 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, + 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c, + 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, + 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, + 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, + 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, + 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, + 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, + 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, + 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, + 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, + 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, + 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, + 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, + 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, + 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, + 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, + 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, + 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, + 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, + 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, + 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, + 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 
0xd5, 0x2e, + 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, + 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, + 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, + 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, + 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, + 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, + 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, + 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, + 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, + 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 +}; +static const u8 dec_output007[] __initconst = { + 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, + 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, + 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, + 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, + 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, + 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, + 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, + 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, + 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, + 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, + 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, + 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, + 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, + 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, + 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, + 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, + 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, + 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80, + 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, + 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, + 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, + 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, + 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, + 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, + 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, + 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, + 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, + 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, + 0x91, 0xe1, 0xce, 0xa2, 0x7e, 
0x7f, 0x42, 0xe3, + 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, + 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, + 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 +}; +static const u8 dec_assoc007[] __initconst = { }; +static const u8 dec_nonce007[] __initconst = { + 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 +}; +static const u8 dec_key007[] __initconst = { + 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, + 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, + 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, + 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 +}; + +static const u8 dec_input008[] __initconst = { + 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, + 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, + 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, + 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, + 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, + 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, + 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, + 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, + 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, + 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, + 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, + 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, + 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, + 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, + 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, + 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, + 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, + 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, + 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, + 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, + 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, + 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, + 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, + 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, + 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, + 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, + 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, + 0x0f, 0xf3, 0x10, 0x0e, 
0xfd, 0x53, 0xff, 0x6c, + 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, + 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, + 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, + 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, + 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82, + 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, + 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, + 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, + 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c, + 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, + 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, + 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, + 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, + 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, + 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, + 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, + 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, + 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, + 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, + 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, + 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, + 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, + 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, + 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, + 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, + 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, + 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, + 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, + 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, + 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, + 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, + 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, + 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, + 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, + 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, + 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, + 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, + 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 +}; +static const u8 dec_output008[] __initconst = { + 0xc3, 0x09, 0x94, 
0x62, 0xe6, 0x46, 0x2e, 0x10, + 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, + 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, + 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, + 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, + 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, + 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, + 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, + 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, + 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, + 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, + 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, + 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, + 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, + 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, + 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, + 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, + 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, + 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, + 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10, + 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, + 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, + 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, + 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, + 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, + 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, + 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, + 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, + 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, + 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, + 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17, + 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, + 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, + 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, + 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, + 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, + 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, + 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, + 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, + 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed, + 0xd2, 0x15, 0x8f, 
0x5e, 0x91, 0xdb, 0x33, 0xf2, + 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, + 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, + 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, + 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, + 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, + 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, + 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, + 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, + 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, + 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, + 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, + 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, + 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, + 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, + 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, + 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, + 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, + 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30, + 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, + 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, + 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, + 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, + 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 +}; +static const u8 dec_assoc008[] __initconst = { }; +static const u8 dec_nonce008[] __initconst = { + 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 +}; +static const u8 dec_key008[] __initconst = { + 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, + 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0, + 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, + 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba +}; + +static const u8 dec_input009[] __initconst = { + 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, + 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, + 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, + 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, + 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, + 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe, + 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, + 0xfa, 0x7f, 
0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, + 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, + 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09, + 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, + 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, + 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, + 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, + 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, + 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, + 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, + 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, + 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, + 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, + 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9, + 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, + 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, + 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, + 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, + 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, + 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, + 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, + 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, + 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, + 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, + 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, + 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, + 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, + 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, + 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, + 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, + 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, + 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, + 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, + 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, + 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, + 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, + 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, + 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e, + 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, + 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, + 0x5d, 0x12, 
0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, + 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, + 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, + 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, + 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, + 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, + 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, + 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, + 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, + 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, + 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, + 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, + 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, + 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2, + 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, + 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, + 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, + 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, + 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, + 0xae +}; +static const u8 dec_output009[] __initconst = { + 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, + 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, + 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, + 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, + 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, + 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85, + 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, + 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, + 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, + 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, + 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, + 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, + 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, + 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, + 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, + 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, + 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab, + 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, + 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, + 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, + 
0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, + 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, + 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, + 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, + 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, + 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, + 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, + 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, + 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, + 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, + 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, + 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, + 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62, + 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, + 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, + 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, + 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, + 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, + 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, + 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, + 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, + 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, + 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, + 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, + 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, + 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6, + 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, + 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, + 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, + 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, + 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, + 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, + 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, + 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, + 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, + 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5, + 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, + 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, + 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, + 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, + 
0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, + 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, + 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, + 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, + 0x65 +}; +static const u8 dec_assoc009[] __initconst = { + 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, + 0xef +}; +static const u8 dec_nonce009[] __initconst = { + 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 +}; +static const u8 dec_key009[] __initconst = { + 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, + 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, + 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, + 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b +}; + +static const u8 dec_input010[] __initconst = { + 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, + 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, + 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, + 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, + 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, + 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, + 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, + 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, + 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, + 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, + 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, + 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, + 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, + 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, + 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, + 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, + 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, + 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, + 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, + 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, + 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, + 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, + 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, + 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, + 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, + 0x70, 0x40, 0x56, 0x67, 0x1f, 
0x50, 0x44, 0x19, + 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, + 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, + 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, + 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, + 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, + 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a, + 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, + 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, + 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, + 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, + 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, + 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, + 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, + 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, + 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, + 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, + 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, + 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, + 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, + 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae, + 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, + 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, + 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, + 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, + 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, + 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, + 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, + 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, + 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, + 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, + 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, + 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, + 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, + 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, + 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, + 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, + 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, + 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, + 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, + 0x29, 0xba, 0x40, 0x54, 0x97, 
0x2b, 0x7b, 0xfe, + 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, + 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, + 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, + 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, + 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, + 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, + 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, + 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, + 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, + 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, + 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, + 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, + 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, + 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, + 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, + 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, + 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, + 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, + 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, + 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, + 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, + 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, + 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, + 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, + 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, + 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, + 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, + 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, + 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, + 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, + 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, + 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, + 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56, + 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, + 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, + 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, + 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27, + 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, + 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, + 0xbb, 0x69, 0xe3, 0x36, 0xc8, 
0xe1, 0x3d, 0x43, + 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, + 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, + 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, + 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, + 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, + 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, + 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, + 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, + 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, + 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, + 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1, + 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd, + 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, + 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, + 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, + 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, + 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, + 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, + 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, + 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, + 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, + 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, + 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, + 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 +}; +static const u8 dec_output010[] __initconst = { + 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, + 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, + 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, + 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, + 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, + 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, + 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4, + 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, + 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, + 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, + 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, + 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b, + 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, + 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, + 0xba, 0xd7, 0x73, 0x73, 
0x40, 0x97, 0x3d, 0xca, + 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, + 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, + 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, + 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, + 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, + 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, + 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, + 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, + 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, + 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, + 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, + 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, + 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, + 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, + 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, + 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, + 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, + 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f, + 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, + 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, + 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, + 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, + 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, + 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, + 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, + 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, + 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, + 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce, + 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, + 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, + 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa, + 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, + 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, + 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, + 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, + 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e, + 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, + 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, + 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, + 0x01, 0x39, 0x0a, 0x24, 
0x3c, 0x7e, 0xbe, 0x4e, + 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, + 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, + 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, + 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, + 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, + 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, + 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, + 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, + 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, + 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, + 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, + 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, + 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, + 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, + 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, + 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, + 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, + 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, + 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, + 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, + 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, + 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, + 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, + 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, + 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, + 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, + 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, + 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, + 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, + 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf, + 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, + 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, + 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, + 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, + 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71, + 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, + 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, + 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, + 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, + 0x23, 0x6d, 0xe6, 0xaa, 
0x17, 0x54, 0xad, 0x6a, + 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, + 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, + 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, + 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, + 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, + 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, + 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, + 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, + 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f, + 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, + 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, + 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, + 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, + 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, + 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, + 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, + 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, + 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, + 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a, + 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, + 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, + 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, + 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, + 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, + 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, + 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, + 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, + 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, + 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d, + 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, + 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, + 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, + 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f +}; +static const u8 dec_assoc010[] __initconst = { + 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, + 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 +}; +static const u8 dec_nonce010[] __initconst = { + 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 +}; +static const u8 dec_key010[] __initconst = { + 0x47, 0x11, 0xeb, 
0x86, 0x2b, 0x2c, 0xab, 0x44, + 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, + 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, + 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 +}; + +static const u8 dec_input011[] __initconst = { + 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, + 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, + 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, + 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, + 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, + 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, + 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, + 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd, + 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, + 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, + 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, + 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, + 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, + 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, + 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, + 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, + 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, + 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, + 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, + 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, + 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, + 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, + 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, + 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91, + 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, + 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, + 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, + 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, + 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, + 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, + 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, + 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, + 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, + 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, + 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, + 0x1a, 0xa0, 
0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, + 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, + 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, + 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, + 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, + 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, + 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, + 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, + 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, + 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, + 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94, + 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1, + 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, + 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, + 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, + 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, + 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, + 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, + 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, + 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, + 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, + 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, + 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, + 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, + 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, + 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, + 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, + 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, + 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, + 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, + 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, + 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, + 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, + 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, + 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, + 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, + 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, + 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, + 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, + 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, + 0x96, 0x72, 
0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, + 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, + 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, + 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, + 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, + 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, + 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, + 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, + 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, + 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, + 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b, + 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, + 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, + 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, + 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, + 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, + 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, + 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, + 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, + 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, + 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, + 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, + 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, + 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, + 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, + 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, + 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, + 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, + 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, + 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, + 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, + 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, + 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, + 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, + 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, + 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, + 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, + 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, + 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, + 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, + 0x99, 0xa5, 
0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, + 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6, + 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, + 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, + 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, + 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, + 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, + 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, + 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, + 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35, + 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, + 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, + 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, + 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, + 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, + 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, + 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, + 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, + 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, + 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, + 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, + 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, + 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, + 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, + 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, + 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, + 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, + 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, + 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, + 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, + 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, + 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, + 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, + 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, + 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, + 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, + 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, + 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, + 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, + 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, + 0x72, 0x80, 
0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, + 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, + 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, + 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, + 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, + 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, + 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, + 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, + 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, + 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, + 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, + 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, + 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, + 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, + 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, + 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, + 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, + 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, + 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, + 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, + 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, + 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, + 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, + 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, + 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, + 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91, + 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, + 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, + 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, + 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, + 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, + 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, + 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e, + 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, + 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, + 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, + 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, + 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, + 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, + 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, + 0xfd, 0x62, 
0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, + 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, + 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, + 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, + 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, + 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, + 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, + 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, + 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, + 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, + 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, + 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, + 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, + 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, + 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, + 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, + 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, + 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, + 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, + 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, + 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, + 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, + 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, + 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, + 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23, + 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, + 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, + 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, + 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, + 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, + 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, + 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, + 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, + 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, + 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, + 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, + 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, + 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, + 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, + 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, + 0xd4, 0x11, 
0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, + 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, + 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, + 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, + 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, + 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, + 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, + 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, + 0x2b, 0xdf, 0xcd, 0xf9, 0x3c +}; +static const u8 dec_output011[] __initconst = { + 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, + 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, + 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, + 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, + 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, + 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, + 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, + 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, + 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, + 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, + 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, + 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, + 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, + 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, + 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, + 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7, + 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, + 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, + 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, + 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, + 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, + 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, + 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, + 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, + 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, + 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, + 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, + 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, + 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, + 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, + 0x1f, 0x23, 0xe9, 0xfb, 
0x08, 0x86, 0xf7, 0x10, + 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, + 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, + 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0, + 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, + 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, + 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, + 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, + 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, + 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, + 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, + 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, + 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, + 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, + 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, + 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, + 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d, + 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, + 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, + 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, + 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, + 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, + 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, + 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, + 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20, + 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, + 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, + 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, + 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, + 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, + 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, + 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, + 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, + 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, + 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, + 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, + 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, + 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, + 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, + 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, + 0x55, 0x5d, 0xe8, 0x85, 
0x7d, 0xf9, 0x21, 0x35, + 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, + 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, + 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01, + 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, + 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, + 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, + 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, + 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, + 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, + 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, + 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, + 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, + 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, + 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, + 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, + 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, + 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, + 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, + 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, + 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, + 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, + 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, + 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e, + 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, + 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, + 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, + 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, + 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, + 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, + 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, + 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, + 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, + 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, + 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, + 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, + 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, + 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, + 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, + 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, + 0xae, 0x19, 0x2a, 0x48, 
0x5a, 0xff, 0x36, 0xcd, + 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, + 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, + 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69, + 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, + 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, + 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, + 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57, + 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, + 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, + 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, + 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, + 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, + 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, + 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, + 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, + 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, + 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, + 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, + 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, + 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, + 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc, + 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0, + 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, + 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, + 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, + 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, + 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, + 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, + 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, + 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, + 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, + 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, + 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, + 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, + 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, + 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, + 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, + 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, + 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, + 0xb4, 0x8b, 0x79, 0x2e, 
0x18, 0x96, 0x94, 0xde, + 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, + 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, + 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f, + 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, + 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, + 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, + 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, + 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, + 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, + 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, + 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, + 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, + 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, + 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, + 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, + 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, + 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, + 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, + 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, + 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76, + 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5, + 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, + 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, + 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, + 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, + 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, + 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, + 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, + 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, + 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, + 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, + 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, + 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, + 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, + 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, + 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, + 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, + 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6, + 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, + 0x1a, 0xe8, 0x94, 0x55, 
0x7d, 0x08, 0xe5, 0xdc, + 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, + 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, + 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76, + 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, + 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, + 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, + 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, + 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, + 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, + 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, + 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, + 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, + 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, + 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, + 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, + 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, + 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, + 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, + 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9, + 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, + 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, + 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, + 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, + 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, + 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, + 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, + 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, + 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, + 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, + 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, + 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, + 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, + 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, + 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, + 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, + 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, + 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, + 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, + 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, + 0x18, 0x0d, 0xc9, 0x9f, 
0x7b, 0x0c, 0x9b, 0x8f, + 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, + 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, + 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d, + 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, + 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, + 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, + 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, + 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, + 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, + 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, + 0x10, 0x1e, 0xbf, 0xec, 0xa8 +}; +static const u8 dec_assoc011[] __initconst = { + 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 +}; +static const u8 dec_nonce011[] __initconst = { + 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa +}; +static const u8 dec_key011[] __initconst = { + 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, + 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, + 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, + 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 +}; + +static const u8 dec_input012[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 
0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 
0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 
0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 
0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 
0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 
0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd6 +}; +static const u8 dec_output012[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 
0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 
0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 
0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 
0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 
0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 
0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, + 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 
0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 dec_assoc012[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 dec_nonce012[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 dec_key012[] __initconst = { + 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +static const u8 dec_input013[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 
0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 
0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 
0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 
0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 
0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 
0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd7 +}; +static const u8 dec_output013[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 
0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 
0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 
0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 
0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 
0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 
0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, + 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 
0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 dec_assoc013[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 dec_nonce013[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 dec_key013[] __initconst = { + 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +static const struct chacha20poly1305_testvec +chacha20poly1305_dec_vectors[] __initconst = { + { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001, + sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) }, + { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002, + sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) }, + { dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003, + sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) }, + { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004, + sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) }, + { dec_input005, dec_output005, 
dec_assoc005, dec_nonce005, dec_key005, + sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) }, + { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006, + sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) }, + { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007, + sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) }, + { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008, + sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) }, + { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009, + sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) }, + { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010, + sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) }, + { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011, + sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) }, + { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012, + sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) }, + { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013, + sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013), + true } +}; + +static const u8 xenc_input001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 
0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 xenc_output001[] __initconst = { + 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, + 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, + 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, + 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, + 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, + 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, + 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2, + 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, + 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, + 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, + 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd, + 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, + 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f, + 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, + 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, + 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, + 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, + 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, + 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, + 0xb6, 0xd4, 0x3e, 
0x23, 0xe5, 0x44, 0x15, 0xbf, + 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, + 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, + 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, + 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, + 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, + 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, + 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, + 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, + 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, + 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, + 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, + 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, + 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, + 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, + 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, + 0x9c +}; +static const u8 xenc_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 xenc_nonce001[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 +}; +static const u8 xenc_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const struct chacha20poly1305_testvec +xchacha20poly1305_enc_vectors[] __initconst = { + { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001, + sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) } +}; + +static const u8 xdec_input001[] __initconst = { + 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, + 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, + 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, + 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, + 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, + 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, + 0x26, 0xee, 0x84, 0xd7, 
0xef, 0x82, 0x9e, 0xe2, + 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, + 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, + 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, + 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd, + 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, + 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f, + 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, + 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, + 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, + 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, + 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, + 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, + 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf, + 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, + 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, + 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, + 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, + 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, + 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, + 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, + 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, + 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, + 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, + 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, + 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, + 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, + 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, + 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, + 0x9c +}; +static const u8 xdec_output001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 
0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 xdec_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 xdec_nonce001[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 +}; +static const u8 xdec_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const struct chacha20poly1305_testvec +xchacha20poly1305_dec_vectors[] __initconst = { + { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001, + sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) } +}; + +static void __init 
+chacha20poly1305_selftest_encrypt_bignonce(u8 *dst, const u8 *src, + const size_t src_len, const u8 *ad, + const size_t ad_len, + const u8 nonce[12], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + union { + u8 block0[POLY1305_KEY_SIZE]; + __le64 lens[2]; + } b = {{ 0 }}; + + simd_get(&simd_context); + chacha20_init(&chacha20_state, key, 0); + chacha20_state.counter[1] = get_unaligned_le32(nonce + 0); + chacha20_state.counter[2] = get_unaligned_le32(nonce + 4); + chacha20_state.counter[3] = get_unaligned_le32(nonce + 8); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + &simd_context); + poly1305_init(&poly1305_state, b.block0); + poly1305_update(&poly1305_state, ad, ad_len, &simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + &simd_context); + chacha20(&chacha20_state, dst, src, src_len, &simd_context); + poly1305_update(&poly1305_state, dst, src_len, &simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + &simd_context); + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + &simd_context); + poly1305_final(&poly1305_state, dst + src_len, &simd_context); + simd_put(&simd_context); + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); +} + +static void __init +chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 *nonce, const size_t nonce_len, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + if (nonce_len == 8) + chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce), key); + else if (nonce_len == 12) + chacha20poly1305_selftest_encrypt_bignonce(dst, src, src_len, + ad, ad_len, nonce, + key); + else + BUG(); +} + +static bool __init +decryption_success(bool 
func_ret, bool expect_failure, int memcmp_result) +{ + if (expect_failure) + return !func_ret; + return func_ret && !memcmp_result; +} + +static bool __init chacha20poly1305_selftest(void) +{ + enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 }; + size_t i, j, k, total_len; + u8 *computed_output = NULL, *input = NULL; + bool success = true, ret; + simd_context_t simd_context; + struct scatterlist sg_src[3]; + + computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); + input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); + if (!computed_output || !input) { + pr_err("chacha20poly1305 self-test malloc: FAIL\n"); + success = false; + goto out; + } + + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + chacha20poly1305_selftest_encrypt(computed_output, + chacha20poly1305_enc_vectors[i].input, + chacha20poly1305_enc_vectors[i].ilen, + chacha20poly1305_enc_vectors[i].assoc, + chacha20poly1305_enc_vectors[i].alen, + chacha20poly1305_enc_vectors[i].nonce, + chacha20poly1305_enc_vectors[i].nlen, + chacha20poly1305_enc_vectors[i].key); + if (memcmp(computed_output, + chacha20poly1305_enc_vectors[i].output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { + if (chacha20poly1305_enc_vectors[i].nlen != 8) + continue; + memcpy(computed_output, chacha20poly1305_enc_vectors[i].input, + chacha20poly1305_enc_vectors[i].ilen); + sg_init_one(sg_src, computed_output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE); + ret = chacha20poly1305_encrypt_sg_inplace(sg_src, + chacha20poly1305_enc_vectors[i].ilen, + chacha20poly1305_enc_vectors[i].assoc, + chacha20poly1305_enc_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce), + chacha20poly1305_enc_vectors[i].key, + 
&simd_context); + if (!ret || memcmp(computed_output, + chacha20poly1305_enc_vectors[i].output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_put(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + ret = chacha20poly1305_decrypt(computed_output, + chacha20poly1305_dec_vectors[i].input, + chacha20poly1305_dec_vectors[i].ilen, + chacha20poly1305_dec_vectors[i].assoc, + chacha20poly1305_dec_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), + chacha20poly1305_dec_vectors[i].key); + if (!decryption_success(ret, + chacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, + chacha20poly1305_dec_vectors[i].output, + chacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { + memcpy(computed_output, chacha20poly1305_dec_vectors[i].input, + chacha20poly1305_dec_vectors[i].ilen); + sg_init_one(sg_src, computed_output, + chacha20poly1305_dec_vectors[i].ilen); + ret = chacha20poly1305_decrypt_sg_inplace(sg_src, + chacha20poly1305_dec_vectors[i].ilen, + chacha20poly1305_dec_vectors[i].assoc, + chacha20poly1305_dec_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), + chacha20poly1305_dec_vectors[i].key, &simd_context); + if (!decryption_success(ret, + chacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, chacha20poly1305_dec_vectors[i].output, + chacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_put(&simd_context); + for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); 
++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + xchacha20poly1305_encrypt(computed_output, + xchacha20poly1305_enc_vectors[i].input, + xchacha20poly1305_enc_vectors[i].ilen, + xchacha20poly1305_enc_vectors[i].assoc, + xchacha20poly1305_enc_vectors[i].alen, + xchacha20poly1305_enc_vectors[i].nonce, + xchacha20poly1305_enc_vectors[i].key); + if (memcmp(computed_output, + xchacha20poly1305_enc_vectors[i].output, + xchacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + ret = xchacha20poly1305_decrypt(computed_output, + xchacha20poly1305_dec_vectors[i].input, + xchacha20poly1305_dec_vectors[i].ilen, + xchacha20poly1305_dec_vectors[i].assoc, + xchacha20poly1305_dec_vectors[i].alen, + xchacha20poly1305_dec_vectors[i].nonce, + xchacha20poly1305_dec_vectors[i].key); + if (!decryption_success(ret, + xchacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, + xchacha20poly1305_dec_vectors[i].output, + xchacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + + simd_get(&simd_context); + for (total_len = POLY1305_MAC_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST) + && total_len <= 1 << 10; ++total_len) { + for (i = 0; i <= total_len; ++i) { + for (j = i; j <= total_len; ++j) { + sg_init_table(sg_src, 3); + sg_set_buf(&sg_src[0], input, i); + sg_set_buf(&sg_src[1], input + i, j - i); + sg_set_buf(&sg_src[2], input + j, total_len - j); + memset(computed_output, 0, total_len); + memset(input, 0, total_len); + + if (!chacha20poly1305_encrypt_sg_inplace(sg_src, + total_len - POLY1305_MAC_SIZE, NULL, 0, + 0, enc_key001, &simd_context)) + goto chunkfail; + chacha20poly1305_encrypt(computed_output, + 
computed_output, + total_len - POLY1305_MAC_SIZE, NULL, 0, 0, + enc_key001); + if (memcmp(computed_output, input, total_len)) + goto chunkfail;; + if (!chacha20poly1305_decrypt(computed_output, + input, total_len, NULL, 0, 0, enc_key001)) + goto chunkfail; + for (k = 0; k < total_len - POLY1305_MAC_SIZE; ++k) { + if (computed_output[k]) + goto chunkfail; + } + if (!chacha20poly1305_decrypt_sg_inplace(sg_src, + total_len, NULL, 0, 0, enc_key001, + &simd_context)) + goto chunkfail; + for (k = 0; k < total_len - POLY1305_MAC_SIZE; ++k) { + if (input[k]) + goto chunkfail; + } + continue; + + chunkfail: + pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n", + total_len, i, j); + success = false; + } + + } + } + simd_put(&simd_context); + +out: + kfree(computed_output); + kfree(input); + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/curve25519.c b/net/wireguard/crypto/zinc/selftest/curve25519.c new file mode 100644 index 000000000000..0e3e3af06ba4 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/curve25519.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +struct curve25519_test_vector { + u8 private[CURVE25519_KEY_SIZE]; + u8 public[CURVE25519_KEY_SIZE]; + u8 result[CURVE25519_KEY_SIZE]; + bool valid; +}; +static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = { + { + .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, + 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, + 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, + 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, + .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, + 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, + 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, + 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, + .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, + 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, + 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, + 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, + .valid = true + }, + { + .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, + 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, + 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, + 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, + .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, + 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, + 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, + 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, + .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, + 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, + 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, + 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, + .valid = true + }, + { + .private = { 1 }, + .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, + 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, + 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98, + 0xdd, 0xcd, 
0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f }, + .valid = true + }, + { + .private = { 1 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, + 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, + 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3, + 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 }, + .valid = true + }, + { + .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, + 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, + 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, + 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, + .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, + 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, + 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, + 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, + .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, + 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, + 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, + 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, + .valid = true + }, + { + .private = { 1, 2, 3, 4 }, + .public = { 0 }, + .result = { 0 }, + .valid = false + }, + { + .private = { 2, 4, 6, 8 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 }, + .result = { 0 }, + .valid = false + }, + { + .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 
0x9f }, + .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2, + 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57, + 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05, + 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 }, + .valid = true + }, + { + .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 }, + .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d, + 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12, + 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99, + 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c }, + .valid = true + }, + /* wycheproof - normal case */ + { + .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda, + 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66, + 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3, + 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba }, + .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5, + 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9, + 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e, + 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a }, + .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5, + 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38, + 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e, + 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4, + 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5, + 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49, + 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 }, + .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5, + 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8, + 0x08, 0x89, 
0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3, + 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 }, + .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff, + 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d, + 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe, + 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9, + 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39, + 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5, + 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 }, + .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f, + 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b, + 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c, + 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 }, + .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53, + 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57, + 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0, + 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc, + 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d, + 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67, + 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c }, + .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97, + 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f, + 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45, + 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a }, + .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93, + 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2, + 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44, + 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1, + 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95, + 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99, + 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d 
}, + .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27, + 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07, + 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae, + 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c }, + .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73, + 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2, + 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f, + 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9, + 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd, + 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b, + 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 }, + .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5, + 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52, + 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8, + 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 }, + .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86, + 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4, + 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6, + 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 }, + .valid = true + }, + /* wycheproof - public key = 0 */ + { + .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11, + 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac, + 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b, + 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key = 1 */ + { + .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61, + 0x93, 0x0e, 0xb9, 0x45, 
0x0b, 0x50, 0x70, 0xea, + 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f, + 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab }, + .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04, + 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77, + 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90, + 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 }, + .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97, + 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9, + 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7, + 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36, + 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd, + 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c, + 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 }, + .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e, + 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b, + 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e, + 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 }, + .valid = true + }, + /* 
wycheproof - edge case on twist */ + { + .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed, + 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e, + 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd, + 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 }, + .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff, + 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00, + 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 }, + .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f, + 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1, + 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10, + 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3, + 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d, + 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00, + 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 }, + .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00, + 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff, + 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f }, + .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8, + 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4, + 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70, + 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3, + 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a, + 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e, + 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 }, + .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57, + 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c, + 
0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59, + 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f, + 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42, + 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9, + 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 }, + .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c, + 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5, + 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65, + 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6, + 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4, + 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8, + 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe }, + .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7, + 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca, + 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f, + 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa, + 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3, + 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52, + 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 
0x00, 0x00, 0x00, 0x00 }, + .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3, + 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e, + 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75, + 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26, + 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea, + 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00, + 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, + .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8, + 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32, + 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87, + 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c, + 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6, + 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb, + 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 }, + .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, + 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff, + 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f }, + .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85, + 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f, + 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0, + 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38, + 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b, + 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c, + 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b, + 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81, + 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3, + 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d, + 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42, + 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98, + 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f }, + .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c, + 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9, + 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89, + 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29, + 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6, + 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c, + 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f }, + .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75, + 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89, + 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c, + 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f }, + .valid = true + }, + /* wycheproof - public key with low order */ + { + .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30, + 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69, + 
0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14, + 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3, + 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b, + 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef, + 0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 }, + .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20, + 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf, + 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43, + 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 }, + .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - 
public key with low order */ + { + .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f, + 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65, + 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06, + 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 }, + .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe, + 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9, + 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f, + 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f }, + .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8, + 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85, + 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c, + 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8, + 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d, + 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0, + 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 }, + .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a, + 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b, + 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67, + 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46, + 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02, + 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3, + 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 }, + .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 
0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30, + 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1, + 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6, + 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe }, + .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f, + 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77, + 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0, + 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c }, + .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e, + 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f, + 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77, + 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b }, + .public = { 0xee, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key >= p */ + { + .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc, + 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1, + 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d, + 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae }, + .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09, + 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde, + 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1, + 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81, + 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a, + 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99, + 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d }, + .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17, + 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35, + 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55, + 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11, + 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b, + 
0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9, + 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 }, + .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53, + 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e, + 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6, + 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78, + 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2, + 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd, + 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb, + 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40, + 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2, + 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9, + 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60, + 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13, + 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 }, + .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c, + 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3, + 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65, + 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c }, + .valid = true + }, + /* wycheproof - public key >= p */ + { 
+ .private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a, + 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7, + 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11, + 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e }, + .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82, + 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4, + 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c, + 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e, + 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a, + 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d, + 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f }, + .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2, + 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60, + 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25, + 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb, + 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97, + 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c, + 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 }, + .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23, + 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8, + 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 
0x69, + 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a, + 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23, + 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b, + 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 }, + .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b, + 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44, + 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37, + 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80, + 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d, + 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b, + 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 }, + .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63, + 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae, + 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f, + 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0, + 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd, + 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49, + 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 }, + .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x60, 0x2f, 0xf4, 0x07, 
0x89, 0xb5, 0x4b, 0x41, + 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0, + 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf, + 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9, + 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa, + 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5, + 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e }, + .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47, + 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3, + 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b, + 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8, + 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98, + 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0, + 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 }, + .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0, + 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1, + 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a, + 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02, + 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4, + 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68, + 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d }, + .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f, + 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2, + 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95, + 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7, + 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06, + 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9, + 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 }, + .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5, + 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0, + 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80, + 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd, + 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4, + 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04, + 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0, + 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac, + 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48, + 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 }, + .valid = true + }, + /* wycheproof - RFC 7748 */ + { + .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, + 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, + 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, + 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 }, + .public = { 0xe6, 
0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, + 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, + 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, + 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, + .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, + 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, + 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, + 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, + .valid = true + }, + /* wycheproof - RFC 7748 */ + { + .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, + 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, + 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, + 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d }, + .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, + 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, + 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, + 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 }, + .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, + 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, + 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, + 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde, + 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8, + 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4, + 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 }, + .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 
0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d, + 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64, + 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd, + 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 }, + .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8, + 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf, + 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94, + 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d }, + .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84, + 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62, + 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e, + 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 }, + .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + 
/* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8, + 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58, + 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02, + 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 }, + .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9, + 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a, + 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44, + 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b }, + .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd, + 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22, + 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56, + 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b }, + .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53, + 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f, + 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18, + 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f }, + .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55, + 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b, + 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79, + 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f }, + .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39, + 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c, + 0x92, 0x8e, 0x91, 0x36, 0xf0, 
0x62, 0x0a, 0xeb, + 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e }, + .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04, + 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10, + 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58, + 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c }, + .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3, + 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c, + 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88, + 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 }, + .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 
0x63 }, + .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a, + 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49, + 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a, + 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca, + 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c, + 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb, + 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58, + 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7, + 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01, + 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d }, + .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d, + 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27, + 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b, + 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26, + 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2, + 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44, + 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e }, + .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6, + 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d, + 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e, + 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61, + 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67, + 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e, + 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c }, + .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65, + 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce, + 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0, + 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee, + 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d, + 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14, + 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 }, + .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e, + 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc, + 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5, + 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b }, 
+ .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4, + 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5, + 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c, + 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 }, + .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b, + 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93, + 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f, + 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 }, + .valid = true + }, + /* wycheproof - private key == -1 (mod order) */ + { + .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8, + 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 }, + .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, + 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, + 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, + 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, + .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, + 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, + 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, + 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, + .valid = true + }, + /* wycheproof - private key == 1 (mod order) on twist */ + { + .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef, + 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f }, + .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, + 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, + 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, + 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, + .result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 
0x9f, + 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, + 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, + 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, + .valid = true + } +}; + +static bool __init curve25519_selftest(void) +{ + bool success = true, ret, ret2; + size_t i = 0, j; + u8 in[CURVE25519_KEY_SIZE]; + u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE]; + + for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) { + memset(out, 0, CURVE25519_KEY_SIZE); + ret = curve25519(out, curve25519_test_vectors[i].private, + curve25519_test_vectors[i].public); + if (ret != curve25519_test_vectors[i].valid || + memcmp(out, curve25519_test_vectors[i].result, + CURVE25519_KEY_SIZE)) { + pr_err("curve25519 self-test %zu: FAIL\n", i + 1); + success = false; + } + } + + for (i = 0; i < 5; ++i) { + get_random_bytes(in, sizeof(in)); + ret = curve25519_generate_public(out, in); + ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 }); + if (ret != ret2 || memcmp(out, out2, CURVE25519_KEY_SIZE)) { + pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x", + i + 1); + for (j = CURVE25519_KEY_SIZE; j-- > 0;) + printk(KERN_CONT "%02x", in[j]); + printk(KERN_CONT "\n"); + success = false; + } + } + + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/poly1305.c b/net/wireguard/crypto/zinc/selftest/poly1305.c new file mode 100644 index 000000000000..b4d7a9c2f6ec --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/poly1305.c @@ -0,0 +1,1107 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +struct poly1305_testvec { + const u8 *input, *output, *key; + size_t ilen; +}; + +/* RFC7539 */ +static const u8 input01[] __initconst = { + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x70, 0x68, 0x69, 0x63, 0x20, 0x46, 0x6f, + 0x72, 0x75, 0x6d, 0x20, 0x52, 0x65, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x20, 0x47, 0x72, 0x6f, + 0x75, 0x70 +}; +static const u8 output01[] __initconst = { + 0xa8, 0x06, 0x1d, 0xc1, 0x30, 0x51, 0x36, 0xc6, + 0xc2, 0x2b, 0x8b, 0xaf, 0x0c, 0x01, 0x27, 0xa9 +}; +static const u8 key01[] __initconst = { + 0x85, 0xd6, 0xbe, 0x78, 0x57, 0x55, 0x6d, 0x33, + 0x7f, 0x44, 0x52, 0xfe, 0x42, 0xd5, 0x06, 0xa8, + 0x01, 0x03, 0x80, 0x8a, 0xfb, 0x0d, 0xb2, 0xfd, + 0x4a, 0xbf, 0xf6, 0xaf, 0x41, 0x49, 0xf5, 0x1b +}; + +/* "The Poly1305-AES message-authentication code" */ +static const u8 input02[] __initconst = { + 0xf3, 0xf6 +}; +static const u8 output02[] __initconst = { + 0xf4, 0xc6, 0x33, 0xc3, 0x04, 0x4f, 0xc1, 0x45, + 0xf8, 0x4f, 0x33, 0x5c, 0xb8, 0x19, 0x53, 0xde +}; +static const u8 key02[] __initconst = { + 0x85, 0x1f, 0xc4, 0x0c, 0x34, 0x67, 0xac, 0x0b, + 0xe0, 0x5c, 0xc2, 0x04, 0x04, 0xf3, 0xf7, 0x00, + 0x58, 0x0b, 0x3b, 0x0f, 0x94, 0x47, 0xbb, 0x1e, + 0x69, 0xd0, 0x95, 0xb5, 0x92, 0x8b, 0x6d, 0xbc +}; + +static const u8 input03[] __initconst = { }; +static const u8 output03[] __initconst = { + 0xdd, 0x3f, 0xab, 0x22, 0x51, 0xf1, 0x1a, 0xc7, + 0x59, 0xf0, 0x88, 0x71, 0x29, 0xcc, 0x2e, 0xe7 +}; +static const u8 key03[] __initconst = { + 0xa0, 0xf3, 0x08, 0x00, 0x00, 0xf4, 0x64, 0x00, + 0xd0, 0xc7, 0xe9, 0x07, 0x6c, 0x83, 0x44, 0x03, + 0xdd, 0x3f, 0xab, 0x22, 0x51, 0xf1, 0x1a, 0xc7, + 0x59, 0xf0, 0x88, 0x71, 0x29, 0xcc, 0x2e, 0xe7 +}; + +static const u8 input04[] __initconst = { + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output04[] __initconst = { + 
0x0e, 0xe1, 0xc1, 0x6b, 0xb7, 0x3f, 0x0f, 0x4f, + 0xd1, 0x98, 0x81, 0x75, 0x3c, 0x01, 0xcd, 0xbe +}; +static const u8 key04[] __initconst = { + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef +}; + +static const u8 input05[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9 +}; +static const u8 output05[] __initconst = { + 0x51, 0x54, 0xad, 0x0d, 0x2c, 0xb2, 0x6e, 0x01, + 0x27, 0x4f, 0xc5, 0x11, 0x48, 0x49, 0x1f, 0x1b +}; +static const u8 key05[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +/* self-generated vectors exercise "significant" lengths, such that they + * are handled by different code paths */ +static const u8 input06[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf +}; +static const u8 output06[] __initconst = { + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66 +}; +static const u8 key06[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 
0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input07[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67 +}; +static const u8 output07[] __initconst = { + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 key07[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input08[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output08[] __initconst = { + 0xbb, 0xb6, 0x13, 0xb2, 0xb6, 0xd7, 0x53, 0xba, + 0x07, 0x39, 0x5b, 0x91, 0x6a, 0xae, 0xce, 0x15 +}; +static const u8 key08[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input09[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 
0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24 +}; +static const u8 output09[] __initconst = { + 0xc7, 0x94, 0xd7, 0x05, 0x7d, 0x17, 0x78, 0xc4, + 0xbb, 0xee, 0x0a, 0x39, 0xb3, 0xd9, 0x73, 0x42 +}; +static const u8 key09[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input10[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output10[] __initconst = { + 0xff, 0xbc, 0xb9, 0xb3, 0x71, 0x42, 0x31, 0x52, + 
0xd7, 0xfc, 0xa5, 0xad, 0x04, 0x2f, 0xba, 0xa9 +}; +static const u8 key10[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input11[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66 +}; +static const u8 output11[] __initconst = { + 0x06, 0x9e, 0xd6, 0xb8, 0xef, 0x0f, 0x20, 0x7b, + 0x3e, 0x24, 0x3b, 0xb1, 0x01, 0x9f, 0xe6, 0x32 +}; +static const u8 key11[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input12[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 
0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 output12[] __initconst = { + 0xcc, 0xa3, 0x39, 0xd9, 0xa4, 0x5f, 0xa2, 0x36, + 0x8c, 0x2c, 0x68, 0xb3, 0xa4, 0x17, 0x91, 0x33 +}; +static const u8 key12[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input13[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 
0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61, + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output13[] __initconst = { + 0x53, 0xf6, 0xe8, 0x28, 0xa2, 0xf0, 0xfe, 0x0e, + 0xe8, 0x15, 0xbf, 0x0b, 0xd5, 0x84, 0x1a, 0x34 +}; +static const u8 key13[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input14[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 
0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61, + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 output14[] __initconst = { + 0xb8, 0x46, 0xd4, 0x4e, 0x9b, 0xbd, 0x53, 0xce, + 0xdf, 0xfb, 0xfb, 0xb6, 0xb7, 0xfa, 0x49, 0x33 +}; +static const u8 key14[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +/* 4th power of the key spills to 131th bit in SIMD key setup */ +static const u8 input15[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output15[] __initconst = { + 0x07, 0x14, 0x5a, 0x4c, 0x02, 0xfe, 0x5f, 0xa3, + 0x20, 0x36, 0xde, 0x68, 0xfa, 0xbe, 0x90, 0x66 +}; +static const u8 key15[] __initconst = { + 0xad, 0x62, 0x81, 0x07, 0xe8, 0x35, 0x1d, 0x0f, + 0x2c, 0x23, 0x1a, 0x05, 0xdc, 0x4a, 0x41, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* OpenSSL's poly1305_ieee754.c failed this in final stage */ +static const u8 input16[] __initconst = { + 0x84, 0x23, 0x64, 
0xe1, 0x56, 0x33, 0x6c, 0x09, + 0x98, 0xb9, 0x33, 0xa6, 0x23, 0x77, 0x26, 0x18, + 0x0d, 0x9e, 0x3f, 0xdc, 0xbd, 0xe4, 0xcd, 0x5d, + 0x17, 0x08, 0x0f, 0xc3, 0xbe, 0xb4, 0x96, 0x14, + 0xd7, 0x12, 0x2c, 0x03, 0x74, 0x63, 0xff, 0x10, + 0x4d, 0x73, 0xf1, 0x9c, 0x12, 0x70, 0x46, 0x28, + 0xd4, 0x17, 0xc4, 0xc5, 0x4a, 0x3f, 0xe3, 0x0d, + 0x3c, 0x3d, 0x77, 0x14, 0x38, 0x2d, 0x43, 0xb0, + 0x38, 0x2a, 0x50, 0xa5, 0xde, 0xe5, 0x4b, 0xe8, + 0x44, 0xb0, 0x76, 0xe8, 0xdf, 0x88, 0x20, 0x1a, + 0x1c, 0xd4, 0x3b, 0x90, 0xeb, 0x21, 0x64, 0x3f, + 0xa9, 0x6f, 0x39, 0xb5, 0x18, 0xaa, 0x83, 0x40, + 0xc9, 0x42, 0xff, 0x3c, 0x31, 0xba, 0xf7, 0xc9, + 0xbd, 0xbf, 0x0f, 0x31, 0xae, 0x3f, 0xa0, 0x96, + 0xbf, 0x8c, 0x63, 0x03, 0x06, 0x09, 0x82, 0x9f, + 0xe7, 0x2e, 0x17, 0x98, 0x24, 0x89, 0x0b, 0xc8, + 0xe0, 0x8c, 0x31, 0x5c, 0x1c, 0xce, 0x2a, 0x83, + 0x14, 0x4d, 0xbb, 0xff, 0x09, 0xf7, 0x4e, 0x3e, + 0xfc, 0x77, 0x0b, 0x54, 0xd0, 0x98, 0x4a, 0x8f, + 0x19, 0xb1, 0x47, 0x19, 0xe6, 0x36, 0x35, 0x64, + 0x1d, 0x6b, 0x1e, 0xed, 0xf6, 0x3e, 0xfb, 0xf0, + 0x80, 0xe1, 0x78, 0x3d, 0x32, 0x44, 0x54, 0x12, + 0x11, 0x4c, 0x20, 0xde, 0x0b, 0x83, 0x7a, 0x0d, + 0xfa, 0x33, 0xd6, 0xb8, 0x28, 0x25, 0xff, 0xf4, + 0x4c, 0x9a, 0x70, 0xea, 0x54, 0xce, 0x47, 0xf0, + 0x7d, 0xf6, 0x98, 0xe6, 0xb0, 0x33, 0x23, 0xb5, + 0x30, 0x79, 0x36, 0x4a, 0x5f, 0xc3, 0xe9, 0xdd, + 0x03, 0x43, 0x92, 0xbd, 0xde, 0x86, 0xdc, 0xcd, + 0xda, 0x94, 0x32, 0x1c, 0x5e, 0x44, 0x06, 0x04, + 0x89, 0x33, 0x6c, 0xb6, 0x5b, 0xf3, 0x98, 0x9c, + 0x36, 0xf7, 0x28, 0x2c, 0x2f, 0x5d, 0x2b, 0x88, + 0x2c, 0x17, 0x1e, 0x74 +}; +static const u8 output16[] __initconst = { + 0xf2, 0x48, 0x31, 0x2e, 0x57, 0x8d, 0x9d, 0x58, + 0xf8, 0xb7, 0xbb, 0x4d, 0x19, 0x10, 0x54, 0x31 +}; +static const u8 key16[] __initconst = { + 0x95, 0xd5, 0xc0, 0x05, 0x50, 0x3e, 0x51, 0x0d, + 0x8c, 0xd0, 0xaa, 0x07, 0x2c, 0x4a, 0x4d, 0x06, + 0x6e, 0xab, 0xc5, 0x2d, 0x11, 0x65, 0x3d, 0xf4, + 0x7f, 0xbf, 0x63, 0xab, 0x19, 0x8b, 0xcc, 0x26 +}; + +/* AVX2 in OpenSSL's poly1305-x86.pl failed 
this with 176+32 split */ +static const u8 input17[] __initconst = { + 0x24, 0x8a, 0xc3, 0x10, 0x85, 0xb6, 0xc2, 0xad, + 0xaa, 0xa3, 0x82, 0x59, 0xa0, 0xd7, 0x19, 0x2c, + 0x5c, 0x35, 0xd1, 0xbb, 0x4e, 0xf3, 0x9a, 0xd9, + 0x4c, 0x38, 0xd1, 0xc8, 0x24, 0x79, 0xe2, 0xdd, + 0x21, 0x59, 0xa0, 0x77, 0x02, 0x4b, 0x05, 0x89, + 0xbc, 0x8a, 0x20, 0x10, 0x1b, 0x50, 0x6f, 0x0a, + 0x1a, 0xd0, 0xbb, 0xab, 0x76, 0xe8, 0x3a, 0x83, + 0xf1, 0xb9, 0x4b, 0xe6, 0xbe, 0xae, 0x74, 0xe8, + 0x74, 0xca, 0xb6, 0x92, 0xc5, 0x96, 0x3a, 0x75, + 0x43, 0x6b, 0x77, 0x61, 0x21, 0xec, 0x9f, 0x62, + 0x39, 0x9a, 0x3e, 0x66, 0xb2, 0xd2, 0x27, 0x07, + 0xda, 0xe8, 0x19, 0x33, 0xb6, 0x27, 0x7f, 0x3c, + 0x85, 0x16, 0xbc, 0xbe, 0x26, 0xdb, 0xbd, 0x86, + 0xf3, 0x73, 0x10, 0x3d, 0x7c, 0xf4, 0xca, 0xd1, + 0x88, 0x8c, 0x95, 0x21, 0x18, 0xfb, 0xfb, 0xd0, + 0xd7, 0xb4, 0xbe, 0xdc, 0x4a, 0xe4, 0x93, 0x6a, + 0xff, 0x91, 0x15, 0x7e, 0x7a, 0xa4, 0x7c, 0x54, + 0x44, 0x2e, 0xa7, 0x8d, 0x6a, 0xc2, 0x51, 0xd3, + 0x24, 0xa0, 0xfb, 0xe4, 0x9d, 0x89, 0xcc, 0x35, + 0x21, 0xb6, 0x6d, 0x16, 0xe9, 0xc6, 0x6a, 0x37, + 0x09, 0x89, 0x4e, 0x4e, 0xb0, 0xa4, 0xee, 0xdc, + 0x4a, 0xe1, 0x94, 0x68, 0xe6, 0x6b, 0x81, 0xf2, + 0x71, 0x35, 0x1b, 0x1d, 0x92, 0x1e, 0xa5, 0x51, + 0x04, 0x7a, 0xbc, 0xc6, 0xb8, 0x7a, 0x90, 0x1f, + 0xde, 0x7d, 0xb7, 0x9f, 0xa1, 0x81, 0x8c, 0x11, + 0x33, 0x6d, 0xbc, 0x07, 0x24, 0x4a, 0x40, 0xeb +}; +static const u8 output17[] __initconst = { + 0xbc, 0x93, 0x9b, 0xc5, 0x28, 0x14, 0x80, 0xfa, + 0x99, 0xc6, 0xd6, 0x8c, 0x25, 0x8e, 0xc4, 0x2f +}; +static const u8 key17[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* test vectors from Google */ +static const u8 input18[] __initconst = { }; +static const u8 output18[] __initconst = { + 0x47, 0x10, 0x13, 0x0e, 0x9f, 0x6f, 0xea, 0x8d, + 0x72, 0x29, 0x38, 0x50, 0xa6, 0x67, 0xd8, 0x6c +}; +static 
const u8 key18[] __initconst = { + 0xc8, 0xaf, 0xaa, 0xc3, 0x31, 0xee, 0x37, 0x2c, + 0xd6, 0x08, 0x2d, 0xe1, 0x34, 0x94, 0x3b, 0x17, + 0x47, 0x10, 0x13, 0x0e, 0x9f, 0x6f, 0xea, 0x8d, + 0x72, 0x29, 0x38, 0x50, 0xa6, 0x67, 0xd8, 0x6c +}; + +static const u8 input19[] __initconst = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, 0x21 +}; +static const u8 output19[] __initconst = { + 0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, + 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0 +}; +static const u8 key19[] __initconst = { + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x33, 0x32, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x50, 0x6f, 0x6c, 0x79, 0x31, 0x33, 0x30, 0x35 +}; + +static const u8 input20[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output20[] __initconst = { + 0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, + 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07 +}; +static const u8 key20[] __initconst = { + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x33, 0x32, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x50, 0x6f, 0x6c, 0x79, 0x31, 0x33, 0x30, 0x35 +}; + +static const u8 input21[] __initconst = { + 0x89, 0xda, 0xb8, 0x0b, 0x77, 0x17, 0xc1, 0xdb, + 0x5d, 0xb4, 0x37, 0x86, 0x0a, 0x3f, 0x70, 0x21, + 0x8e, 0x93, 0xe1, 0xb8, 0xf4, 0x61, 0xfb, 0x67, + 0x7f, 0x16, 0xf3, 0x5f, 0x6f, 0x87, 0xe2, 0xa9, + 0x1c, 0x99, 0xbc, 0x3a, 0x47, 0xac, 0xe4, 0x76, + 0x40, 0xcc, 0x95, 0xc3, 0x45, 0xbe, 0x5e, 0xcc, + 0xa5, 0xa3, 0x52, 0x3c, 0x35, 0xcc, 0x01, 0x89, + 0x3a, 0xf0, 0xb6, 0x4a, 0x62, 0x03, 0x34, 0x27, + 0x03, 0x72, 0xec, 0x12, 0x48, 0x2d, 0x1b, 0x1e, + 0x36, 0x35, 0x61, 0x69, 0x8a, 0x57, 0x8b, 0x35, + 0x98, 0x03, 0x49, 0x5b, 0xb4, 0xe2, 0xef, 0x19, + 0x30, 0xb1, 
0x7a, 0x51, 0x90, 0xb5, 0x80, 0xf1, + 0x41, 0x30, 0x0d, 0xf3, 0x0a, 0xdb, 0xec, 0xa2, + 0x8f, 0x64, 0x27, 0xa8, 0xbc, 0x1a, 0x99, 0x9f, + 0xd5, 0x1c, 0x55, 0x4a, 0x01, 0x7d, 0x09, 0x5d, + 0x8c, 0x3e, 0x31, 0x27, 0xda, 0xf9, 0xf5, 0x95 +}; +static const u8 output21[] __initconst = { + 0xc8, 0x5d, 0x15, 0xed, 0x44, 0xc3, 0x78, 0xd6, + 0xb0, 0x0e, 0x23, 0x06, 0x4c, 0x7b, 0xcd, 0x51 +}; +static const u8 key21[] __initconst = { + 0x2d, 0x77, 0x3b, 0xe3, 0x7a, 0xdb, 0x1e, 0x4d, + 0x68, 0x3b, 0xf0, 0x07, 0x5e, 0x79, 0xc4, 0xee, + 0x03, 0x79, 0x18, 0x53, 0x5a, 0x7f, 0x99, 0xcc, + 0xb7, 0x04, 0x0f, 0xb5, 0xf5, 0xf4, 0x3a, 0xea +}; + +static const u8 input22[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x17, 0x03, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x06, 0xdb, 0x1f, 0x1f, 0x36, 0x8d, 0x69, 0x6a, + 0x81, 0x0a, 0x34, 0x9c, 0x0c, 0x71, 0x4c, 0x9a, + 0x5e, 0x78, 0x50, 0xc2, 0x40, 0x7d, 0x72, 0x1a, + 0xcd, 0xed, 0x95, 0xe0, 0x18, 0xd7, 0xa8, 0x52, + 0x66, 0xa6, 0xe1, 0x28, 0x9c, 0xdb, 0x4a, 0xeb, + 0x18, 0xda, 0x5a, 0xc8, 0xa2, 0xb0, 0x02, 0x6d, + 0x24, 0xa5, 0x9a, 0xd4, 0x85, 0x22, 0x7f, 0x3e, + 0xae, 0xdb, 0xb2, 0xe7, 0xe3, 0x5e, 0x1c, 0x66, + 0xcd, 0x60, 0xf9, 0xab, 0xf7, 0x16, 0xdc, 0xc9, + 0xac, 0x42, 0x68, 0x2d, 0xd7, 0xda, 0xb2, 0x87, + 0xa7, 0x02, 0x4c, 0x4e, 0xef, 0xc3, 0x21, 0xcc, + 0x05, 0x74, 0xe1, 0x67, 0x93, 0xe3, 0x7c, 0xec, + 0x03, 0xc5, 0xbd, 0xa4, 0x2b, 0x54, 0xc1, 0x14, + 0xa8, 0x0b, 0x57, 0xaf, 0x26, 0x41, 0x6c, 0x7b, + 0xe7, 0x42, 0x00, 0x5e, 0x20, 0x85, 0x5c, 0x73, + 0xe2, 0x1d, 0xc8, 0xe2, 0xed, 0xc9, 0xd4, 0x35, + 0xcb, 0x6f, 0x60, 0x59, 0x28, 0x00, 0x11, 0xc2, + 0x70, 0xb7, 0x15, 0x70, 0x05, 0x1c, 0x1c, 0x9b, + 0x30, 0x52, 0x12, 0x66, 0x20, 0xbc, 0x1e, 0x27, + 0x30, 0xfa, 0x06, 0x6c, 0x7a, 0x50, 0x9d, 0x53, + 0xc6, 0x0e, 0x5a, 0xe1, 0xb4, 0x0a, 0xa6, 0xe3, + 0x9e, 0x49, 0x66, 0x92, 0x28, 0xc9, 0x0e, 0xec, + 0xb4, 0xa5, 0x0d, 0xb3, 0x2a, 0x50, 0xbc, 0x49, + 0xe9, 0x0b, 0x4f, 0x4b, 0x35, 0x9a, 0x1d, 0xfd, + 0x11, 0x74, 0x9c, 
0xd3, 0x86, 0x7f, 0xcf, 0x2f, + 0xb7, 0xbb, 0x6c, 0xd4, 0x73, 0x8f, 0x6a, 0x4a, + 0xd6, 0xf7, 0xca, 0x50, 0x58, 0xf7, 0x61, 0x88, + 0x45, 0xaf, 0x9f, 0x02, 0x0f, 0x6c, 0x3b, 0x96, + 0x7b, 0x8f, 0x4c, 0xd4, 0xa9, 0x1e, 0x28, 0x13, + 0xb5, 0x07, 0xae, 0x66, 0xf2, 0xd3, 0x5c, 0x18, + 0x28, 0x4f, 0x72, 0x92, 0x18, 0x60, 0x62, 0xe1, + 0x0f, 0xd5, 0x51, 0x0d, 0x18, 0x77, 0x53, 0x51, + 0xef, 0x33, 0x4e, 0x76, 0x34, 0xab, 0x47, 0x43, + 0xf5, 0xb6, 0x8f, 0x49, 0xad, 0xca, 0xb3, 0x84, + 0xd3, 0xfd, 0x75, 0xf7, 0x39, 0x0f, 0x40, 0x06, + 0xef, 0x2a, 0x29, 0x5c, 0x8c, 0x7a, 0x07, 0x6a, + 0xd5, 0x45, 0x46, 0xcd, 0x25, 0xd2, 0x10, 0x7f, + 0xbe, 0x14, 0x36, 0xc8, 0x40, 0x92, 0x4a, 0xae, + 0xbe, 0x5b, 0x37, 0x08, 0x93, 0xcd, 0x63, 0xd1, + 0x32, 0x5b, 0x86, 0x16, 0xfc, 0x48, 0x10, 0x88, + 0x6b, 0xc1, 0x52, 0xc5, 0x32, 0x21, 0xb6, 0xdf, + 0x37, 0x31, 0x19, 0x39, 0x32, 0x55, 0xee, 0x72, + 0xbc, 0xaa, 0x88, 0x01, 0x74, 0xf1, 0x71, 0x7f, + 0x91, 0x84, 0xfa, 0x91, 0x64, 0x6f, 0x17, 0xa2, + 0x4a, 0xc5, 0x5d, 0x16, 0xbf, 0xdd, 0xca, 0x95, + 0x81, 0xa9, 0x2e, 0xda, 0x47, 0x92, 0x01, 0xf0, + 0xed, 0xbf, 0x63, 0x36, 0x00, 0xd6, 0x06, 0x6d, + 0x1a, 0xb3, 0x6d, 0x5d, 0x24, 0x15, 0xd7, 0x13, + 0x51, 0xbb, 0xcd, 0x60, 0x8a, 0x25, 0x10, 0x8d, + 0x25, 0x64, 0x19, 0x92, 0xc1, 0xf2, 0x6c, 0x53, + 0x1c, 0xf9, 0xf9, 0x02, 0x03, 0xbc, 0x4c, 0xc1, + 0x9f, 0x59, 0x27, 0xd8, 0x34, 0xb0, 0xa4, 0x71, + 0x16, 0xd3, 0x88, 0x4b, 0xbb, 0x16, 0x4b, 0x8e, + 0xc8, 0x83, 0xd1, 0xac, 0x83, 0x2e, 0x56, 0xb3, + 0x91, 0x8a, 0x98, 0x60, 0x1a, 0x08, 0xd1, 0x71, + 0x88, 0x15, 0x41, 0xd5, 0x94, 0xdb, 0x39, 0x9c, + 0x6a, 0xe6, 0x15, 0x12, 0x21, 0x74, 0x5a, 0xec, + 0x81, 0x4c, 0x45, 0xb0, 0xb0, 0x5b, 0x56, 0x54, + 0x36, 0xfd, 0x6f, 0x13, 0x7a, 0xa1, 0x0a, 0x0c, + 0x0b, 0x64, 0x37, 0x61, 0xdb, 0xd6, 0xf9, 0xa9, + 0xdc, 0xb9, 0x9b, 0x1a, 0x6e, 0x69, 0x08, 0x54, + 0xce, 0x07, 0x69, 0xcd, 0xe3, 0x97, 0x61, 0xd8, + 0x2f, 0xcd, 0xec, 0x15, 0xf0, 0xd9, 0x2d, 0x7d, + 0x8e, 0x94, 0xad, 0xe8, 0xeb, 0x83, 0xfb, 0xe0 +}; +static const u8 
output22[] __initconst = { + 0x26, 0x37, 0x40, 0x8f, 0xe1, 0x30, 0x86, 0xea, + 0x73, 0xf9, 0x71, 0xe3, 0x42, 0x5e, 0x28, 0x20 +}; +static const u8 key22[] __initconst = { + 0x99, 0xe5, 0x82, 0x2d, 0xd4, 0x17, 0x3c, 0x99, + 0x5e, 0x3d, 0xae, 0x0d, 0xde, 0xfb, 0x97, 0x74, + 0x3f, 0xde, 0x3b, 0x08, 0x01, 0x34, 0xb3, 0x9f, + 0x76, 0xe9, 0xbf, 0x8d, 0x0e, 0x88, 0xd5, 0x46 +}; + +/* test vectors from Hanno Böck */ +static const u8 input23[] __initconst = { + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0x80, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xc5, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xe3, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xac, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xe6, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x00, 0x00, 0x00, + 0xaf, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xff, 0xff, 0xff, 0xf5, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xff, 0xe7, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x71, 0x92, 0x05, 0xa8, 0x52, 0x1d, + 0xfc +}; +static const u8 output23[] __initconst = { + 0x85, 0x59, 0xb8, 0x76, 0xec, 0xee, 0xd6, 0x6e, + 0xb3, 0x77, 0x98, 0xc0, 0x45, 0x7b, 0xaf, 0xf9 +}; +static const u8 key23[] __initconst = { + 0x7f, 0x1b, 0x02, 0x64, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc +}; + +static const u8 input24[] __initconst = { + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x64 +}; +static const u8 output24[] __initconst = { + 0x00, 0xbd, 0x12, 0x58, 0x97, 0x8e, 0x20, 0x54, + 0x44, 0xc9, 0xaa, 0xaa, 0x82, 0x00, 0x6f, 0xed +}; +static const u8 key24[] __initconst = { + 0xe0, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa +}; + +static const u8 input25[] __initconst = { + 0x02, 0xfc +}; +static const u8 output25[] __initconst = { + 0x06, 0x12, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c +}; +static const u8 key25[] __initconst = { + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c +}; + +static const u8 input26[] __initconst = { + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7a, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 
0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x5c, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x6e, 0x7b, 0x00, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x5c, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x6e, 0x7b, 0x00, 0x13, 0x00, 0x00, 0x00, + 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x20, 0x00, 0xef, 0xff, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x64, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, + 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0xef, 0xff, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x7a, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc +}; +static const u8 output26[] __initconst = { + 0x33, 0x20, 0x5b, 0xbf, 0x9e, 0x9f, 0x8f, 0x72, + 0x12, 0xab, 0x9e, 0x2a, 0xb9, 0xb7, 0xe4, 0xa5 +}; +static const u8 key26[] __initconst = { + 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x7b +}; + +static const u8 input27[] __initconst = { + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0xff, 0xff, 0xff, 0xe9, + 0xe9, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, + 0xac, 0xac, 0xac, 0xac, 0x00, 0x00, 0xac, 0xac, + 0xec, 0x01, 0x00, 0xac, 0xac, 0xac, 0x2c, 0xac, + 0xa2, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, + 0xac, 0xac, 0xac, 0xac, 0x64, 0xf2 +}; +static const u8 output27[] __initconst = { + 0x02, 0xee, 0x7c, 0x8c, 0x54, 0x6d, 0xde, 0xb1, + 0xa4, 0x67, 0xe4, 0xc3, 0x98, 0x11, 0x58, 0xb9 +}; +static const u8 key27[] __initconst = { + 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x7f, + 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xcf, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77 +}; + +/* nacl */ +static const u8 input28[] __initconst = { + 0x8e, 0x99, 0x3b, 0x9f, 0x48, 0x68, 0x12, 0x73, + 0xc2, 0x96, 0x50, 0xba, 0x32, 0xfc, 0x76, 0xce, + 0x48, 0x33, 0x2e, 0xa7, 0x16, 
0x4d, 0x96, 0xa4, + 0x47, 0x6f, 0xb8, 0xc5, 0x31, 0xa1, 0x18, 0x6a, + 0xc0, 0xdf, 0xc1, 0x7c, 0x98, 0xdc, 0xe8, 0x7b, + 0x4d, 0xa7, 0xf0, 0x11, 0xec, 0x48, 0xc9, 0x72, + 0x71, 0xd2, 0xc2, 0x0f, 0x9b, 0x92, 0x8f, 0xe2, + 0x27, 0x0d, 0x6f, 0xb8, 0x63, 0xd5, 0x17, 0x38, + 0xb4, 0x8e, 0xee, 0xe3, 0x14, 0xa7, 0xcc, 0x8a, + 0xb9, 0x32, 0x16, 0x45, 0x48, 0xe5, 0x26, 0xae, + 0x90, 0x22, 0x43, 0x68, 0x51, 0x7a, 0xcf, 0xea, + 0xbd, 0x6b, 0xb3, 0x73, 0x2b, 0xc0, 0xe9, 0xda, + 0x99, 0x83, 0x2b, 0x61, 0xca, 0x01, 0xb6, 0xde, + 0x56, 0x24, 0x4a, 0x9e, 0x88, 0xd5, 0xf9, 0xb3, + 0x79, 0x73, 0xf6, 0x22, 0xa4, 0x3d, 0x14, 0xa6, + 0x59, 0x9b, 0x1f, 0x65, 0x4c, 0xb4, 0x5a, 0x74, + 0xe3, 0x55, 0xa5 +}; +static const u8 output28[] __initconst = { + 0xf3, 0xff, 0xc7, 0x70, 0x3f, 0x94, 0x00, 0xe5, + 0x2a, 0x7d, 0xfb, 0x4b, 0x3d, 0x33, 0x05, 0xd9 +}; +static const u8 key28[] __initconst = { + 0xee, 0xa6, 0xa7, 0x25, 0x1c, 0x1e, 0x72, 0x91, + 0x6d, 0x11, 0xc2, 0xcb, 0x21, 0x4d, 0x3c, 0x25, + 0x25, 0x39, 0x12, 0x1d, 0x8e, 0x23, 0x4e, 0x65, + 0x2d, 0x65, 0x1f, 0xa4, 0xc8, 0xcf, 0xf8, 0x80 +}; + +/* wrap 2^130-5 */ +static const u8 input29[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output29[] __initconst = { + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key29[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* wrap 2^128 */ +static const u8 input30[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output30[] __initconst = { + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key30[] __initconst = { + 
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; + +/* limb carry */ +static const u8 input31[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output31[] __initconst = { + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key31[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 2^130-5 */ +static const u8 input32[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfb, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 +}; +static const u8 output32[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key32[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 2^130-6 */ +static const u8 input33[] __initconst = { + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output33[] __initconst = { + 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff +}; +static const u8 key33[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 5*H+L reduction intermediate */ +static const u8 input34[] __initconst = { + 0xe3, 0x35, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0xb9, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x33, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0x79, 0xcd, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output34[] __initconst = { + 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key34[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 5*H+L reduction final */ +static const u8 input35[] __initconst = { + 0xe3, 0x35, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0xb9, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x33, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0x79, 0xcd, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output35[] __initconst = { + 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key35[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const struct poly1305_testvec poly1305_testvecs[] __initconst = { + { 
input01, output01, key01, sizeof(input01) }, + { input02, output02, key02, sizeof(input02) }, + { input03, output03, key03, sizeof(input03) }, + { input04, output04, key04, sizeof(input04) }, + { input05, output05, key05, sizeof(input05) }, + { input06, output06, key06, sizeof(input06) }, + { input07, output07, key07, sizeof(input07) }, + { input08, output08, key08, sizeof(input08) }, + { input09, output09, key09, sizeof(input09) }, + { input10, output10, key10, sizeof(input10) }, + { input11, output11, key11, sizeof(input11) }, + { input12, output12, key12, sizeof(input12) }, + { input13, output13, key13, sizeof(input13) }, + { input14, output14, key14, sizeof(input14) }, + { input15, output15, key15, sizeof(input15) }, + { input16, output16, key16, sizeof(input16) }, + { input17, output17, key17, sizeof(input17) }, + { input18, output18, key18, sizeof(input18) }, + { input19, output19, key19, sizeof(input19) }, + { input20, output20, key20, sizeof(input20) }, + { input21, output21, key21, sizeof(input21) }, + { input22, output22, key22, sizeof(input22) }, + { input23, output23, key23, sizeof(input23) }, + { input24, output24, key24, sizeof(input24) }, + { input25, output25, key25, sizeof(input25) }, + { input26, output26, key26, sizeof(input26) }, + { input27, output27, key27, sizeof(input27) }, + { input28, output28, key28, sizeof(input28) }, + { input29, output29, key29, sizeof(input29) }, + { input30, output30, key30, sizeof(input30) }, + { input31, output31, key31, sizeof(input31) }, + { input32, output32, key32, sizeof(input32) }, + { input33, output33, key33, sizeof(input33) }, + { input34, output34, key34, sizeof(input34) }, + { input35, output35, key35, sizeof(input35) } +}; + +static bool __init poly1305_selftest(void) +{ + simd_context_t simd_context; + bool success = true; + size_t i, j; + + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(poly1305_testvecs); ++i) { + struct poly1305_ctx poly1305; + u8 out[POLY1305_MAC_SIZE]; + + memset(out, 0, 
sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + poly1305_testvecs[i].ilen, &simd_context); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + pr_err("poly1305 self-test %zu: FAIL\n", i + 1); + success = false; + } + simd_relax(&simd_context); + + if (poly1305_testvecs[i].ilen <= 1) + continue; + + for (j = 1; j < poly1305_testvecs[i].ilen - 1; ++j) { + memset(out, 0, sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + j, &simd_context); + poly1305_update(&poly1305, + poly1305_testvecs[i].input + j, + poly1305_testvecs[i].ilen - j, + &simd_context); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + pr_err("poly1305 self-test %zu (split %zu): FAIL\n", + i + 1, j); + success = false; + } + + memset(out, 0, sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + j, &simd_context); + poly1305_update(&poly1305, + poly1305_testvecs[i].input + j, + poly1305_testvecs[i].ilen - j, + DONT_USE_SIMD); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + pr_err("poly1305 self-test %zu (split %zu, mixed simd): FAIL\n", + i + 1, j); + success = false; + } + simd_relax(&simd_context); + } + } + simd_put(&simd_context); + + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/run.h b/net/wireguard/crypto/zinc/selftest/run.h new file mode 100644 index 000000000000..4ffaf6089eea --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/run.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 
(C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _ZINC_SELFTEST_RUN_H +#define _ZINC_SELFTEST_RUN_H + +#include +#include +#include + +static inline bool selftest_run(const char *name, bool (*selftest)(void), + bool *const nobs[], unsigned int nobs_len) +{ + unsigned long set = 0, subset = 0, largest_subset = 0; + unsigned int i; + + BUILD_BUG_ON(!__builtin_constant_p(nobs_len) || + nobs_len >= BITS_PER_LONG); + + if (!IS_ENABLED(CONFIG_ZINC_SELFTEST)) + return true; + + for (i = 0; i < nobs_len; ++i) + set |= ((unsigned long)*nobs[i]) << i; + + do { + for (i = 0; i < nobs_len; ++i) + *nobs[i] = BIT(i) & subset; + if (selftest()) + largest_subset = max(subset, largest_subset); + else + pr_err("%s self-test combination 0x%lx: FAIL\n", name, + subset); + subset = (subset - set) & set; + } while (subset); + + for (i = 0; i < nobs_len; ++i) + *nobs[i] = BIT(i) & largest_subset; + + if (largest_subset == set) + pr_info("%s self-tests: pass\n", name); + + return !WARN_ON(largest_subset != set); +} + +#endif diff --git a/net/wireguard/device.c b/net/wireguard/device.c new file mode 100644 index 000000000000..062490f1b8a7 --- /dev/null +++ b/net/wireguard/device.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "queueing.h" +#include "socket.h" +#include "timers.h" +#include "device.h" +#include "ratelimiter.h" +#include "peer.h" +#include "messages.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(device_list); + +static int wg_open(struct net_device *dev) +{ + struct in_device *dev_v4 = __in_dev_get_rtnl(dev); +#ifndef COMPAT_CANNOT_USE_IN6_DEV_GET + struct inet6_dev *dev_v6 = __in6_dev_get(dev); +#endif + struct wg_device *wg = netdev_priv(dev); + struct wg_peer *peer; + int ret; + + if (dev_v4) { + /* At some point we might put this check near the ip_rt_send_ + * redirect call of ip_forward in net/ipv4/ip_forward.c, similar + * to the current secpath check. + */ + IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); + IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; + } +#ifndef COMPAT_CANNOT_USE_IN6_DEV_GET + if (dev_v6) +#ifndef COMPAT_CANNOT_USE_DEV_CNF + dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; +#else + dev_v6->addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; +#endif +#endif + + mutex_lock(&wg->device_update_lock); + ret = wg_socket_init(wg, wg->incoming_port); + if (ret < 0) + goto out; + list_for_each_entry(peer, &wg->peer_list, peer_list) { + wg_packet_send_staged_packets(peer); + if (peer->persistent_keepalive_interval) + wg_packet_send_keepalive(peer); + } +out: + mutex_unlock(&wg->device_update_lock); + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int wg_pm_notification(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct wg_device *wg; + struct wg_peer *peer; + + /* If the machine is constantly suspending and resuming, as part of + * its normal operation rather than as a somewhat rare event, then we + * don't actually want to clear keys. 
+ */ + if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID)) + return 0; + + if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE) + return 0; + + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + mutex_lock(&wg->device_update_lock); + list_for_each_entry(peer, &wg->peer_list, peer_list) { + del_timer(&peer->timer_zero_key_material); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + } + mutex_unlock(&wg->device_update_lock); + } + rtnl_unlock(); + rcu_barrier(); + return 0; +} + +static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; +#endif + +static int wg_stop(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + struct wg_peer *peer; + struct sk_buff *skb; + + mutex_lock(&wg->device_update_lock); + list_for_each_entry(peer, &wg->peer_list, peer_list) { + wg_packet_purge_staged_packets(peer); + wg_timers_stop(peer); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + } + mutex_unlock(&wg->device_update_lock); + while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) + kfree_skb(skb); + atomic_set(&wg->handshake_queue_len, 0); + wg_socket_reinit(wg, NULL, NULL); + return 0; +} + +static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + struct sk_buff_head packets; + struct wg_peer *peer; + struct sk_buff *next; + sa_family_t family; + u32 mtu; + int ret; + + if (unlikely(!wg_check_packet_protocol(skb))) { + ret = -EPROTONOSUPPORT; + net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); + goto err; + } + + peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb); + if (unlikely(!peer)) { + ret = -ENOKEY; + if (skb->protocol == htons(ETH_P_IP)) + net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n", + dev->name, 
&ip_hdr(skb)->daddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", + dev->name, &ipv6_hdr(skb)->daddr); + goto err_icmp; + } + + family = READ_ONCE(peer->endpoint.addr.sa_family); + if (unlikely(family != AF_INET && family != AF_INET6)) { + ret = -EDESTADDRREQ; + net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n", + dev->name, peer->internal_id); + goto err_peer; + } + + mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; + + __skb_queue_head_init(&packets); + if (!skb_is_gso(skb)) { + skb_mark_not_on_list(skb); + } else { + struct sk_buff *segs = skb_gso_segment(skb, 0); + + if (IS_ERR(segs)) { + ret = PTR_ERR(segs); + goto err_peer; + } + dev_kfree_skb(skb); + skb = segs; + } + + skb_list_walk_safe(skb, skb, next) { + skb_mark_not_on_list(skb); + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + continue; + + /* We only need to keep the original dst around for icmp, + * so at this point we're in a position to drop it. + */ + skb_dst_drop(skb); + + PACKET_CB(skb)->mtu = mtu; + + __skb_queue_tail(&packets, skb); + } + + spin_lock_bh(&peer->staged_packet_queue.lock); + /* If the queue is getting too big, we start removing the oldest packets + * until it's small again. We do this before adding the new packet, so + * we don't remove GSO segments that are in excess. 
+ */ + while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { + dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); + ++dev->stats.tx_dropped; + } + skb_queue_splice_tail(&packets, &peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); + + wg_packet_send_staged_packets(peer); + + wg_peer_put(peer); + return NETDEV_TX_OK; + +err_peer: + wg_peer_put(peer); +err_icmp: + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); +err: + ++dev->stats.tx_errors; + kfree_skb(skb); + return ret; +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = wg_open, + .ndo_stop = wg_stop, + .ndo_start_xmit = wg_xmit, + .ndo_get_stats64 = ip_tunnel_get_stats64 +}; + +static void wg_destruct(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + + rtnl_lock(); + list_del(&wg->device_list); + rtnl_unlock(); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg->incoming_port = 0; + wg_socket_reinit(wg, NULL, NULL); + /* The final references are cleared in the below calls to destroy_workqueue. */ + wg_peer_remove_all(wg); + destroy_workqueue(wg->handshake_receive_wq); + destroy_workqueue(wg->handshake_send_wq); + destroy_workqueue(wg->packet_crypt_wq); + wg_packet_queue_free(&wg->handshake_queue, true); + wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, false); + rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ + wg_ratelimiter_uninit(); + memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); + free_percpu(dev->tstats); + kvfree(wg->index_hashtable); + kvfree(wg->peer_hashtable); + mutex_unlock(&wg->device_update_lock); + + pr_debug("%s: Interface destroyed\n", dev->name); + free_netdev(dev); +} + +static const struct device_type device_type = { .name = KBUILD_MODNAME }; + +static void wg_setup(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; + const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + + max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); + + dev->netdev_ops = &netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->needed_headroom = DATA_PACKET_HEAD_ROOM; + dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE); + dev->type = ARPHRD_NONE; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; +#ifndef COMPAT_CANNOT_USE_IFF_NO_QUEUE + dev->priv_flags |= IFF_NO_QUEUE; +#else + dev->tx_queue_len = 0; +#endif + dev->features |= NETIF_F_LLTX; + dev->features |= WG_NETDEV_FEATURES; + dev->hw_features |= WG_NETDEV_FEATURES; + dev->hw_enc_features |= WG_NETDEV_FEATURES; + dev->mtu = ETH_DATA_LEN - overhead; +#ifndef COMPAT_CANNOT_USE_MAX_MTU + dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; +#endif + + SET_NETDEV_DEVTYPE(dev, &device_type); + + /* We need to keep the dst around in case of icmp replies. 
*/ + netif_keep_dst(dev); + + memset(wg, 0, sizeof(*wg)); + wg->dev = dev; +} + +static int wg_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct wg_device *wg = netdev_priv(dev); + int ret = -ENOMEM; + + rcu_assign_pointer(wg->creating_net, src_net); + init_rwsem(&wg->static_identity.lock); + mutex_init(&wg->socket_update_lock); + mutex_init(&wg->device_update_lock); + wg_allowedips_init(&wg->peer_allowedips); + wg_cookie_checker_init(&wg->cookie_checker, wg); + INIT_LIST_HEAD(&wg->peer_list); + wg->device_update_gen = 1; + + wg->peer_hashtable = wg_pubkey_hashtable_alloc(); + if (!wg->peer_hashtable) + return ret; + + wg->index_hashtable = wg_index_hashtable_alloc(); + if (!wg->index_hashtable) + goto err_free_peer_hashtable; + + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + goto err_free_index_hashtable; + + wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", + WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); + if (!wg->handshake_receive_wq) + goto err_free_tstats; + + wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", + WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); + if (!wg->handshake_send_wq) + goto err_destroy_handshake_receive; + + wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s", + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name); + if (!wg->packet_crypt_wq) + goto err_destroy_handshake_send; + + ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, + MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_destroy_packet_crypt; + + ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, + MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_free_encrypt_queue; + + ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, + MAX_QUEUED_INCOMING_HANDSHAKES); + if (ret < 0) + goto err_free_decrypt_queue; + + ret = wg_ratelimiter_init(); + if (ret < 0) + goto 
err_free_handshake_queue; + + ret = register_netdevice(dev); + if (ret < 0) + goto err_uninit_ratelimiter; + + list_add(&wg->device_list, &device_list); + + /* We wait until the end to assign priv_destructor, so that + * register_netdevice doesn't call it for us if it fails. + */ + dev->priv_destructor = wg_destruct; + + pr_debug("%s: Interface created\n", dev->name); + return ret; + +err_uninit_ratelimiter: + wg_ratelimiter_uninit(); +err_free_handshake_queue: + wg_packet_queue_free(&wg->handshake_queue, false); +err_free_decrypt_queue: + wg_packet_queue_free(&wg->decrypt_queue, false); +err_free_encrypt_queue: + wg_packet_queue_free(&wg->encrypt_queue, false); +err_destroy_packet_crypt: + destroy_workqueue(wg->packet_crypt_wq); +err_destroy_handshake_send: + destroy_workqueue(wg->handshake_send_wq); +err_destroy_handshake_receive: + destroy_workqueue(wg->handshake_receive_wq); +err_free_tstats: + free_percpu(dev->tstats); +err_free_index_hashtable: + kvfree(wg->index_hashtable); +err_free_peer_hashtable: + kvfree(wg->peer_hashtable); + return ret; +} + +static struct rtnl_link_ops link_ops __read_mostly = { + .kind = KBUILD_MODNAME, + .priv_size = sizeof(struct wg_device), + .setup = wg_setup, + .newlink = wg_newlink, +}; + +static void wg_netns_pre_exit(struct net *net) +{ + struct wg_device *wg; + struct wg_peer *peer; + + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + if (rcu_access_pointer(wg->creating_net) == net) { + pr_debug("%s: Creating namespace exiting\n", wg->dev->name); + netif_carrier_off(wg->dev); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg_socket_reinit(wg, NULL, NULL); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + mutex_unlock(&wg->device_update_lock); + } + } + rtnl_unlock(); +} + +static struct pernet_operations pernet_ops = { + .pre_exit = wg_netns_pre_exit +}; + +int __init wg_device_init(void) +{ + int ret; + +#ifdef 
CONFIG_PM_SLEEP + ret = register_pm_notifier(&pm_notifier); + if (ret) + return ret; +#endif + + ret = register_pernet_device(&pernet_ops); + if (ret) + goto error_pm; + + ret = rtnl_link_register(&link_ops); + if (ret) + goto error_pernet; + + return 0; + +error_pernet: + unregister_pernet_device(&pernet_ops); +error_pm: +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); +#endif + return ret; +} + +void wg_device_uninit(void) +{ + rtnl_link_unregister(&link_ops); + unregister_pernet_device(&pernet_ops); +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); +#endif + rcu_barrier(); +} diff --git a/net/wireguard/device.h b/net/wireguard/device.h new file mode 100644 index 000000000000..43c7cebbf50b --- /dev/null +++ b/net/wireguard/device.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_DEVICE_H +#define _WG_DEVICE_H + +#include "noise.h" +#include "allowedips.h" +#include "peerlookup.h" +#include "cookie.h" + +#include +#include +#include +#include +#include +#include + +struct wg_device; + +struct multicore_worker { + void *ptr; + struct work_struct work; +}; + +struct crypt_queue { + struct ptr_ring ring; + struct multicore_worker __percpu *worker; + int last_cpu; +}; + +struct prev_queue { + struct sk_buff *head, *tail, *peeked; + struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. 
+ atomic_t count; +}; + +struct wg_device { + struct net_device *dev; + struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; + struct sock __rcu *sock4, *sock6; + struct net __rcu *creating_net; + struct noise_static_identity static_identity; + struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; + struct cookie_checker cookie_checker; + struct pubkey_hashtable *peer_hashtable; + struct index_hashtable *index_hashtable; + struct allowedips peer_allowedips; + struct mutex device_update_lock, socket_update_lock; + struct list_head device_list, peer_list; + atomic_t handshake_queue_len; + unsigned int num_peers, device_update_gen; + u32 fwmark; + u16 incoming_port; +}; + +int wg_device_init(void); +void wg_device_uninit(void); + +#endif /* _WG_DEVICE_H */ diff --git a/net/wireguard/main.c b/net/wireguard/main.c new file mode 100644 index 000000000000..d5ce491e822e --- /dev/null +++ b/net/wireguard/main.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "version.h" +#include "device.h" +#include "noise.h" +#include "queueing.h" +#include "ratelimiter.h" +#include "netlink.h" +#include "uapi/wireguard.h" +#include "crypto/zinc.h" + +#include +#include +#include +#include + +static int __init wg_mod_init(void) +{ + int ret; + + if ((ret = chacha20_mod_init()) || (ret = poly1305_mod_init()) || + (ret = chacha20poly1305_mod_init()) || (ret = blake2s_mod_init()) || + (ret = curve25519_mod_init())) + return ret; + + ret = wg_allowedips_slab_init(); + if (ret < 0) + goto err_allowedips; + +#ifdef DEBUG + ret = -ENOTRECOVERABLE; + if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || + !wg_ratelimiter_selftest()) + goto err_peer; +#endif + wg_noise_init(); + + ret = wg_peer_init(); + if (ret < 0) + goto err_peer; + + ret = wg_device_init(); + if (ret < 0) + goto err_device; + + ret = wg_genetlink_init(); + if (ret < 0) + goto err_netlink; + + pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n"); + pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved.\n"); + + return 0; + +err_netlink: + wg_device_uninit(); +err_device: + wg_peer_uninit(); +err_peer: + wg_allowedips_slab_uninit(); +err_allowedips: + return ret; +} + +static void __exit wg_mod_exit(void) +{ + wg_genetlink_uninit(); + wg_device_uninit(); + wg_peer_uninit(); + wg_allowedips_slab_uninit(); +} + +module_init(wg_mod_init); +module_exit(wg_mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("WireGuard secure network tunnel"); +MODULE_AUTHOR("Jason A. Donenfeld "); +MODULE_VERSION(WIREGUARD_VERSION); +MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME); +MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME); +MODULE_INFO(intree, "Y"); diff --git a/net/wireguard/messages.h b/net/wireguard/messages.h new file mode 100644 index 000000000000..1d1ed18f11f8 --- /dev/null +++ b/net/wireguard/messages.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. 
Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_MESSAGES_H +#define _WG_MESSAGES_H + +#include +#include +#include + +#include +#include +#include + +enum noise_lengths { + NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE, + NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE, + NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32), + NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE, + NOISE_HASH_LEN = BLAKE2S_HASH_SIZE +}; + +#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN) + +enum cookie_values { + COOKIE_SECRET_MAX_AGE = 2 * 60, + COOKIE_SECRET_LATENCY = 5, + COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE, + COOKIE_LEN = 16 +}; + +enum counter_values { + COUNTER_BITS_TOTAL = 8192, + COUNTER_REDUNDANT_BITS = BITS_PER_LONG, + COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS +}; + +enum limits { + REKEY_AFTER_MESSAGES = 1ULL << 60, + REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1, + REKEY_TIMEOUT = 5, + REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3, + REKEY_AFTER_TIME = 120, + REJECT_AFTER_TIME = 180, + INITIATIONS_PER_SECOND = 50, + MAX_PEERS_PER_DEVICE = 1U << 20, + KEEPALIVE_TIMEOUT = 10, + MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT, + MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */ + MAX_STAGED_PACKETS = 128, + MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */ +}; + +enum message_type { + MESSAGE_INVALID = 0, + MESSAGE_HANDSHAKE_INITIATION = 1, + MESSAGE_HANDSHAKE_RESPONSE = 2, + MESSAGE_HANDSHAKE_COOKIE = 3, + MESSAGE_DATA = 4 +}; + +struct message_header { + /* The actual layout of this that we want is: + * u8 type + * u8 reserved_zero[3] + * + * But it turns out that by encoding this as little endian, + * we achieve the same thing, and it makes checking faster. 
+ */ + __le32 type; +}; + +struct message_macs { + u8 mac1[COOKIE_LEN]; + u8 mac2[COOKIE_LEN]; +}; + +struct message_handshake_initiation { + struct message_header header; + __le32 sender_index; + u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)]; + u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)]; + struct message_macs macs; +}; + +struct message_handshake_response { + struct message_header header; + __le32 sender_index; + __le32 receiver_index; + u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 encrypted_nothing[noise_encrypted_len(0)]; + struct message_macs macs; +}; + +struct message_handshake_cookie { + struct message_header header; + __le32 receiver_index; + u8 nonce[COOKIE_NONCE_LEN]; + u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)]; +}; + +struct message_data { + struct message_header header; + __le32 key_idx; + __le64 counter; + u8 encrypted_data[]; +}; + +#define message_data_len(plain_len) \ + (noise_encrypted_len(plain_len) + sizeof(struct message_data)) + +enum message_alignments { + MESSAGE_PADDING_MULTIPLE = 16, + MESSAGE_MINIMUM_LENGTH = message_data_len(0) +}; + +#define SKB_HEADER_LEN \ + (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ + sizeof(struct udphdr) + NET_SKB_PAD) +#define DATA_PACKET_HEAD_ROOM \ + ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4) + +enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ }; + +#endif /* _WG_MESSAGES_H */ diff --git a/net/wireguard/netlink.c b/net/wireguard/netlink.c new file mode 100644 index 000000000000..ef239ab1e2d6 --- /dev/null +++ b/net/wireguard/netlink.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "netlink.h" +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "queueing.h" +#include "messages.h" +#include "uapi/wireguard.h" +#include +#include +#include +#include + +static struct genl_family genl_family; + +static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { + [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, + [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, + [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, + [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, + [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, + [WGDEVICE_A_PEERS] = { .type = NLA_NESTED } +}; + +static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { + [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), + [WGPEER_A_FLAGS] = { .type = NLA_U32 }, + [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)), + [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, + [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), + [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, + [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 } +}; + +static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { + [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, + [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)), + [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } +}; + +static struct wg_device *lookup_interface(struct nlattr **attrs, + struct sk_buff *skb) +{ + struct net_device *dev = NULL; + + if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME]) + return ERR_PTR(-EBADR); + if (attrs[WGDEVICE_A_IFINDEX]) + dev = dev_get_by_index(sock_net(skb->sk), + 
nla_get_u32(attrs[WGDEVICE_A_IFINDEX])); + else if (attrs[WGDEVICE_A_IFNAME]) + dev = dev_get_by_name(sock_net(skb->sk), + nla_data(attrs[WGDEVICE_A_IFNAME])); + if (!dev) + return ERR_PTR(-ENODEV); + if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind || + strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) { + dev_put(dev); + return ERR_PTR(-EOPNOTSUPP); + } + return netdev_priv(dev); +} + +static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr, + int family) +{ + struct nlattr *allowedip_nest; + + allowedip_nest = nla_nest_start(skb, 0); + if (!allowedip_nest) + return -EMSGSIZE; + + if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) || + nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) || + nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ? + sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) { + nla_nest_cancel(skb, allowedip_nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, allowedip_nest); + return 0; +} + +struct dump_ctx { + struct wg_device *wg; + struct wg_peer *next_peer; + u64 allowedips_seq; + struct allowedips_node *next_allowedip; +}; + +#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args) + +static int +get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) +{ + + struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0); + struct allowedips_node *allowedips_node = ctx->next_allowedip; + bool fail; + + if (!peer_nest) + return -EMSGSIZE; + + down_read(&peer->handshake.lock); + fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN, + peer->handshake.remote_static); + up_read(&peer->handshake.lock); + if (fail) + goto err; + + if (!allowedips_node) { + const struct __kernel_timespec last_handshake = { + .tv_sec = peer->walltime_last_handshake.tv_sec, + .tv_nsec = peer->walltime_last_handshake.tv_nsec + }; + + down_read(&peer->handshake.lock); + fail = nla_put(skb, WGPEER_A_PRESHARED_KEY, + NOISE_SYMMETRIC_KEY_LEN, + peer->handshake.preshared_key); + up_read(&peer->handshake.lock); + if 
(fail) + goto err; + + if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME, + sizeof(last_handshake), &last_handshake) || + nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, + peer->persistent_keepalive_interval) || + nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes, + WGPEER_A_UNSPEC) || + nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes, + WGPEER_A_UNSPEC) || + nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1)) + goto err; + + read_lock_bh(&peer->endpoint_lock); + if (peer->endpoint.addr.sa_family == AF_INET) + fail = nla_put(skb, WGPEER_A_ENDPOINT, + sizeof(peer->endpoint.addr4), + &peer->endpoint.addr4); + else if (peer->endpoint.addr.sa_family == AF_INET6) + fail = nla_put(skb, WGPEER_A_ENDPOINT, + sizeof(peer->endpoint.addr6), + &peer->endpoint.addr6); + read_unlock_bh(&peer->endpoint_lock); + if (fail) + goto err; + allowedips_node = + list_first_entry_or_null(&peer->allowedips_list, + struct allowedips_node, peer_list); + } + if (!allowedips_node) + goto no_allowedips; + if (!ctx->allowedips_seq) + ctx->allowedips_seq = peer->device->peer_allowedips.seq; + else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) + goto no_allowedips; + + allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); + if (!allowedips_nest) + goto err; + + list_for_each_entry_from(allowedips_node, &peer->allowedips_list, + peer_list) { + u8 cidr, ip[16] __aligned(__alignof(u64)); + int family; + + family = wg_allowedips_read_node(allowedips_node, ip, &cidr); + if (get_allowedips(skb, ip, cidr, family)) { + nla_nest_end(skb, allowedips_nest); + nla_nest_end(skb, peer_nest); + ctx->next_allowedip = allowedips_node; + return -EMSGSIZE; + } + } + nla_nest_end(skb, allowedips_nest); +no_allowedips: + nla_nest_end(skb, peer_nest); + ctx->next_allowedip = NULL; + ctx->allowedips_seq = 0; + return 0; +err: + nla_nest_cancel(skb, peer_nest); + return -EMSGSIZE; +} + +static int wg_get_device_start(struct netlink_callback *cb) +{ + struct wg_device *wg; + + wg = 
lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb); + if (IS_ERR(wg)) + return PTR_ERR(wg); + DUMP_CTX(cb)->wg = wg; + return 0; +} + +static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct wg_peer *peer, *next_peer_cursor; + struct dump_ctx *ctx = DUMP_CTX(cb); + struct wg_device *wg = ctx->wg; + struct nlattr *peers_nest; + int ret = -EMSGSIZE; + bool done = true; + void *hdr; + + rtnl_lock(); + mutex_lock(&wg->device_update_lock); + cb->seq = wg->device_update_gen; + next_peer_cursor = ctx->next_peer; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE); + if (!hdr) + goto out; + genl_dump_check_consistent(cb, hdr); + + if (!ctx->next_peer) { + if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT, + wg->incoming_port) || + nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) || + nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) || + nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name)) + goto out; + + down_read(&wg->static_identity.lock); + if (wg->static_identity.has_identity) { + if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY, + NOISE_PUBLIC_KEY_LEN, + wg->static_identity.static_private) || + nla_put(skb, WGDEVICE_A_PUBLIC_KEY, + NOISE_PUBLIC_KEY_LEN, + wg->static_identity.static_public)) { + up_read(&wg->static_identity.lock); + goto out; + } + } + up_read(&wg->static_identity.lock); + } + + peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS); + if (!peers_nest) + goto out; + ret = 0; + /* If the last cursor was removed via list_del_init in peer_remove, then + * we just treat this the same as there being no more peers left. The + * reason is that seq_nr should indicate to userspace that this isn't a + * coherent dump anyway, so they'll try again. 
+ */ + if (list_empty(&wg->peer_list) || + (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { + nla_nest_cancel(skb, peers_nest); + goto out; + } + lockdep_assert_held(&wg->device_update_lock); + peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); + list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { + if (get_peer(peer, skb, ctx)) { + done = false; + break; + } + next_peer_cursor = peer; + } + nla_nest_end(skb, peers_nest); + +out: + if (!ret && !done && next_peer_cursor) + wg_peer_get(next_peer_cursor); + wg_peer_put(ctx->next_peer); + mutex_unlock(&wg->device_update_lock); + rtnl_unlock(); + + if (ret) { + genlmsg_cancel(skb, hdr); + return ret; + } + genlmsg_end(skb, hdr); + if (done) { + ctx->next_peer = NULL; + return 0; + } + ctx->next_peer = next_peer_cursor; + return skb->len; + + /* At this point, we can't really deal ourselves with safely zeroing out + * the private key material after usage. This will need an additional API + * in the kernel for marking skbs as zero_on_free. 
+ */ +} + +static int wg_get_device_done(struct netlink_callback *cb) +{ + struct dump_ctx *ctx = DUMP_CTX(cb); + + if (ctx->wg) + dev_put(ctx->wg->dev); + wg_peer_put(ctx->next_peer); + return 0; +} + +static int set_port(struct wg_device *wg, u16 port) +{ + struct wg_peer *peer; + + if (wg->incoming_port == port) + return 0; + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + if (!netif_running(wg->dev)) { + wg->incoming_port = port; + return 0; + } + return wg_socket_init(wg, port); +} + +static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs) +{ + int ret = -EINVAL; + u16 family; + u8 cidr; + + if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] || + !attrs[WGALLOWEDIP_A_CIDR_MASK]) + return ret; + family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]); + cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]); + + if (family == AF_INET && cidr <= 32 && + nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) + ret = wg_allowedips_insert_v4( + &peer->device->peer_allowedips, + nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, + &peer->device->device_update_lock); + else if (family == AF_INET6 && cidr <= 128 && + nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) + ret = wg_allowedips_insert_v6( + &peer->device->peer_allowedips, + nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, + &peer->device->device_update_lock); + + return ret; +} + +static int set_peer(struct wg_device *wg, struct nlattr **attrs) +{ + u8 *public_key = NULL, *preshared_key = NULL; + struct wg_peer *peer = NULL; + u32 flags = 0; + int ret; + + ret = -EINVAL; + if (attrs[WGPEER_A_PUBLIC_KEY] && + nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN) + public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]); + else + goto out; + if (attrs[WGPEER_A_PRESHARED_KEY] && + nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN) + preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]); + + if 
(attrs[WGPEER_A_FLAGS]) + flags = nla_get_u32(attrs[WGPEER_A_FLAGS]); + ret = -EOPNOTSUPP; + if (flags & ~__WGPEER_F_ALL) + goto out; + + ret = -EPFNOSUPPORT; + if (attrs[WGPEER_A_PROTOCOL_VERSION]) { + if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1) + goto out; + } + + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, + nla_data(attrs[WGPEER_A_PUBLIC_KEY])); + ret = 0; + if (!peer) { /* Peer doesn't exist yet. Add a new one. */ + if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY)) + goto out; + + /* The peer is new, so there aren't allowed IPs to remove. */ + flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS; + + down_read(&wg->static_identity.lock); + if (wg->static_identity.has_identity && + !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]), + wg->static_identity.static_public, + NOISE_PUBLIC_KEY_LEN)) { + /* We silently ignore peers that have the same public + * key as the device. The reason we do it silently is + * that we'd like for people to be able to reuse the + * same set of API calls across peers. + */ + up_read(&wg->static_identity.lock); + ret = 0; + goto out; + } + up_read(&wg->static_identity.lock); + + peer = wg_peer_create(wg, public_key, preshared_key); + if (IS_ERR(peer)) { + ret = PTR_ERR(peer); + peer = NULL; + goto out; + } + /* Take additional reference, as though we've just been + * looked up. 
+ */ + wg_peer_get(peer); + } + + if (flags & WGPEER_F_REMOVE_ME) { + wg_peer_remove(peer); + goto out; + } + + if (preshared_key) { + down_write(&peer->handshake.lock); + memcpy(&peer->handshake.preshared_key, preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + up_write(&peer->handshake.lock); + } + + if (attrs[WGPEER_A_ENDPOINT]) { + struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); + size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + + if ((len == sizeof(struct sockaddr_in) && + addr->sa_family == AF_INET) || + (len == sizeof(struct sockaddr_in6) && + addr->sa_family == AF_INET6)) { + struct endpoint endpoint = { { { 0 } } }; + + memcpy(&endpoint.addr, addr, len); + wg_socket_set_peer_endpoint(peer, &endpoint); + } + } + + if (flags & WGPEER_F_REPLACE_ALLOWEDIPS) + wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer, + &wg->device_update_lock); + + if (attrs[WGPEER_A_ALLOWEDIPS]) { + struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1]; + int rem; + + nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) { + ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, + attr, allowedip_policy, NULL); + if (ret < 0) + goto out; + ret = set_allowedip(peer, allowedip); + if (ret < 0) + goto out; + } + } + + if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) { + const u16 persistent_keepalive_interval = nla_get_u16( + attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]); + const bool send_keepalive = + !peer->persistent_keepalive_interval && + persistent_keepalive_interval && + netif_running(wg->dev); + + peer->persistent_keepalive_interval = persistent_keepalive_interval; + if (send_keepalive) + wg_packet_send_keepalive(peer); + } + + if (netif_running(wg->dev)) + wg_packet_send_staged_packets(peer); + +out: + wg_peer_put(peer); + if (attrs[WGPEER_A_PRESHARED_KEY]) + memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]), + nla_len(attrs[WGPEER_A_PRESHARED_KEY])); + return ret; +} + +static int wg_set_device(struct sk_buff *skb, struct genl_info *info) +{ + struct 
wg_device *wg = lookup_interface(info->attrs, skb); + u32 flags = 0; + int ret; + + if (IS_ERR(wg)) { + ret = PTR_ERR(wg); + goto out_nodev; + } + + rtnl_lock(); + mutex_lock(&wg->device_update_lock); + + if (info->attrs[WGDEVICE_A_FLAGS]) + flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]); + ret = -EOPNOTSUPP; + if (flags & ~__WGDEVICE_F_ALL) + goto out; + + if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { + struct net *net; + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0; + rcu_read_unlock(); + if (ret) + goto out; + } + + ++wg->device_update_gen; + + if (info->attrs[WGDEVICE_A_FWMARK]) { + struct wg_peer *peer; + + wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + } + + if (info->attrs[WGDEVICE_A_LISTEN_PORT]) { + ret = set_port(wg, + nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT])); + if (ret) + goto out; + } + + if (flags & WGDEVICE_F_REPLACE_PEERS) + wg_peer_remove_all(wg); + + if (info->attrs[WGDEVICE_A_PRIVATE_KEY] && + nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) == + NOISE_PUBLIC_KEY_LEN) { + u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); + u8 public_key[NOISE_PUBLIC_KEY_LEN]; + struct wg_peer *peer, *temp; + + if (!crypto_memneq(wg->static_identity.static_private, + private_key, NOISE_PUBLIC_KEY_LEN)) + goto skip_set_private_key; + + /* We remove before setting, to prevent race, which means doing + * two 25519-genpub ops. 
+ */ + if (curve25519_generate_public(public_key, private_key)) { + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, + public_key); + if (peer) { + wg_peer_put(peer); + wg_peer_remove(peer); + } + } + + down_write(&wg->static_identity.lock); + wg_noise_set_static_identity_private_key(&wg->static_identity, + private_key); + list_for_each_entry_safe(peer, temp, &wg->peer_list, + peer_list) { + wg_noise_precompute_static_static(peer); + wg_noise_expire_current_peer_keypairs(peer); + } + wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); + up_write(&wg->static_identity.lock); + } +skip_set_private_key: + + if (info->attrs[WGDEVICE_A_PEERS]) { + struct nlattr *attr, *peer[WGPEER_A_MAX + 1]; + int rem; + + nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) { + ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, + peer_policy, NULL); + if (ret < 0) + goto out; + ret = set_peer(wg, peer); + if (ret < 0) + goto out; + } + } + ret = 0; + +out: + mutex_unlock(&wg->device_update_lock); + rtnl_unlock(); + dev_put(wg->dev); +out_nodev: + if (info->attrs[WGDEVICE_A_PRIVATE_KEY]) + memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]), + nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY])); + return ret; +} + +#ifndef COMPAT_CANNOT_USE_CONST_GENL_OPS +static const +#else +static +#endif +struct genl_ops genl_ops[] = { + { + .cmd = WG_CMD_GET_DEVICE, +#ifndef COMPAT_CANNOT_USE_NETLINK_START + .start = wg_get_device_start, +#endif + .dumpit = wg_get_device_dump, + .done = wg_get_device_done, +#ifdef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .flags = GENL_UNS_ADMIN_PERM + }, { + .cmd = WG_CMD_SET_DEVICE, + .doit = wg_set_device, +#ifdef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .flags = GENL_UNS_ADMIN_PERM + } +}; + +static struct genl_family genl_family +#ifndef COMPAT_CANNOT_USE_GENL_NOPS +__ro_after_init = { + .ops = genl_ops, + .n_ops = ARRAY_SIZE(genl_ops), +#else += { 
+#endif + .name = WG_GENL_NAME, + .version = WG_GENL_VERSION, + .maxattr = WGDEVICE_A_MAX, + .module = THIS_MODULE, +#ifndef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .netnsok = true +}; + +int __init wg_genetlink_init(void) +{ + return genl_register_family(&genl_family); +} + +void __exit wg_genetlink_uninit(void) +{ + genl_unregister_family(&genl_family); +} diff --git a/net/wireguard/netlink.h b/net/wireguard/netlink.h new file mode 100644 index 000000000000..15100d92e2e3 --- /dev/null +++ b/net/wireguard/netlink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_NETLINK_H +#define _WG_NETLINK_H + +int wg_genetlink_init(void); +void wg_genetlink_uninit(void); + +#endif /* _WG_NETLINK_H */ diff --git a/net/wireguard/noise.c b/net/wireguard/noise.c new file mode 100644 index 000000000000..baf455e21e79 --- /dev/null +++ b/net/wireguard/noise.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "noise.h" +#include "device.h" +#include "peer.h" +#include "messages.h" +#include "queueing.h" +#include "peerlookup.h" + +#include +#include +#include +#include +#include +#include + +/* This implements Noise_IKpsk2: + * + * <- s + * ****** + * -> e, es, s, ss, {t} + * <- e, ee, se, psk, {} + */ + +static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; +static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com"; +static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init; +static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init; +static atomic64_t keypair_counter = ATOMIC64_INIT(0); + +void __init wg_noise_init(void) +{ + struct blake2s_state blake; + + blake2s(handshake_init_chaining_key, handshake_name, NULL, + NOISE_HASH_LEN, sizeof(handshake_name), 0); + blake2s_init(&blake, NOISE_HASH_LEN); + blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); + blake2s_update(&blake, identifier_name, sizeof(identifier_name)); + blake2s_final(&blake, handshake_init_hash); +} + +/* Must hold peer->handshake.static_identity->lock */ +void wg_noise_precompute_static_static(struct wg_peer *peer) +{ + down_write(&peer->handshake.lock); + if (!peer->handshake.static_identity->has_identity || + !curve25519(peer->handshake.precomputed_static_static, + peer->handshake.static_identity->static_private, + peer->handshake.remote_static)) + memset(peer->handshake.precomputed_static_static, 0, + NOISE_PUBLIC_KEY_LEN); + up_write(&peer->handshake.lock); +} + +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer) +{ + memset(handshake, 0, sizeof(*handshake)); + init_rwsem(&handshake->lock); + handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE; + handshake->entry.peer = peer; + memcpy(handshake->remote_static, 
peer_public_key, NOISE_PUBLIC_KEY_LEN); + if (peer_preshared_key) + memcpy(handshake->preshared_key, peer_preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + handshake->static_identity = static_identity; + handshake->state = HANDSHAKE_ZEROED; + wg_noise_precompute_static_static(peer); +} + +static void handshake_zero(struct noise_handshake *handshake) +{ + memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN); + memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN); + memset(&handshake->hash, 0, NOISE_HASH_LEN); + memset(&handshake->chaining_key, 0, NOISE_HASH_LEN); + handshake->remote_index = 0; + handshake->state = HANDSHAKE_ZEROED; +} + +void wg_noise_handshake_clear(struct noise_handshake *handshake) +{ + down_write(&handshake->lock); + wg_index_hashtable_remove( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); + handshake_zero(handshake); + up_write(&handshake->lock); +} + +static struct noise_keypair *keypair_create(struct wg_peer *peer) +{ + struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL); + + if (unlikely(!keypair)) + return NULL; + spin_lock_init(&keypair->receiving_counter.lock); + keypair->internal_id = atomic64_inc_return(&keypair_counter); + keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; + keypair->entry.peer = peer; + kref_init(&keypair->refcount); + return keypair; +} + +static void keypair_free_rcu(struct rcu_head *rcu) +{ + kfree_sensitive(container_of(rcu, struct noise_keypair, rcu)); +} + +static void keypair_free_kref(struct kref *kref) +{ + struct noise_keypair *keypair = + container_of(kref, struct noise_keypair, refcount); + + net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n", + keypair->entry.peer->device->dev->name, + keypair->internal_id, + keypair->entry.peer->internal_id); + wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable, + &keypair->entry); + call_rcu(&keypair->rcu, keypair_free_rcu); +} + +void wg_noise_keypair_put(struct noise_keypair 
*keypair, bool unreference_now) +{ + if (unlikely(!keypair)) + return; + if (unlikely(unreference_now)) + wg_index_hashtable_remove( + keypair->entry.peer->device->index_hashtable, + &keypair->entry); + kref_put(&keypair->refcount, keypair_free_kref); +} + +struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), + "Taking noise keypair reference without holding the RCU BH read lock"); + if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount))) + return NULL; + return keypair; +} + +void wg_noise_keypairs_clear(struct noise_keypairs *keypairs) +{ + struct noise_keypair *old; + + spin_lock_bh(&keypairs->keypair_update_lock); + + /* We zero the next_keypair before zeroing the others, so that + * wg_noise_received_with_keypair returns early before subsequent ones + * are zeroed. + */ + old = rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + wg_noise_keypair_put(old, true); + + old = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->previous_keypair, NULL); + wg_noise_keypair_put(old, true); + + old = rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->current_keypair, NULL); + wg_noise_keypair_put(old, true); + + spin_unlock_bh(&keypairs->keypair_update_lock); +} + +void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + + wg_noise_handshake_clear(&peer->handshake); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + + spin_lock_bh(&peer->keypairs.keypair_update_lock); + keypair = rcu_dereference_protected(peer->keypairs.next_keypair, + lockdep_is_held(&peer->keypairs.keypair_update_lock)); + if (keypair) + keypair->sending.is_valid = false; + keypair = 
rcu_dereference_protected(peer->keypairs.current_keypair, + lockdep_is_held(&peer->keypairs.keypair_update_lock)); + if (keypair) + keypair->sending.is_valid = false; + spin_unlock_bh(&peer->keypairs.keypair_update_lock); +} + +static void add_new_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *new_keypair) +{ + struct noise_keypair *previous_keypair, *next_keypair, *current_keypair; + + spin_lock_bh(&keypairs->keypair_update_lock); + previous_keypair = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + next_keypair = rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + current_keypair = rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + if (new_keypair->i_am_the_initiator) { + /* If we're the initiator, it means we've sent a handshake, and + * received a confirmation response, which means this new + * keypair can now be used. + */ + if (next_keypair) { + /* If there already was a next keypair pending, we + * demote it to be the previous keypair, and free the + * existing current. Note that this means KCI can result + * in this transition. It would perhaps be more sound to + * always just get rid of the unused next keypair + * instead of putting it in the previous slot, but this + * might be a bit less robust. Something to think about + * for the future. + */ + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + rcu_assign_pointer(keypairs->previous_keypair, + next_keypair); + wg_noise_keypair_put(current_keypair, true); + } else /* If there wasn't an existing next keypair, we replace + * the previous with the current one. + */ + rcu_assign_pointer(keypairs->previous_keypair, + current_keypair); + /* At this point we can get rid of the old previous keypair, and + * set up the new keypair. 
+ */ + wg_noise_keypair_put(previous_keypair, true); + rcu_assign_pointer(keypairs->current_keypair, new_keypair); + } else { + /* If we're the responder, it means we can't use the new keypair + * until we receive confirmation via the first data packet, so + * we get rid of the existing previous one, the possibly + * existing next one, and slide in the new next one. + */ + rcu_assign_pointer(keypairs->next_keypair, new_keypair); + wg_noise_keypair_put(next_keypair, true); + RCU_INIT_POINTER(keypairs->previous_keypair, NULL); + wg_noise_keypair_put(previous_keypair, true); + } + spin_unlock_bh(&keypairs->keypair_update_lock); +} + +bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *received_keypair) +{ + struct noise_keypair *old_keypair; + bool key_is_new; + + /* We first check without taking the spinlock. */ + key_is_new = received_keypair == + rcu_access_pointer(keypairs->next_keypair); + if (likely(!key_is_new)) + return false; + + spin_lock_bh(&keypairs->keypair_update_lock); + /* After locking, we double check that things didn't change from + * beneath us. + */ + if (unlikely(received_keypair != + rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)))) { + spin_unlock_bh(&keypairs->keypair_update_lock); + return false; + } + + /* When we've finally received the confirmation, we slide the next + * into the current, the current into the previous, and get rid of + * the old previous. 
+ */ + old_keypair = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + rcu_assign_pointer(keypairs->previous_keypair, + rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock))); + wg_noise_keypair_put(old_keypair, true); + rcu_assign_pointer(keypairs->current_keypair, received_keypair); + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + + spin_unlock_bh(&keypairs->keypair_update_lock); + return true; +} + +/* Must hold static_identity->lock */ +void wg_noise_set_static_identity_private_key( + struct noise_static_identity *static_identity, + const u8 private_key[NOISE_PUBLIC_KEY_LEN]) +{ + memcpy(static_identity->static_private, private_key, + NOISE_PUBLIC_KEY_LEN); + curve25519_clamp_secret(static_identity->static_private); + static_identity->has_identity = curve25519_generate_public( + static_identity->static_public, private_key); +} + +/* This is Hugo Krawczyk's HKDF: + * - https://eprint.iacr.org/2010/264.pdf + * - https://tools.ietf.org/html/rfc5869 + */ +static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, + size_t first_len, size_t second_len, size_t third_len, + size_t data_len, const u8 chaining_key[NOISE_HASH_LEN]) +{ + u8 output[BLAKE2S_HASH_SIZE + 1]; + u8 secret[BLAKE2S_HASH_SIZE]; + + WARN_ON(IS_ENABLED(DEBUG) && + (first_len > BLAKE2S_HASH_SIZE || + second_len > BLAKE2S_HASH_SIZE || + third_len > BLAKE2S_HASH_SIZE || + ((second_len || second_dst || third_len || third_dst) && + (!first_len || !first_dst)) || + ((third_len || third_dst) && (!second_len || !second_dst)))); + + /* Extract entropy from data into secret */ + blake2s_hmac(secret, data, chaining_key, BLAKE2S_HASH_SIZE, data_len, + NOISE_HASH_LEN); + + if (!first_dst || !first_len) + goto out; + + /* Expand first key: key = secret, data = 0x1 */ + output[0] = 1; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, 1, + BLAKE2S_HASH_SIZE); + memcpy(first_dst, 
output, first_len); + + if (!second_dst || !second_len) + goto out; + + /* Expand second key: key = secret, data = first-key || 0x2 */ + output[BLAKE2S_HASH_SIZE] = 2; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, + BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); + memcpy(second_dst, output, second_len); + + if (!third_dst || !third_len) + goto out; + + /* Expand third key: key = secret, data = second-key || 0x3 */ + output[BLAKE2S_HASH_SIZE] = 3; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, + BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); + memcpy(third_dst, output, third_len); + +out: + /* Clear sensitive data from stack */ + memzero_explicit(secret, BLAKE2S_HASH_SIZE); + memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); +} + +static void derive_keys(struct noise_symmetric_key *first_dst, + struct noise_symmetric_key *second_dst, + const u8 chaining_key[NOISE_HASH_LEN]) +{ + u64 birthdate = ktime_get_coarse_boottime_ns(); + kdf(first_dst->key, second_dst->key, NULL, NULL, + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, + chaining_key); + first_dst->birthdate = second_dst->birthdate = birthdate; + first_dst->is_valid = second_dst->is_valid = true; +} + +static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 private[NOISE_PUBLIC_KEY_LEN], + const u8 public[NOISE_PUBLIC_KEY_LEN]) +{ + u8 dh_calculation[NOISE_PUBLIC_KEY_LEN]; + + if (unlikely(!curve25519(dh_calculation, private, public))) + return false; + kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key); + memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN); + return true; +} + +static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) +{ + static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; + if (unlikely(!crypto_memneq(precomputed, zero_point, 
NOISE_PUBLIC_KEY_LEN))) + return false; + kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, + chaining_key); + return true; +} + +static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) +{ + struct blake2s_state blake; + + blake2s_init(&blake, NOISE_HASH_LEN); + blake2s_update(&blake, hash, NOISE_HASH_LEN); + blake2s_update(&blake, src, src_len); + blake2s_final(&blake, hash); +} + +static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 psk[NOISE_SYMMETRIC_KEY_LEN]) +{ + u8 temp_hash[NOISE_HASH_LEN]; + + kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key); + mix_hash(hash, temp_hash, NOISE_HASH_LEN); + memzero_explicit(temp_hash, NOISE_HASH_LEN); +} + +static void handshake_init(u8 chaining_key[NOISE_HASH_LEN], + u8 hash[NOISE_HASH_LEN], + const u8 remote_static[NOISE_PUBLIC_KEY_LEN]) +{ + memcpy(hash, handshake_init_hash, NOISE_HASH_LEN); + memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN); + mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN); +} + +static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext, + size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash, + NOISE_HASH_LEN, + 0 /* Always zero for Noise_IK */, key); + mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len)); +} + +static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext, + size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len, + hash, NOISE_HASH_LEN, + 0 /* Always zero for Noise_IK */, key)) + return false; + mix_hash(hash, src_ciphertext, src_len); + return true; +} + +static void message_ephemeral(u8 
ephemeral_dst[NOISE_PUBLIC_KEY_LEN], + const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN], + u8 chaining_key[NOISE_HASH_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + if (ephemeral_dst != ephemeral_src) + memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN); + mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN); + kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0, + NOISE_PUBLIC_KEY_LEN, chaining_key); +} + +static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN]) +{ + struct timespec64 now; + + ktime_get_real_ts64(&now); + + /* In order to prevent some sort of infoleak from precise timers, we + * round down the nanoseconds part to the closest rounded-down power of + * two to the maximum initiations per second allowed anyway by the + * implementation. + */ + now.tv_nsec = ALIGN_DOWN(now.tv_nsec, + rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND)); + + /* https://cr.yp.to/libtai/tai64.html */ + *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec); + *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec); +} + +bool +wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, + struct noise_handshake *handshake) +{ + u8 timestamp[NOISE_TIMESTAMP_LEN]; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + bool ret = false; + + /* We need to wait for crng _before_ taking any locks, since + * curve25519_generate_secret uses get_random_bytes_wait. 
+ */ + wait_for_random_bytes(); + + down_read(&handshake->static_identity->lock); + down_write(&handshake->lock); + + if (unlikely(!handshake->static_identity->has_identity)) + goto out; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION); + + handshake_init(handshake->chaining_key, handshake->hash, + handshake->remote_static); + + /* e */ + curve25519_generate_secret(handshake->ephemeral_private); + if (!curve25519_generate_public(dst->unencrypted_ephemeral, + handshake->ephemeral_private)) + goto out; + message_ephemeral(dst->unencrypted_ephemeral, + dst->unencrypted_ephemeral, handshake->chaining_key, + handshake->hash); + + /* es */ + if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private, + handshake->remote_static)) + goto out; + + /* s */ + message_encrypt(dst->encrypted_static, + handshake->static_identity->static_public, + NOISE_PUBLIC_KEY_LEN, key, handshake->hash); + + /* ss */ + if (!mix_precomputed_dh(handshake->chaining_key, key, + handshake->precomputed_static_static)) + goto out; + + /* {t} */ + tai64n_now(timestamp); + message_encrypt(dst->encrypted_timestamp, timestamp, + NOISE_TIMESTAMP_LEN, key, handshake->hash); + + dst->sender_index = wg_index_hashtable_insert( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); + + handshake->state = HANDSHAKE_CREATED_INITIATION; + ret = true; + +out: + up_write(&handshake->lock); + up_read(&handshake->static_identity->lock); + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + return ret; +} + +struct wg_peer * +wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, + struct wg_device *wg) +{ + struct wg_peer *peer = NULL, *ret_peer = NULL; + struct noise_handshake *handshake; + bool replay_attack, flood_attack; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + u8 hash[NOISE_HASH_LEN]; + u8 s[NOISE_PUBLIC_KEY_LEN]; + u8 e[NOISE_PUBLIC_KEY_LEN]; + u8 t[NOISE_TIMESTAMP_LEN]; + u64 initiation_consumption; + + 
down_read(&wg->static_identity.lock); + if (unlikely(!wg->static_identity.has_identity)) + goto out; + + handshake_init(chaining_key, hash, wg->static_identity.static_public); + + /* e */ + message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); + + /* es */ + if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e)) + goto out; + + /* s */ + if (!message_decrypt(s, src->encrypted_static, + sizeof(src->encrypted_static), key, hash)) + goto out; + + /* Lookup which peer we're actually talking to */ + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s); + if (!peer) + goto out; + handshake = &peer->handshake; + + /* ss */ + if (!mix_precomputed_dh(chaining_key, key, + handshake->precomputed_static_static)) + goto out; + + /* {t} */ + if (!message_decrypt(t, src->encrypted_timestamp, + sizeof(src->encrypted_timestamp), key, hash)) + goto out; + + down_read(&handshake->lock); + replay_attack = memcmp(t, handshake->latest_timestamp, + NOISE_TIMESTAMP_LEN) <= 0; + flood_attack = (s64)handshake->last_initiation_consumption + + NSEC_PER_SEC / INITIATIONS_PER_SECOND > + (s64)ktime_get_coarse_boottime_ns(); + up_read(&handshake->lock); + if (replay_attack || flood_attack) + goto out; + + /* Success! 
Copy everything to peer */ + down_write(&handshake->lock); + memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); + if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0) + memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN); + memcpy(handshake->hash, hash, NOISE_HASH_LEN); + memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); + handshake->remote_index = src->sender_index; + initiation_consumption = ktime_get_coarse_boottime_ns(); + if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) + handshake->last_initiation_consumption = initiation_consumption; + handshake->state = HANDSHAKE_CONSUMED_INITIATION; + up_write(&handshake->lock); + ret_peer = peer; + +out: + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + memzero_explicit(hash, NOISE_HASH_LEN); + memzero_explicit(chaining_key, NOISE_HASH_LEN); + up_read(&wg->static_identity.lock); + if (!ret_peer) + wg_peer_put(peer); + return ret_peer; +} + +bool wg_noise_handshake_create_response(struct message_handshake_response *dst, + struct noise_handshake *handshake) +{ + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + bool ret = false; + + /* We need to wait for crng _before_ taking any locks, since + * curve25519_generate_secret uses get_random_bytes_wait. 
+ */ + wait_for_random_bytes(); + + down_read(&handshake->static_identity->lock); + down_write(&handshake->lock); + + if (handshake->state != HANDSHAKE_CONSUMED_INITIATION) + goto out; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE); + dst->receiver_index = handshake->remote_index; + + /* e */ + curve25519_generate_secret(handshake->ephemeral_private); + if (!curve25519_generate_public(dst->unencrypted_ephemeral, + handshake->ephemeral_private)) + goto out; + message_ephemeral(dst->unencrypted_ephemeral, + dst->unencrypted_ephemeral, handshake->chaining_key, + handshake->hash); + + /* ee */ + if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, + handshake->remote_ephemeral)) + goto out; + + /* se */ + if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, + handshake->remote_static)) + goto out; + + /* psk */ + mix_psk(handshake->chaining_key, handshake->hash, key, + handshake->preshared_key); + + /* {} */ + message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash); + + dst->sender_index = wg_index_hashtable_insert( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); + + handshake->state = HANDSHAKE_CREATED_RESPONSE; + ret = true; + +out: + up_write(&handshake->lock); + up_read(&handshake->static_identity->lock); + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + return ret; +} + +struct wg_peer * +wg_noise_handshake_consume_response(struct message_handshake_response *src, + struct wg_device *wg) +{ + enum noise_handshake_state state = HANDSHAKE_ZEROED; + struct wg_peer *peer = NULL, *ret_peer = NULL; + struct noise_handshake *handshake; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u8 hash[NOISE_HASH_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + u8 e[NOISE_PUBLIC_KEY_LEN]; + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; + u8 static_private[NOISE_PUBLIC_KEY_LEN]; + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; + + down_read(&wg->static_identity.lock); + + if 
(unlikely(!wg->static_identity.has_identity)) + goto out; + + handshake = (struct noise_handshake *)wg_index_hashtable_lookup( + wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE, + src->receiver_index, &peer); + if (unlikely(!handshake)) + goto out; + + down_read(&handshake->lock); + state = handshake->state; + memcpy(hash, handshake->hash, NOISE_HASH_LEN); + memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); + memcpy(ephemeral_private, handshake->ephemeral_private, + NOISE_PUBLIC_KEY_LEN); + memcpy(preshared_key, handshake->preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + up_read(&handshake->lock); + + if (state != HANDSHAKE_CREATED_INITIATION) + goto fail; + + /* e */ + message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); + + /* ee */ + if (!mix_dh(chaining_key, NULL, ephemeral_private, e)) + goto fail; + + /* se */ + if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e)) + goto fail; + + /* psk */ + mix_psk(chaining_key, hash, key, preshared_key); + + /* {} */ + if (!message_decrypt(NULL, src->encrypted_nothing, + sizeof(src->encrypted_nothing), key, hash)) + goto fail; + + /* Success! Copy everything to peer */ + down_write(&handshake->lock); + /* It's important to check that the state is still the same, while we + * have an exclusive lock. 
+ */ + if (handshake->state != state) { + up_write(&handshake->lock); + goto fail; + } + memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); + memcpy(handshake->hash, hash, NOISE_HASH_LEN); + memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); + handshake->remote_index = src->sender_index; + handshake->state = HANDSHAKE_CONSUMED_RESPONSE; + up_write(&handshake->lock); + ret_peer = peer; + goto out; + +fail: + wg_peer_put(peer); +out: + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + memzero_explicit(hash, NOISE_HASH_LEN); + memzero_explicit(chaining_key, NOISE_HASH_LEN); + memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); + up_read(&wg->static_identity.lock); + return ret_peer; +} + +bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, + struct noise_keypairs *keypairs) +{ + struct noise_keypair *new_keypair; + bool ret = false; + + down_write(&handshake->lock); + if (handshake->state != HANDSHAKE_CREATED_RESPONSE && + handshake->state != HANDSHAKE_CONSUMED_RESPONSE) + goto out; + + new_keypair = keypair_create(handshake->entry.peer); + if (!new_keypair) + goto out; + new_keypair->i_am_the_initiator = handshake->state == + HANDSHAKE_CONSUMED_RESPONSE; + new_keypair->remote_index = handshake->remote_index; + + if (new_keypair->i_am_the_initiator) + derive_keys(&new_keypair->sending, &new_keypair->receiving, + handshake->chaining_key); + else + derive_keys(&new_keypair->receiving, &new_keypair->sending, + handshake->chaining_key); + + handshake_zero(handshake); + rcu_read_lock_bh(); + if (likely(!READ_ONCE(container_of(handshake, struct wg_peer, + handshake)->is_dead))) { + add_new_keypair(keypairs, new_keypair); + net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n", + handshake->entry.peer->device->dev->name, + new_keypair->internal_id, + handshake->entry.peer->internal_id); + ret = 
wg_index_hashtable_replace( + handshake->entry.peer->device->index_hashtable, + &handshake->entry, &new_keypair->entry); + } else { + kfree_sensitive(new_keypair); + } + rcu_read_unlock_bh(); + +out: + up_write(&handshake->lock); + return ret; +} diff --git a/net/wireguard/noise.h b/net/wireguard/noise.h new file mode 100644 index 000000000000..c527253dba80 --- /dev/null +++ b/net/wireguard/noise.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ +#ifndef _WG_NOISE_H +#define _WG_NOISE_H + +#include "messages.h" +#include "peerlookup.h" + +#include +#include +#include +#include +#include +#include + +struct noise_replay_counter { + u64 counter; + spinlock_t lock; + unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; +}; + +struct noise_symmetric_key { + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u64 birthdate; + bool is_valid; +}; + +struct noise_keypair { + struct index_hashtable_entry entry; + struct noise_symmetric_key sending; + atomic64_t sending_counter; + struct noise_symmetric_key receiving; + struct noise_replay_counter receiving_counter; + __le32 remote_index; + bool i_am_the_initiator; + struct kref refcount; + struct rcu_head rcu; + u64 internal_id; +}; + +struct noise_keypairs { + struct noise_keypair __rcu *current_keypair; + struct noise_keypair __rcu *previous_keypair; + struct noise_keypair __rcu *next_keypair; + spinlock_t keypair_update_lock; +}; + +struct noise_static_identity { + u8 static_public[NOISE_PUBLIC_KEY_LEN]; + u8 static_private[NOISE_PUBLIC_KEY_LEN]; + struct rw_semaphore lock; + bool has_identity; +}; + +enum noise_handshake_state { + HANDSHAKE_ZEROED, + HANDSHAKE_CREATED_INITIATION, + HANDSHAKE_CONSUMED_INITIATION, + HANDSHAKE_CREATED_RESPONSE, + HANDSHAKE_CONSUMED_RESPONSE +}; + +struct noise_handshake { + struct index_hashtable_entry entry; + + enum noise_handshake_state state; + u64 last_initiation_consumption; + + struct 
noise_static_identity *static_identity; + + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; + u8 remote_static[NOISE_PUBLIC_KEY_LEN]; + u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN]; + + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; + + u8 hash[NOISE_HASH_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + + u8 latest_timestamp[NOISE_TIMESTAMP_LEN]; + __le32 remote_index; + + /* Protects all members except the immutable (after noise_handshake_ + * init): remote_static, precomputed_static_static, static_identity. + */ + struct rw_semaphore lock; +}; + +struct wg_device; + +void wg_noise_init(void); +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer); +void wg_noise_handshake_clear(struct noise_handshake *handshake); +static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) +{ + atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() - + (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC); +} + +void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now); +struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair); +void wg_noise_keypairs_clear(struct noise_keypairs *keypairs); +bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *received_keypair); +void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); + +void wg_noise_set_static_identity_private_key( + struct noise_static_identity *static_identity, + const u8 private_key[NOISE_PUBLIC_KEY_LEN]); +void wg_noise_precompute_static_static(struct wg_peer *peer); + +bool +wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, + struct noise_handshake *handshake); +struct wg_peer * +wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, + struct wg_device 
*wg); + +bool wg_noise_handshake_create_response(struct message_handshake_response *dst, + struct noise_handshake *handshake); +struct wg_peer * +wg_noise_handshake_consume_response(struct message_handshake_response *src, + struct wg_device *wg); + +bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, + struct noise_keypairs *keypairs); + +#endif /* _WG_NOISE_H */ diff --git a/net/wireguard/peer.c b/net/wireguard/peer.c new file mode 100644 index 000000000000..1acd00ab2fbc --- /dev/null +++ b/net/wireguard/peer.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include "peer.h" +#include "device.h" +#include "queueing.h" +#include "timers.h" +#include "peerlookup.h" +#include "noise.h" + +#include +#include +#include +#include + +static struct kmem_cache *peer_cache; +static atomic64_t peer_counter = ATOMIC64_INIT(0); + +struct wg_peer *wg_peer_create(struct wg_device *wg, + const u8 public_key[NOISE_PUBLIC_KEY_LEN], + const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]) +{ + struct wg_peer *peer; + int ret = -ENOMEM; + + lockdep_assert_held(&wg->device_update_lock); + + if (wg->num_peers >= MAX_PEERS_PER_DEVICE) + return ERR_PTR(ret); + + peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); + if (unlikely(!peer)) + return ERR_PTR(ret); + if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) + goto err; + + peer->device = wg; + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, + public_key, preshared_key, peer); + peer->internal_id = atomic64_inc_return(&peer_counter); + peer->serial_work_cpu = nr_cpumask_bits; + wg_cookie_init(&peer->latest_cookie); + wg_timers_init(peer); + wg_cookie_checker_precompute_peer_keys(peer); + spin_lock_init(&peer->keypairs.keypair_update_lock); + INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); + INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); + 
wg_prev_queue_init(&peer->tx_queue); + wg_prev_queue_init(&peer->rx_queue); + rwlock_init(&peer->endpoint_lock); + kref_init(&peer->refcount); + skb_queue_head_init(&peer->staged_packet_queue); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state); + netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll, + NAPI_POLL_WEIGHT); + napi_enable(&peer->napi); + list_add_tail(&peer->peer_list, &wg->peer_list); + INIT_LIST_HEAD(&peer->allowedips_list); + wg_pubkey_hashtable_add(wg->peer_hashtable, peer); + ++wg->num_peers; + pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); + return peer; + +err: + kmem_cache_free(peer_cache, peer); + return ERR_PTR(ret); +} + +struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), + "Taking peer reference without holding the RCU read lock"); + if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount))) + return NULL; + return peer; +} + +static void peer_make_dead(struct wg_peer *peer) +{ + /* Remove from configuration-time lookup structures. */ + list_del_init(&peer->peer_list); + wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, + &peer->device->device_update_lock); + wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer); + + /* Mark as dead, so that we don't allow jumping contexts after. */ + WRITE_ONCE(peer->is_dead, true); + + /* The caller must now synchronize_net() for this to take effect. */ +} + +static void peer_remove_after_dead(struct wg_peer *peer) +{ + WARN_ON(!peer->is_dead); + + /* No more keypairs can be created for this peer, since is_dead protects + * add_new_keypair, so we can now destroy existing ones. + */ + wg_noise_keypairs_clear(&peer->keypairs); + + /* Destroy all ongoing timers that were in-flight at the beginning of + * this function. 
+ */ + wg_timers_stop(peer); + + /* The transition between packet encryption/decryption queues isn't + * guarded by is_dead, but each reference's life is strictly bounded by + * two generations: once for parallel crypto and once for serial + * ingestion, so we can simply flush twice, and be sure that we no + * longer have references inside these queues. + */ + + /* a) For encrypt/decrypt. */ + flush_workqueue(peer->device->packet_crypt_wq); + /* b.1) For send (but not receive, since that's napi). */ + flush_workqueue(peer->device->packet_crypt_wq); + /* b.2.1) For receive (but not send, since that's wq). */ + napi_disable(&peer->napi); + /* b.2.2) It's now safe to remove the napi struct, which must be done + * here from process context. + */ + netif_napi_del(&peer->napi); + + /* Ensure any workstructs we own (like transmit_handshake_work or + * clear_peer_work) no longer are in use. + */ + flush_workqueue(peer->device->handshake_send_wq); + + /* After the above flushes, a peer might still be active in a few + * different contexts: 1) from xmit(), before hitting is_dead and + * returning, 2) from wg_packet_consume_data(), before hitting is_dead + * and returning, 3) from wg_receive_handshake_packet() after a point + * where it has processed an incoming handshake packet, but where + * all calls to pass it off to timers fails because of is_dead. 
We won't + * have new references in (1) eventually, because we're removed from + * allowedips; we won't have new references in (2) eventually, because + * wg_index_hashtable_lookup will always return NULL, since we removed + * all existing keypairs and no more can be created; we won't have new + * references in (3) eventually, because we're removed from the pubkey + * hash table, which allows for a maximum of one handshake response, + * via the still-uncleared index hashtable entry, but not more than one, + * and in wg_cookie_message_consume, the lookup eventually gets a peer + * with a refcount of zero, so no new reference is taken. + */ + + --peer->device->num_peers; + wg_peer_put(peer); +} + +/* We have a separate "remove" function to make sure that all active places + * where a peer is currently operating will eventually come to an end and not + * pass their reference onto another context. + */ +void wg_peer_remove(struct wg_peer *peer) +{ + if (unlikely(!peer)) + return; + lockdep_assert_held(&peer->device->device_update_lock); + + peer_make_dead(peer); + synchronize_net(); + peer_remove_after_dead(peer); +} + +void wg_peer_remove_all(struct wg_device *wg) +{ + struct wg_peer *peer, *temp; + LIST_HEAD(dead_peers); + + lockdep_assert_held(&wg->device_update_lock); + + /* Avoid having to traverse individually for each one. 
*/ + wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock); + + list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { + peer_make_dead(peer); + list_add_tail(&peer->peer_list, &dead_peers); + } + synchronize_net(); + list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) + peer_remove_after_dead(peer); +} + +static void rcu_release(struct rcu_head *rcu) +{ + struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); + + dst_cache_destroy(&peer->endpoint_cache); + WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); + + /* The final zeroing takes care of clearing any remaining handshake key + * material and other potentially sensitive information. + */ + memzero_explicit(peer, sizeof(*peer)); + kmem_cache_free(peer_cache, peer); +} + +static void kref_release(struct kref *refcount) +{ + struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount); + + pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + /* Remove ourself from dynamic runtime lookup structures, now that the + * last reference is gone. + */ + wg_index_hashtable_remove(peer->device->index_hashtable, + &peer->handshake.entry); + + /* Remove any lingering packets that didn't have a chance to be + * transmitted. + */ + wg_packet_purge_staged_packets(peer); + + /* Free the memory used. */ + call_rcu(&peer->rcu, rcu_release); +} + +void wg_peer_put(struct wg_peer *peer) +{ + if (unlikely(!peer)) + return; + kref_put(&peer->refcount, kref_release); +} + +int __init wg_peer_init(void) +{ + peer_cache = KMEM_CACHE(wg_peer, 0); + return peer_cache ? 
0 : -ENOMEM; +} + +void wg_peer_uninit(void) +{ + kmem_cache_destroy(peer_cache); +} diff --git a/net/wireguard/peer.h b/net/wireguard/peer.h new file mode 100644 index 000000000000..76e4d3128ad4 --- /dev/null +++ b/net/wireguard/peer.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_PEER_H +#define _WG_PEER_H + +#include "device.h" +#include "noise.h" +#include "cookie.h" + +#include +#include +#include +#include +#include + +struct wg_device; + +struct endpoint { + union { + struct sockaddr addr; + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + }; + union { + struct { + struct in_addr src4; + /* Essentially the same as addr6->scope_id */ + int src_if4; + }; + struct in6_addr src6; + }; +}; + +struct wg_peer { + struct wg_device *device; + struct prev_queue tx_queue, rx_queue; + struct sk_buff_head staged_packet_queue; + int serial_work_cpu; + bool is_dead; + struct noise_keypairs keypairs; + struct endpoint endpoint; + struct dst_cache endpoint_cache; + rwlock_t endpoint_lock; + struct noise_handshake handshake; + atomic64_t last_sent_handshake; + struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; + struct cookie latest_cookie; + struct hlist_node pubkey_hash; + u64 rx_bytes, tx_bytes; + struct timer_list timer_retransmit_handshake, timer_send_keepalive; + struct timer_list timer_new_handshake, timer_zero_key_material; + struct timer_list timer_persistent_keepalive; + unsigned int timer_handshake_attempts; + u16 persistent_keepalive_interval; + bool timer_need_another_keepalive; + bool sent_lastminute_handshake; + struct timespec64 walltime_last_handshake; + struct kref refcount; + struct rcu_head rcu; + struct list_head peer_list; + struct list_head allowedips_list; + struct napi_struct napi; + u64 internal_id; +}; + +struct wg_peer *wg_peer_create(struct wg_device *wg, + const u8 public_key[NOISE_PUBLIC_KEY_LEN], + 
const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]); + +struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer); +static inline struct wg_peer *wg_peer_get(struct wg_peer *peer) +{ + kref_get(&peer->refcount); + return peer; +} +void wg_peer_put(struct wg_peer *peer); +void wg_peer_remove(struct wg_peer *peer); +void wg_peer_remove_all(struct wg_device *wg); + +int wg_peer_init(void); +void wg_peer_uninit(void); + +#endif /* _WG_PEER_H */ diff --git a/net/wireguard/peerlookup.c b/net/wireguard/peerlookup.c new file mode 100644 index 000000000000..f2783aa7a88f --- /dev/null +++ b/net/wireguard/peerlookup.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include "peerlookup.h" +#include "peer.h" +#include "noise.h" + +static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) +{ + /* siphash gives us a secure 64bit number based on a random key. Since + * the bits are uniformly distributed, we can then mask off to get the + * bits we need. 
+ */ + const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); + + return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; +} + +struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void) +{ + struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); + + if (!table) + return NULL; + + get_random_bytes(&table->key, sizeof(table->key)); + hash_init(table->hashtable); + mutex_init(&table->lock); + return table; +} + +void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, + struct wg_peer *peer) +{ + mutex_lock(&table->lock); + hlist_add_head_rcu(&peer->pubkey_hash, + pubkey_bucket(table, peer->handshake.remote_static)); + mutex_unlock(&table->lock); +} + +void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, + struct wg_peer *peer) +{ + mutex_lock(&table->lock); + hlist_del_init_rcu(&peer->pubkey_hash); + mutex_unlock(&table->lock); +} + +/* Returns a strong reference to a peer */ +struct wg_peer * +wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) +{ + struct wg_peer *iter_peer, *peer = NULL; + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), + pubkey_hash) { + if (!memcmp(pubkey, iter_peer->handshake.remote_static, + NOISE_PUBLIC_KEY_LEN)) { + peer = iter_peer; + break; + } + } + peer = wg_peer_get_maybe_zero(peer); + rcu_read_unlock_bh(); + return peer; +} + +static struct hlist_head *index_bucket(struct index_hashtable *table, + const __le32 index) +{ + /* Since the indices are random and thus all bits are uniformly + * distributed, we can find its bucket simply by masking. 
+ */ + return &table->hashtable[(__force u32)index & + (HASH_SIZE(table->hashtable) - 1)]; +} + +struct index_hashtable *wg_index_hashtable_alloc(void) +{ + struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); + + if (!table) + return NULL; + + hash_init(table->hashtable); + spin_lock_init(&table->lock); + return table; +} + +/* At the moment, we limit ourselves to 2^20 total peers, which generally might + * amount to 2^20*3 items in this hashtable. The algorithm below works by + * picking a random number and testing it. We can see that these limits mean we + * usually succeed pretty quickly: + * + * >>> def calculation(tries, size): + * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32)) + * ... + * >>> calculation(1, 2**20 * 3) + * 0.999267578125 + * >>> calculation(2, 2**20 * 3) + * 0.0007318854331970215 + * >>> calculation(3, 2**20 * 3) + * 5.360489012673497e-07 + * >>> calculation(4, 2**20 * 3) + * 3.9261394135792216e-10 + * + * At the moment, we don't do any masking, so this algorithm isn't exactly + * constant time in either the random guessing or in the hash list lookup. We + * could require a minimum of 3 tries, which would successfully mask the + * guessing. this would not, however, help with the growing hash lengths, which + * is another thing to consider moving forward. + */ + +__le32 wg_index_hashtable_insert(struct index_hashtable *table, + struct index_hashtable_entry *entry) +{ + struct index_hashtable_entry *existing_entry; + + spin_lock_bh(&table->lock); + hlist_del_init_rcu(&entry->index_hash); + spin_unlock_bh(&table->lock); + + rcu_read_lock_bh(); + +search_unused_slot: + /* First we try to find an unused slot, randomly, while unlocked. */ + entry->index = (__force __le32)get_random_u32(); + hlist_for_each_entry_rcu_bh(existing_entry, + index_bucket(table, entry->index), + index_hash) { + if (existing_entry->index == entry->index) + /* If it's already in use, we continue searching. 
*/ + goto search_unused_slot; + } + + /* Once we've found an unused slot, we lock it, and then double-check + * that nobody else stole it from us. + */ + spin_lock_bh(&table->lock); + hlist_for_each_entry_rcu_bh(existing_entry, + index_bucket(table, entry->index), + index_hash) { + if (existing_entry->index == entry->index) { + spin_unlock_bh(&table->lock); + /* If it was stolen, we start over. */ + goto search_unused_slot; + } + } + /* Otherwise, we know we have it exclusively (since we're locked), + * so we insert. + */ + hlist_add_head_rcu(&entry->index_hash, + index_bucket(table, entry->index)); + spin_unlock_bh(&table->lock); + + rcu_read_unlock_bh(); + + return entry->index; +} + +bool wg_index_hashtable_replace(struct index_hashtable *table, + struct index_hashtable_entry *old, + struct index_hashtable_entry *new) +{ + bool ret; + + spin_lock_bh(&table->lock); + ret = !hlist_unhashed(&old->index_hash); + if (unlikely(!ret)) + goto out; + + new->index = old->index; + hlist_replace_rcu(&old->index_hash, &new->index_hash); + + /* Calling init here NULLs out index_hash, and in fact after this + * function returns, it's theoretically possible for this to get + * reinserted elsewhere. That means the RCU lookup below might either + * terminate early or jump between buckets, in which case the packet + * simply gets dropped, which isn't terrible. 
+ */ + INIT_HLIST_NODE(&old->index_hash); +out: + spin_unlock_bh(&table->lock); + return ret; +} + +void wg_index_hashtable_remove(struct index_hashtable *table, + struct index_hashtable_entry *entry) +{ + spin_lock_bh(&table->lock); + hlist_del_init_rcu(&entry->index_hash); + spin_unlock_bh(&table->lock); +} + +/* Returns a strong reference to a entry->peer */ +struct index_hashtable_entry * +wg_index_hashtable_lookup(struct index_hashtable *table, + const enum index_hashtable_type type_mask, + const __le32 index, struct wg_peer **peer) +{ + struct index_hashtable_entry *iter_entry, *entry = NULL; + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), + index_hash) { + if (iter_entry->index == index) { + if (likely(iter_entry->type & type_mask)) + entry = iter_entry; + break; + } + } + if (likely(entry)) { + entry->peer = wg_peer_get_maybe_zero(entry->peer); + if (likely(entry->peer)) + *peer = entry->peer; + else + entry = NULL; + } + rcu_read_unlock_bh(); + return entry; +} diff --git a/net/wireguard/peerlookup.h b/net/wireguard/peerlookup.h new file mode 100644 index 000000000000..ced811797680 --- /dev/null +++ b/net/wireguard/peerlookup.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _WG_PEERLOOKUP_H +#define _WG_PEERLOOKUP_H + +#include "messages.h" + +#include +#include +#include + +struct wg_peer; + +struct pubkey_hashtable { + /* TODO: move to rhashtable */ + DECLARE_HASHTABLE(hashtable, 11); + siphash_key_t key; + struct mutex lock; +}; + +struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void); +void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, + struct wg_peer *peer); +void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, + struct wg_peer *peer); +struct wg_peer * +wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]); + +struct index_hashtable { + /* TODO: move to rhashtable */ + DECLARE_HASHTABLE(hashtable, 13); + spinlock_t lock; +}; + +enum index_hashtable_type { + INDEX_HASHTABLE_HANDSHAKE = 1U << 0, + INDEX_HASHTABLE_KEYPAIR = 1U << 1 +}; + +struct index_hashtable_entry { + struct wg_peer *peer; + struct hlist_node index_hash; + enum index_hashtable_type type; + __le32 index; +}; + +struct index_hashtable *wg_index_hashtable_alloc(void); +__le32 wg_index_hashtable_insert(struct index_hashtable *table, + struct index_hashtable_entry *entry); +bool wg_index_hashtable_replace(struct index_hashtable *table, + struct index_hashtable_entry *old, + struct index_hashtable_entry *new); +void wg_index_hashtable_remove(struct index_hashtable *table, + struct index_hashtable_entry *entry); +struct index_hashtable_entry * +wg_index_hashtable_lookup(struct index_hashtable *table, + const enum index_hashtable_type type_mask, + const __le32 index, struct wg_peer **peer); + +#endif /* _WG_PEERLOOKUP_H */ diff --git a/net/wireguard/queueing.c b/net/wireguard/queueing.c new file mode 100644 index 000000000000..8084e7408c0a --- /dev/null +++ b/net/wireguard/queueing.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "queueing.h" +#include + +struct multicore_worker __percpu * +wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) +{ + int cpu; + struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); + + if (!worker) + return NULL; + + for_each_possible_cpu(cpu) { + per_cpu_ptr(worker, cpu)->ptr = ptr; + INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); + } + return worker; +} + +int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, + unsigned int len) +{ + int ret; + + memset(queue, 0, sizeof(*queue)); + ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); + if (ret) + return ret; + queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); + if (!queue->worker) { + ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; + } + return 0; +} + +void wg_packet_queue_free(struct crypt_queue *queue, bool purge) +{ + free_percpu(queue->worker); + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, purge ? 
__skb_array_destroy_skb : NULL); +} + +#define NEXT(skb) ((skb)->prev) +#define STUB(queue) ((struct sk_buff *)&queue->empty) + +void wg_prev_queue_init(struct prev_queue *queue) +{ + NEXT(STUB(queue)) = NULL; + queue->head = queue->tail = STUB(queue); + queue->peeked = NULL; + atomic_set(&queue->count, 0); + BUILD_BUG_ON( + offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - + offsetof(struct prev_queue, empty) || + offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - + offsetof(struct prev_queue, empty)); +} + +static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + WRITE_ONCE(NEXT(skb), NULL); + WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); +} + +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) + return false; + __wg_prev_queue_enqueue(queue, skb); + return true; +} + +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) +{ + struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); + + if (tail == STUB(queue)) { + if (!next) + return NULL; + queue->tail = next; + tail = next; + next = smp_load_acquire(&NEXT(next)); + } + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + if (tail != READ_ONCE(queue->head)) + return NULL; + __wg_prev_queue_enqueue(queue, STUB(queue)); + next = smp_load_acquire(&NEXT(tail)); + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + return NULL; +} + +#undef NEXT +#undef STUB diff --git a/net/wireguard/queueing.h b/net/wireguard/queueing.h new file mode 100644 index 000000000000..03850c43ebaf --- /dev/null +++ b/net/wireguard/queueing.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _WG_QUEUEING_H +#define _WG_QUEUEING_H + +#include "peer.h" +#include +#include +#include +#include +#include + +struct wg_device; +struct wg_peer; +struct multicore_worker; +struct crypt_queue; +struct prev_queue; +struct sk_buff; + +/* queueing.c APIs: */ +int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, + unsigned int len); +void wg_packet_queue_free(struct crypt_queue *queue, bool purge); +struct multicore_worker __percpu * +wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); + +/* receive.c APIs: */ +void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb); +void wg_packet_handshake_receive_worker(struct work_struct *work); +/* NAPI poll function: */ +int wg_packet_rx_poll(struct napi_struct *napi, int budget); +/* Workqueue worker: */ +void wg_packet_decrypt_worker(struct work_struct *work); + +/* send.c APIs: */ +void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, + bool is_retry); +void wg_packet_send_handshake_response(struct wg_peer *peer); +void wg_packet_send_handshake_cookie(struct wg_device *wg, + struct sk_buff *initiating_skb, + __le32 sender_index); +void wg_packet_send_keepalive(struct wg_peer *peer); +void wg_packet_purge_staged_packets(struct wg_peer *peer); +void wg_packet_send_staged_packets(struct wg_peer *peer); +/* Workqueue workers: */ +void wg_packet_handshake_send_worker(struct work_struct *work); +void wg_packet_tx_worker(struct work_struct *work); +void wg_packet_encrypt_worker(struct work_struct *work); + +enum packet_state { + PACKET_STATE_UNCRYPTED, + PACKET_STATE_CRYPTED, + PACKET_STATE_DEAD +}; + +struct packet_cb { + u64 nonce; + struct noise_keypair *keypair; + atomic_t state; + u32 mtu; + u8 ds; +}; + +#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) +#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) + +static inline bool wg_check_packet_protocol(struct sk_buff *skb) +{ + __be16 real_protocol = 
ip_tunnel_parse_protocol(skb); + return real_protocol && skb->protocol == real_protocol; +} + +static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) +{ + const int pfmemalloc = skb->pfmemalloc; + u32 hash = skb->hash; + u8 l4_hash = skb->l4_hash; + u8 sw_hash = skb->sw_hash; + + skb_scrub_packet(skb, true); + memset(&skb->headers_start, 0, + offsetof(struct sk_buff, headers_end) - + offsetof(struct sk_buff, headers_start)); + skb->pfmemalloc = pfmemalloc; + if (encapsulating) { + skb->hash = hash; + skb->l4_hash = l4_hash; + skb->sw_hash = sw_hash; + } + skb->queue_mapping = 0; + skb->nohdr = 0; + skb->peeked = 0; + skb->mac_len = 0; + skb->dev = NULL; +#ifdef CONFIG_NET_SCHED + skb->tc_index = 0; +#endif + skb_reset_redirect(skb); + skb->hdr_len = skb_headroom(skb); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + skb_probe_transport_header(skb); + skb_reset_inner_headers(skb); +} + +static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) +{ + unsigned int cpu = *stored_cpu, cpu_index, i; + + if (unlikely(cpu == nr_cpumask_bits || + !cpumask_test_cpu(cpu, cpu_online_mask))) { + cpu_index = id % cpumask_weight(cpu_online_mask); + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < cpu_index; ++i) + cpu = cpumask_next(cpu, cpu_online_mask); + *stored_cpu = cpu; + } + return cpu; +} + +/* This function is racy, in the sense that next is unlocked, so it could return + * the same CPU twice. A race-free version of this would be to instead store an + * atomic sequence number, do an increment-and-return, and then iterate through + * every possible CPU until we get to that index -- choose_cpu. However that's + * a bit slower, and it doesn't seem like this potential race actually + * introduces any performance loss, so we live with it. 
+ */ +static inline int wg_cpumask_next_online(int *next) +{ + int cpu = *next; + + while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) + cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; + *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; + return cpu; +} + +void wg_prev_queue_init(struct prev_queue *queue); + +/* Multi producer */ +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); + +/* Single consumer */ +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); + +/* Single consumer */ +static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) +{ + if (queue->peeked) + return queue->peeked; + queue->peeked = wg_prev_queue_dequeue(queue); + return queue->peeked; +} + +/* Single consumer */ +static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) +{ + queue->peeked = NULL; +} + +static inline int wg_queue_enqueue_per_device_and_peer( + struct crypt_queue *device_queue, struct prev_queue *peer_queue, + struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) +{ + int cpu; + + atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); + /* We first queue this up for the peer ingestion, but the consumer + * will wait for the state to change to CRYPTED or DEAD before. + */ + if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) + return -ENOSPC; + + /* Then we queue it up in the device queue, which consumes the + * packet as soon as it can. + */ + cpu = wg_cpumask_next_online(next_cpu); + if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) + return -EPIPE; + queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); + return 0; +} + +static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) +{ + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. 
+ */ + struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); + + atomic_set_release(&PACKET_CB(skb)->state, state); + queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), + peer->device->packet_crypt_wq, &peer->transmit_packet_work); + wg_peer_put(peer); +} + +static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) +{ + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. + */ + struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); + + atomic_set_release(&PACKET_CB(skb)->state, state); + napi_schedule(&peer->napi); + wg_peer_put(peer); +} + +#ifdef DEBUG +bool wg_packet_counter_selftest(void); +#endif + +#endif /* _WG_QUEUEING_H */ diff --git a/net/wireguard/ratelimiter.c b/net/wireguard/ratelimiter.c new file mode 100644 index 000000000000..ecee41f528a5 --- /dev/null +++ b/net/wireguard/ratelimiter.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifdef COMPAT_CANNOT_DEPRECIATE_BH_RCU +/* We normally alias all non-_bh functions to the _bh ones in the compat layer, + * but that's not appropriate here, where we actually do want non-_bh ones. + */ +#undef synchronize_rcu +#define synchronize_rcu old_synchronize_rcu +#undef call_rcu +#define call_rcu old_call_rcu +#undef rcu_barrier +#define rcu_barrier old_rcu_barrier +#endif + +#include "ratelimiter.h" +#include +#include +#include +#include + +static struct kmem_cache *entry_cache; +static hsiphash_key_t key; +static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock"); +static DEFINE_MUTEX(init_lock); +static u64 init_refcnt; /* Protected by init_lock, hence not atomic. 
*/ +static atomic_t total_entries = ATOMIC_INIT(0); +static unsigned int max_entries, table_size; +static void wg_ratelimiter_gc_entries(struct work_struct *); +static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries); +static struct hlist_head *table_v4; +#if IS_ENABLED(CONFIG_IPV6) +static struct hlist_head *table_v6; +#endif + +struct ratelimiter_entry { + u64 last_time_ns, tokens, ip; + void *net; + spinlock_t lock; + struct hlist_node hash; + struct rcu_head rcu; +}; + +enum { + PACKETS_PER_SECOND = 20, + PACKETS_BURSTABLE = 5, + PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND, + TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE +}; + +static void entry_free(struct rcu_head *rcu) +{ + kmem_cache_free(entry_cache, + container_of(rcu, struct ratelimiter_entry, rcu)); + atomic_dec(&total_entries); +} + +static void entry_uninit(struct ratelimiter_entry *entry) +{ + hlist_del_rcu(&entry->hash); + call_rcu(&entry->rcu, entry_free); +} + +/* Calling this function with a NULL work uninits all entries. */ +static void wg_ratelimiter_gc_entries(struct work_struct *work) +{ + const u64 now = ktime_get_coarse_boottime_ns(); + struct ratelimiter_entry *entry; + struct hlist_node *temp; + unsigned int i; + + for (i = 0; i < table_size; ++i) { + spin_lock(&table_lock); + hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) { + if (unlikely(!work) || + now - entry->last_time_ns > NSEC_PER_SEC) + entry_uninit(entry); + } +#if IS_ENABLED(CONFIG_IPV6) + hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) { + if (unlikely(!work) || + now - entry->last_time_ns > NSEC_PER_SEC) + entry_uninit(entry); + } +#endif + spin_unlock(&table_lock); + if (likely(work)) + cond_resched(); + } + if (likely(work)) + queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); +} + +bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net) +{ + /* We only take the bottom half of the net pointer, so that we can hash + * 3 words in the end. 
This way, siphash's len param fits into the final + * u32, and we don't incur an extra round. + */ + const u32 net_word = (unsigned long)net; + struct ratelimiter_entry *entry; + struct hlist_head *bucket; + u64 ip; + + if (skb->protocol == htons(ETH_P_IP)) { + ip = (u64 __force)ip_hdr(skb)->saddr; + bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & + (table_size - 1)]; + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) { + /* Only use 64 bits, so as to ratelimit the whole /64. */ + memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip)); + bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & + (table_size - 1)]; + } +#endif + else + return false; + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, bucket, hash) { + if (entry->net == net && entry->ip == ip) { + u64 now, tokens; + bool ret; + /* Quasi-inspired by nft_limit.c, but this is actually a + * slightly different algorithm. Namely, we incorporate + * the burst as part of the maximum tokens, rather than + * as part of the rate. + */ + spin_lock(&entry->lock); + now = ktime_get_coarse_boottime_ns(); + tokens = min_t(u64, TOKEN_MAX, + entry->tokens + now - + entry->last_time_ns); + entry->last_time_ns = now; + ret = tokens >= PACKET_COST; + entry->tokens = ret ? 
tokens - PACKET_COST : tokens; + spin_unlock(&entry->lock); + rcu_read_unlock(); + return ret; + } + } + rcu_read_unlock(); + + if (atomic_inc_return(&total_entries) > max_entries) + goto err_oom; + + entry = kmem_cache_alloc(entry_cache, GFP_KERNEL); + if (unlikely(!entry)) + goto err_oom; + + entry->net = net; + entry->ip = ip; + INIT_HLIST_NODE(&entry->hash); + spin_lock_init(&entry->lock); + entry->last_time_ns = ktime_get_coarse_boottime_ns(); + entry->tokens = TOKEN_MAX - PACKET_COST; + spin_lock(&table_lock); + hlist_add_head_rcu(&entry->hash, bucket); + spin_unlock(&table_lock); + return true; + +err_oom: + atomic_dec(&total_entries); + return false; +} + +int wg_ratelimiter_init(void) +{ + mutex_lock(&init_lock); + if (++init_refcnt != 1) + goto out; + + entry_cache = KMEM_CACHE(ratelimiter_entry, 0); + if (!entry_cache) + goto err; + + /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting, + * but what it shares in common is that it uses a massive hashtable. So, + * we borrow their wisdom about good table sizes on different systems + * dependent on RAM. This calculation here comes from there. + */ + table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 
8192 : + max_t(unsigned long, 16, roundup_pow_of_two( + (totalram_pages() << PAGE_SHIFT) / + (1U << 14) / sizeof(struct hlist_head))); + max_entries = table_size * 8; + + table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); + if (unlikely(!table_v4)) + goto err_kmemcache; + +#if IS_ENABLED(CONFIG_IPV6) + table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); + if (unlikely(!table_v6)) { + kvfree(table_v4); + goto err_kmemcache; + } +#endif + + queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); + get_random_bytes(&key, sizeof(key)); +out: + mutex_unlock(&init_lock); + return 0; + +err_kmemcache: + kmem_cache_destroy(entry_cache); +err: + --init_refcnt; + mutex_unlock(&init_lock); + return -ENOMEM; +} + +void wg_ratelimiter_uninit(void) +{ + mutex_lock(&init_lock); + if (!init_refcnt || --init_refcnt) + goto out; + + cancel_delayed_work_sync(&gc_work); + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + kvfree(table_v4); +#if IS_ENABLED(CONFIG_IPV6) + kvfree(table_v6); +#endif + kmem_cache_destroy(entry_cache); +out: + mutex_unlock(&init_lock); +} + +#include "selftest/ratelimiter.c" diff --git a/net/wireguard/ratelimiter.h b/net/wireguard/ratelimiter.h new file mode 100644 index 000000000000..83067f71ea99 --- /dev/null +++ b/net/wireguard/ratelimiter.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_RATELIMITER_H +#define _WG_RATELIMITER_H + +#include + +int wg_ratelimiter_init(void); +void wg_ratelimiter_uninit(void); +bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net); + +#ifdef DEBUG +bool wg_ratelimiter_selftest(void); +#endif + +#endif /* _WG_RATELIMITER_H */ diff --git a/net/wireguard/receive.c b/net/wireguard/receive.c new file mode 100644 index 000000000000..214889edb48e --- /dev/null +++ b/net/wireguard/receive.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. 
Donenfeld . All Rights Reserved. + */ + +#include "queueing.h" +#include "device.h" +#include "peer.h" +#include "timers.h" +#include "messages.h" +#include "cookie.h" +#include "socket.h" + +#include +#include +#include +#include +#include + +/* Must be called with bh disabled. */ +static void update_rx_stats(struct wg_peer *peer, size_t len) +{ + struct pcpu_sw_netstats *tstats = + get_cpu_ptr(peer->device->dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + ++tstats->rx_packets; + tstats->rx_bytes += len; + peer->rx_bytes += len; + u64_stats_update_end(&tstats->syncp); + put_cpu_ptr(tstats); +} + +#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) + +static size_t validate_header_len(struct sk_buff *skb) +{ + if (unlikely(skb->len < sizeof(struct message_header))) + return 0; + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && + skb->len >= MESSAGE_MINIMUM_LENGTH) + return sizeof(struct message_data); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && + skb->len == sizeof(struct message_handshake_initiation)) + return sizeof(struct message_handshake_initiation); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && + skb->len == sizeof(struct message_handshake_response)) + return sizeof(struct message_handshake_response); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && + skb->len == sizeof(struct message_handshake_cookie)) + return sizeof(struct message_handshake_cookie); + return 0; +} + +static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) +{ + size_t data_offset, data_len, header_len; + struct udphdr *udp; + + if (unlikely(!wg_check_packet_protocol(skb) || + skb_transport_header(skb) < skb->head || + (skb_transport_header(skb) + sizeof(struct udphdr)) > + skb_tail_pointer(skb))) + return -EINVAL; /* Bogus IP header */ + udp = udp_hdr(skb); + data_offset = (u8 *)udp - skb->data; + if (unlikely(data_offset > U16_MAX || + data_offset + 
sizeof(struct udphdr) > skb->len)) + /* Packet has offset at impossible location or isn't big enough + * to have UDP fields. + */ + return -EINVAL; + data_len = ntohs(udp->len); + if (unlikely(data_len < sizeof(struct udphdr) || + data_len > skb->len - data_offset)) + /* UDP packet is reporting too small of a size or lying about + * its size. + */ + return -EINVAL; + data_len -= sizeof(struct udphdr); + data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; + if (unlikely(!pskb_may_pull(skb, + data_offset + sizeof(struct message_header)) || + pskb_trim(skb, data_len + data_offset) < 0)) + return -EINVAL; + skb_pull(skb, data_offset); + if (unlikely(skb->len != data_len)) + /* Final len does not agree with calculated len */ + return -EINVAL; + header_len = validate_header_len(skb); + if (unlikely(!header_len)) + return -EINVAL; + __skb_push(skb, data_offset); + if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) + return -EINVAL; + __skb_pull(skb, data_offset); + return 0; +} + +static void wg_receive_handshake_packet(struct wg_device *wg, + struct sk_buff *skb) +{ + enum cookie_mac_state mac_state; + struct wg_peer *peer = NULL; + /* This is global, so that our load calculation applies to the whole + * system. We don't care about races with it at all. 
+ */ + static u64 last_under_load; + bool packet_needs_cookie; + bool under_load; + + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { + net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", + wg->dev->name, skb); + wg_cookie_message_consume( + (struct message_handshake_cookie *)skb->data, wg); + return; + } + + under_load = atomic_read(&wg->handshake_queue_len) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; + if (under_load) { + last_under_load = ktime_get_coarse_boottime_ns(); + } else if (last_under_load) { + under_load = !wg_birthdate_has_expired(last_under_load, 1); + if (!under_load) + last_under_load = 0; + } + mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, + under_load); + if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || + (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { + packet_needs_cookie = false; + } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { + packet_needs_cookie = true; + } else { + net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", + wg->dev->name, skb); + return; + } + + switch (SKB_TYPE_LE32(skb)) { + case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { + struct message_handshake_initiation *message = + (struct message_handshake_initiation *)skb->data; + + if (packet_needs_cookie) { + wg_packet_send_handshake_cookie(wg, skb, + message->sender_index); + return; + } + peer = wg_noise_handshake_consume_initiation(message, wg); + if (unlikely(!peer)) { + net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", + wg->dev->name, skb); + return; + } + wg_socket_set_peer_endpoint_from_skb(peer, skb); + net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", + wg->dev->name, peer->internal_id, + &peer->endpoint.addr); + wg_packet_send_handshake_response(peer); + break; + } + case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { + struct message_handshake_response *message = + (struct 
message_handshake_response *)skb->data; + + if (packet_needs_cookie) { + wg_packet_send_handshake_cookie(wg, skb, + message->sender_index); + return; + } + peer = wg_noise_handshake_consume_response(message, wg); + if (unlikely(!peer)) { + net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", + wg->dev->name, skb); + return; + } + wg_socket_set_peer_endpoint_from_skb(peer, skb); + net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", + wg->dev->name, peer->internal_id, + &peer->endpoint.addr); + if (wg_noise_handshake_begin_session(&peer->handshake, + &peer->keypairs)) { + wg_timers_session_derived(peer); + wg_timers_handshake_complete(peer); + /* Calling this function will either send any existing + * packets in the queue and not send a keepalive, which + * is the best case, Or, if there's nothing in the + * queue, it will send a keepalive, in order to give + * immediate confirmation of the session. + */ + wg_packet_send_keepalive(peer); + } + break; + } + } + + if (unlikely(!peer)) { + WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); + return; + } + + local_bh_disable(); + update_rx_stats(peer, skb->len); + local_bh_enable(); + + wg_timers_any_authenticated_packet_received(peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_peer_put(peer); +} + +void wg_packet_handshake_receive_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; + struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); + struct sk_buff *skb; + + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { + wg_receive_handshake_packet(wg, skb); + dev_kfree_skb(skb); + atomic_dec(&wg->handshake_queue_len); + cond_resched(); + } +} + +static void keep_key_fresh(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + bool send; + + if (peer->sent_lastminute_handshake) + return; + + rcu_read_lock_bh(); + keypair = 
rcu_dereference_bh(peer->keypairs.current_keypair); + send = keypair && READ_ONCE(keypair->sending.is_valid) && + keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); + rcu_read_unlock_bh(); + + if (unlikely(send)) { + peer->sent_lastminute_handshake = true; + wg_packet_send_queued_handshake_initiation(peer, false); + } +} + +static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair, + simd_context_t *simd_context) +{ + struct scatterlist sg[MAX_SKB_FRAGS + 8]; + struct sk_buff *trailer; + unsigned int offset; + int num_frags; + + if (unlikely(!keypair)) + return false; + + if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || + wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || + keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { + WRITE_ONCE(keypair->receiving.is_valid, false); + return false; + } + + PACKET_CB(skb)->nonce = + le64_to_cpu(((struct message_data *)skb->data)->counter); + + /* We ensure that the network header is part of the packet before we + * call skb_cow_data, so that there's no chance that data is removed + * from the skb, so that later we can extract the original endpoint. + */ + offset = skb->data - skb_network_header(skb); + skb_push(skb, offset); + num_frags = skb_cow_data(skb, 0, &trailer); + offset += sizeof(struct message_data); + skb_pull(skb, offset); + if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) + return false; + + sg_init_table(sg, num_frags); + if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) + return false; + + if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, + PACKET_CB(skb)->nonce, + keypair->receiving.key, + simd_context)) + return false; + + /* Another ugly situation of pushing and pulling the header so as to + * keep endpoint information intact. 
+ */ + skb_push(skb, offset); + if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) + return false; + skb_pull(skb, offset); + + return true; +} + +/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ +static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) +{ + unsigned long index, index_current, top, i; + bool ret = false; + + spin_lock_bh(&counter->lock); + + if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || + their_counter >= REJECT_AFTER_MESSAGES)) + goto out; + + ++their_counter; + + if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < + counter->counter)) + goto out; + + index = their_counter >> ilog2(BITS_PER_LONG); + + if (likely(their_counter > counter->counter)) { + index_current = counter->counter >> ilog2(BITS_PER_LONG); + top = min_t(unsigned long, index - index_current, + COUNTER_BITS_TOTAL / BITS_PER_LONG); + for (i = 1; i <= top; ++i) + counter->backtrack[(i + index_current) & + ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; + counter->counter = their_counter; + } + + index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; + ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), + &counter->backtrack[index]); + +out: + spin_unlock_bh(&counter->lock); + return ret; +} + +#include "selftest/counter.c" + +static void wg_packet_consume_data_done(struct wg_peer *peer, + struct sk_buff *skb, + struct endpoint *endpoint) +{ + struct net_device *dev = peer->device->dev; + unsigned int len, len_before_trim; + struct wg_peer *routed_peer; + + wg_socket_set_peer_endpoint(peer, endpoint); + + if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, + PACKET_CB(skb)->keypair))) { + wg_timers_handshake_complete(peer); + wg_packet_send_staged_packets(peer); + } + + keep_key_fresh(peer); + + wg_timers_any_authenticated_packet_received(peer); + wg_timers_any_authenticated_packet_traversal(peer); + + /* A packet with length 0 is a keepalive packet */ + if (unlikely(!skb->len)) { + 
update_rx_stats(peer, message_data_len(0)); + net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, + &peer->endpoint.addr); + goto packet_processed; + } + + wg_timers_data_received(peer); + + if (unlikely(skb_network_header(skb) < skb->head)) + goto dishonest_packet_size; + if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && + (ip_hdr(skb)->version == 4 || + (ip_hdr(skb)->version == 6 && + pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) + goto dishonest_packet_type; + + skb->dev = dev; + /* We've already verified the Poly1305 auth tag, which means this packet + * was not modified in transit. We can therefore tell the networking + * stack that all checksums of every layer of encapsulation have already + * been checked "by the hardware" and therefore is unnecessary to check + * again in software. + */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifndef COMPAT_CANNOT_USE_CSUM_LEVEL + skb->csum_level = ~0; /* All levels */ +#endif + skb->protocol = ip_tunnel_parse_protocol(skb); + if (skb->protocol == htons(ETH_P_IP)) { + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) + goto dishonest_packet_size; + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + len = ntohs(ipv6_hdr(skb)->payload_len) + + sizeof(struct ipv6hdr); + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); + } else { + goto dishonest_packet_type; + } + + if (unlikely(len > skb->len)) + goto dishonest_packet_size; + len_before_trim = skb->len; + if (unlikely(pskb_trim(skb, len))) + goto packet_processed; + + routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, + skb); + wg_peer_put(routed_peer); /* We don't need the extra reference. 
*/ + + if (unlikely(routed_peer != peer)) + goto dishonest_packet_peer; + + napi_gro_receive(&peer->napi, skb); + update_rx_stats(peer, message_data_len(len_before_trim)); + return; + +dishonest_packet_peer: + net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", + dev->name, skb, peer->internal_id, + &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_frame_errors; + goto packet_processed; +dishonest_packet_type: + net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_frame_errors; + goto packet_processed; +dishonest_packet_size: + net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_length_errors; + goto packet_processed; +packet_processed: + dev_kfree_skb(skb); +} + +int wg_packet_rx_poll(struct napi_struct *napi, int budget) +{ + struct wg_peer *peer = container_of(napi, struct wg_peer, napi); + struct noise_keypair *keypair; + struct endpoint endpoint; + enum packet_state state; + struct sk_buff *skb; + int work_done = 0; + bool free; + + if (unlikely(budget <= 0)) + return 0; + + while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != + PACKET_STATE_UNCRYPTED) { + wg_prev_queue_drop_peeked(&peer->rx_queue); + keypair = PACKET_CB(skb)->keypair; + free = true; + + if (unlikely(state != PACKET_STATE_CRYPTED)) + goto next; + + if (unlikely(!counter_validate(&keypair->receiving_counter, + PACKET_CB(skb)->nonce))) { + net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", + peer->device->dev->name, + PACKET_CB(skb)->nonce, + keypair->receiving_counter.counter); + goto next; + } + + if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) + goto next; + + wg_reset_packet(skb, 
false); + wg_packet_consume_data_done(peer, skb, &endpoint); + free = false; + +next: + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); + if (unlikely(free)) + dev_kfree_skb(skb); + + if (++work_done >= budget) + break; + } + + if (work_done < budget) + napi_complete_done(napi, work_done); + + return work_done; +} + +void wg_packet_decrypt_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, + work)->ptr; + simd_context_t simd_context; + struct sk_buff *skb; + + simd_get(&simd_context); + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { + enum packet_state state = + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair, + &simd_context)) ? + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; + wg_queue_enqueue_per_peer_rx(skb, state); + simd_relax(&simd_context); + } + + simd_put(&simd_context); +} + +static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) +{ + __le32 idx = ((struct message_data *)skb->data)->key_idx; + struct wg_peer *peer = NULL; + int ret; + + rcu_read_lock_bh(); + PACKET_CB(skb)->keypair = + (struct noise_keypair *)wg_index_hashtable_lookup( + wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, + &peer); + if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) + goto err_keypair; + + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, + wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); + if (likely(!ret || ret == -EPIPE)) { + rcu_read_unlock_bh(); + return; + } +err: + wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); +err_keypair: + rcu_read_unlock_bh(); + wg_peer_put(peer); + dev_kfree_skb(skb); +} + +void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) +{ + if (unlikely(prepare_skb_header(skb, wg) < 0)) + goto err; + switch (SKB_TYPE_LE32(skb)) { + case 
cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): + case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): + case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { + int cpu, ret = -EBUSY; + + if (unlikely(!rng_is_initialized())) + goto drop; + if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { + if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { + ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); + spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); + } + } else + ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); + if (ret) { + drop: + net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", + wg->dev->name, skb); + goto err; + } + atomic_inc(&wg->handshake_queue_len); + cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); + /* Queues up a call to packet_process_queued_handshake_packets(skb): */ + queue_work_on(cpu, wg->handshake_receive_wq, + &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); + break; + } + case cpu_to_le32(MESSAGE_DATA): + PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); + wg_packet_consume_data(wg, skb); + break; + default: + WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); + goto err; + } + return; + +err: + dev_kfree_skb(skb); +} diff --git a/net/wireguard/selftest/allowedips.c b/net/wireguard/selftest/allowedips.c new file mode 100644 index 000000000000..e173204ae7d7 --- /dev/null +++ b/net/wireguard/selftest/allowedips.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This contains some basic static unit tests for the allowedips data structure. + * It also has two additional modes that are disabled and meant to be used by + * folks directly playing with this file. 
If you define the macro + * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in + * memory, it will be printed out as KERN_DEBUG in a format that can be passed + * to graphviz (the dot command) to visualize it. If you define the macro + * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of + * randomized tests done against a trivial implementation, which may take + * upwards of a half-hour to complete. There's no set of users who should be + * enabling these, and the only developers that should go anywhere near these + * nobs are the ones who are reading this comment. + */ + +#ifdef DEBUG + +#include + +static __init void print_node(struct allowedips_node *node, u8 bits) +{ + char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; + char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; + u8 ip1[16], ip2[16], cidr1, cidr2; + char *style = "dotted"; + u32 color = 0; + + if (node == NULL) + return; + if (bits == 32) { + fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; + fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; + } else if (bits == 128) { + fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; + fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; + } + if (node->peer) { + hsiphash_key_t key = { { 0 } }; + + memcpy(&key, &node->peer, sizeof(node->peer)); + color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | + hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | + hsiphash_1u32(0xabad1dea, &key) % 200; + style = "bold"; + } + wg_allowedips_read_node(node, ip1, &cidr1); + printk(fmt_declaration, ip1, cidr1, style, color); + if (node->bit[0]) { + wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); + printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } + if (node->bit[1]) { + wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); + printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } + 
if (node->bit[0]) + print_node(rcu_dereference_raw(node->bit[0]), bits); + if (node->bit[1]) + print_node(rcu_dereference_raw(node->bit[1]), bits); +} + +static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) +{ + printk(KERN_DEBUG "digraph trie {\n"); + print_node(rcu_dereference_raw(top), bits); + printk(KERN_DEBUG "}\n"); +} + +enum { + NUM_PEERS = 2000, + NUM_RAND_ROUTES = 400, + NUM_MUTATED_ROUTES = 100, + NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30 +}; + +struct horrible_allowedips { + struct hlist_head head; +}; + +struct horrible_allowedips_node { + struct hlist_node table; + union nf_inet_addr ip; + union nf_inet_addr mask; + u8 ip_version; + void *value; +}; + +static __init void horrible_allowedips_init(struct horrible_allowedips *table) +{ + INIT_HLIST_HEAD(&table->head); +} + +static __init void horrible_allowedips_free(struct horrible_allowedips *table) +{ + struct horrible_allowedips_node *node; + struct hlist_node *h; + + hlist_for_each_entry_safe(node, h, &table->head, table) { + hlist_del(&node->table); + kfree(node); + } +} + +static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) +{ + union nf_inet_addr mask; + + memset(&mask, 0, sizeof(mask)); + memset(&mask.all, 0xff, cidr / 8); + if (cidr % 32) + mask.all[cidr / 32] = (__force u32)htonl( + (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); + return mask; +} + +static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) +{ + return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + + hweight32(subnet.all[2]) + hweight32(subnet.all[3]); +} + +static __init inline void +horrible_mask_self(struct horrible_allowedips_node *node) +{ + if (node->ip_version == 4) { + node->ip.ip &= node->mask.ip; + } else if (node->ip_version == 6) { + node->ip.ip6[0] &= node->mask.ip6[0]; + node->ip.ip6[1] &= node->mask.ip6[1]; + node->ip.ip6[2] &= node->mask.ip6[2]; + node->ip.ip6[3] &= node->mask.ip6[3]; + } +} + +static __init inline bool 
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) +{ + return (ip->s_addr & node->mask.ip) == node->ip.ip; +} + +static __init inline bool +horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) +{ + return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && + (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && + (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && + (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; +} + +static __init void +horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) +{ + struct horrible_allowedips_node *other = NULL, *where = NULL; + u8 my_cidr = horrible_mask_to_cidr(node->mask); + + hlist_for_each_entry(other, &table->head, table) { + if (other->ip_version == node->ip_version && + !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && + !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { + other->value = node->value; + kfree(node); + return; + } + } + hlist_for_each_entry(other, &table->head, table) { + where = other; + if (horrible_mask_to_cidr(other->mask) <= my_cidr) + break; + } + if (!other && !where) + hlist_add_head(&node->table, &table->head); + else if (!other) + hlist_add_behind(&node->table, &where->table); + else + hlist_add_before(&node->table, &where->table); +} + +static __init int +horrible_allowedips_insert_v4(struct horrible_allowedips *table, + struct in_addr *ip, u8 cidr, void *value) +{ + struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; + node->ip.in = *ip; + node->mask = horrible_cidr_to_mask(cidr); + node->ip_version = 4; + node->value = value; + horrible_mask_self(node); + horrible_insert_ordered(table, node); + return 0; +} + +static __init int +horrible_allowedips_insert_v6(struct horrible_allowedips *table, + struct in6_addr *ip, u8 cidr, void *value) +{ + 
struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; + node->ip.in6 = *ip; + node->mask = horrible_cidr_to_mask(cidr); + node->ip_version = 6; + node->value = value; + horrible_mask_self(node); + horrible_insert_ordered(table, node); + return 0; +} + +static __init void * +horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) +{ + struct horrible_allowedips_node *node; + + hlist_for_each_entry(node, &table->head, table) { + if (node->ip_version == 4 && horrible_match_v4(node, ip)) + return node->value; + } + return NULL; +} + +static __init void * +horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) +{ + struct horrible_allowedips_node *node; + + hlist_for_each_entry(node, &table->head, table) { + if (node->ip_version == 6 && horrible_match_v6(node, ip)) + return node->value; + } + return NULL; +} + + +static __init void +horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) +{ + struct horrible_allowedips_node *node; + struct hlist_node *h; + + hlist_for_each_entry_safe(node, h, &table->head, table) { + if (node->value != value) + continue; + hlist_del(&node->table); + kfree(node); + } + +} + +static __init bool randomized_test(void) +{ + unsigned int i, j, k, mutate_amount, cidr; + u8 ip[16], mutate_mask[16], mutated[16]; + struct wg_peer **peers, *peer; + struct horrible_allowedips h; + DEFINE_MUTEX(mutex); + struct allowedips t; + bool ret = false; + + mutex_init(&mutex); + + wg_allowedips_init(&t); + horrible_allowedips_init(&h); + + peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); + if (unlikely(!peers)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free; + } + for (i = 0; i < NUM_PEERS; ++i) { + peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); + if (unlikely(!peers[i])) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free; + } + 
kref_init(&peers[i]->refcount); + INIT_LIST_HEAD(&peers[i]->allowedips_list); + } + + mutex_lock(&mutex); + + for (i = 0; i < NUM_RAND_ROUTES; ++i) { + prandom_bytes(ip, 4); + cidr = prandom_u32_max(32) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, + peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, + cidr, peer) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { + memcpy(mutated, ip, 4); + prandom_bytes(mutate_mask, 4); + mutate_amount = prandom_u32_max(32); + for (k = 0; k < mutate_amount / 8; ++k) + mutate_mask[k] = 0xff; + mutate_mask[k] = 0xff + << ((8 - (mutate_amount % 8)) % 8); + for (; k < 4; ++k) + mutate_mask[k] = 0; + for (k = 0; k < 4; ++k) + mutated[k] = (mutated[k] & mutate_mask[k]) | + (~mutate_mask[k] & + prandom_u32_max(256)); + cidr = prandom_u32_max(32) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v4(&t, + (struct in_addr *)mutated, + cidr, peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v4(&h, + (struct in_addr *)mutated, cidr, peer)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + } + } + + for (i = 0; i < NUM_RAND_ROUTES; ++i) { + prandom_bytes(ip, 16); + cidr = prandom_u32_max(128) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, + peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, + cidr, peer) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { + memcpy(mutated, ip, 16); + 
prandom_bytes(mutate_mask, 16); + mutate_amount = prandom_u32_max(128); + for (k = 0; k < mutate_amount / 8; ++k) + mutate_mask[k] = 0xff; + mutate_mask[k] = 0xff + << ((8 - (mutate_amount % 8)) % 8); + for (; k < 4; ++k) + mutate_mask[k] = 0; + for (k = 0; k < 4; ++k) + mutated[k] = (mutated[k] & mutate_mask[k]) | + (~mutate_mask[k] & + prandom_u32_max(256)); + cidr = prandom_u32_max(128) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v6(&t, + (struct in6_addr *)mutated, + cidr, peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v6( + &h, (struct in6_addr *)mutated, cidr, + peer)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + } + } + + mutex_unlock(&mutex); + + if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { + print_tree(t.root4, 32); + print_tree(t.root6, 128); + } + + for (j = 0;; ++j) { + for (i = 0; i < NUM_QUERIES; ++i) { + prandom_bytes(ip, 4); + if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { + horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); + pr_err("allowedips random v4 self-test: FAIL\n"); + goto free; + } + prandom_bytes(ip, 16); + if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { + pr_err("allowedips random v6 self-test: FAIL\n"); + goto free; + } + } + if (j >= NUM_PEERS) + break; + mutex_lock(&mutex); + wg_allowedips_remove_by_peer(&t, peers[j], &mutex); + mutex_unlock(&mutex); + horrible_allowedips_remove_by_value(&h, peers[j]); + } + + if (t.root4 || t.root6) { + pr_err("allowedips random self-test removal: FAIL\n"); + goto free; + } + + ret = true; + +free: + mutex_lock(&mutex); +free_locked: + wg_allowedips_free(&t, &mutex); + mutex_unlock(&mutex); + horrible_allowedips_free(&h); + if (peers) { + for (i = 0; i < NUM_PEERS; ++i) + kfree(peers[i]); + } + kfree(peers); + return ret; +} + +static __init inline 
struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) +{ + static struct in_addr ip; + u8 *split = (u8 *)&ip; + + split[0] = a; + split[1] = b; + split[2] = c; + split[3] = d; + return &ip; +} + +static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) +{ + static struct in6_addr ip; + __be32 *split = (__be32 *)&ip; + + split[0] = cpu_to_be32(a); + split[1] = cpu_to_be32(b); + split[2] = cpu_to_be32(c); + split[3] = cpu_to_be32(d); + return &ip; +} + +static __init struct wg_peer *init_peer(void) +{ + struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL); + + if (!peer) + return NULL; + kref_init(&peer->refcount); + INIT_LIST_HEAD(&peer->allowedips_list); + return peer; +} + +#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \ + wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \ + cidr, mem, &mutex) + +#define maybe_fail() do { \ + ++i; \ + if (!_s) { \ + pr_info("allowedips self-test %zu: FAIL\n", i); \ + success = false; \ + } \ + } while (0) + +#define test(version, mem, ipa, ipb, ipc, ipd) do { \ + bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ + ip##version(ipa, ipb, ipc, ipd)) == (mem); \ + maybe_fail(); \ + } while (0) + +#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \ + bool _s = lookup(t.root##version, (version) == 4 ? 
32 : 128, \ + ip##version(ipa, ipb, ipc, ipd)) != (mem); \ + maybe_fail(); \ + } while (0) + +#define test_boolean(cond) do { \ + bool _s = (cond); \ + maybe_fail(); \ + } while (0) + +bool __init wg_allowedips_selftest(void) +{ + bool found_a = false, found_b = false, found_c = false, found_d = false, + found_e = false, found_other = false; + struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(), + *d = init_peer(), *e = init_peer(), *f = init_peer(), + *g = init_peer(), *h = init_peer(); + struct allowedips_node *iter_node; + bool success = false; + struct allowedips t; + DEFINE_MUTEX(mutex); + struct in6_addr ip; + size_t i = 0, count = 0; + __be64 part; + + mutex_init(&mutex); + mutex_lock(&mutex); + wg_allowedips_init(&t); + + if (!a || !b || !c || !d || !e || !f || !g || !h) { + pr_err("allowedips self-test malloc: FAIL\n"); + goto free; + } + + insert(4, a, 192, 168, 4, 0, 24); + insert(4, b, 192, 168, 4, 4, 32); + insert(4, c, 192, 168, 0, 0, 16); + insert(4, d, 192, 95, 5, 64, 27); + /* replaces previous entry, and maskself is required */ + insert(4, c, 192, 95, 5, 65, 27); + insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); + insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64); + insert(4, e, 0, 0, 0, 0, 0); + insert(6, e, 0, 0, 0, 0, 0); + /* replaces previous entry */ + insert(6, f, 0, 0, 0, 0, 0); + insert(6, g, 0x24046800, 0, 0, 0, 32); + /* maskself is required */ + insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64); + insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128); + insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128); + insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98); + insert(4, g, 64, 15, 112, 0, 20); + /* maskself is required */ + insert(4, h, 64, 15, 123, 211, 25); + insert(4, a, 10, 0, 0, 0, 25); + insert(4, b, 10, 0, 0, 128, 25); + insert(4, a, 10, 1, 0, 0, 30); + insert(4, b, 10, 1, 0, 4, 30); + insert(4, c, 10, 1, 0, 8, 29); + insert(4, d, 10, 1, 0, 16, 29); + + if 
(IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { + print_tree(t.root4, 32); + print_tree(t.root6, 128); + } + + success = true; + + test(4, a, 192, 168, 4, 20); + test(4, a, 192, 168, 4, 0); + test(4, b, 192, 168, 4, 4); + test(4, c, 192, 168, 200, 182); + test(4, c, 192, 95, 5, 68); + test(4, e, 192, 95, 5, 96); + test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543); + test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee); + test(6, f, 0x26075300, 0x60006b01, 0, 0); + test(6, g, 0x24046800, 0x40040806, 0, 0x1006); + test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678); + test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678); + test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678); + test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678); + test(6, h, 0x24046800, 0x40040800, 0, 0); + test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010); + test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef); + test(4, g, 64, 15, 116, 26); + test(4, g, 64, 15, 127, 3); + test(4, g, 64, 15, 123, 1); + test(4, h, 64, 15, 123, 128); + test(4, h, 64, 15, 123, 129); + test(4, a, 10, 0, 0, 52); + test(4, b, 10, 0, 0, 220); + test(4, a, 10, 1, 0, 2); + test(4, b, 10, 1, 0, 6); + test(4, c, 10, 1, 0, 10); + test(4, d, 10, 1, 0, 20); + + insert(4, a, 1, 0, 0, 0, 32); + insert(4, a, 64, 0, 0, 0, 32); + insert(4, a, 128, 0, 0, 0, 32); + insert(4, a, 192, 0, 0, 0, 32); + insert(4, a, 255, 0, 0, 0, 32); + wg_allowedips_remove_by_peer(&t, a, &mutex); + test_negative(4, a, 1, 0, 0, 0); + test_negative(4, a, 64, 0, 0, 0); + test_negative(4, a, 128, 0, 0, 0); + test_negative(4, a, 192, 0, 0, 0); + test_negative(4, a, 255, 0, 0, 0); + + wg_allowedips_free(&t, &mutex); + wg_allowedips_init(&t); + insert(4, a, 192, 168, 0, 0, 16); + insert(4, a, 192, 168, 0, 0, 24); + wg_allowedips_remove_by_peer(&t, a, &mutex); + test_negative(4, a, 192, 168, 0, 1); + + /* These will hit the WARN_ON(len >= 128) in free_node if something + * goes wrong. 
+ */ + for (i = 0; i < 128; ++i) { + part = cpu_to_be64(~(1LLU << (i % 64))); + memset(&ip, 0xff, 16); + memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + } + + wg_allowedips_free(&t, &mutex); + + wg_allowedips_init(&t); + insert(4, a, 192, 95, 5, 93, 27); + insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); + insert(4, a, 10, 1, 0, 20, 29); + insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83); + insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21); + list_for_each_entry(iter_node, &a->allowedips_list, peer_list) { + u8 cidr, ip[16] __aligned(__alignof(u64)); + int family = wg_allowedips_read_node(iter_node, ip, &cidr); + + count++; + + if (cidr == 27 && family == AF_INET && + !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr))) + found_a = true; + else if (cidr == 128 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543), + sizeof(struct in6_addr))) + found_b = true; + else if (cidr == 29 && family == AF_INET && + !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr))) + found_c = true; + else if (cidr == 83 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0), + sizeof(struct in6_addr))) + found_d = true; + else if (cidr == 21 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075000, 0, 0, 0), + sizeof(struct in6_addr))) + found_e = true; + else + found_other = true; + } + test_boolean(count == 5); + test_boolean(found_a); + test_boolean(found_b); + test_boolean(found_c); + test_boolean(found_d); + test_boolean(found_e); + test_boolean(!found_other); + + if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success) + success = randomized_test(); + + if (success) + pr_info("allowedips self-tests: pass\n"); + +free: + wg_allowedips_free(&t, &mutex); + kfree(a); + kfree(b); + kfree(c); + kfree(d); + kfree(e); + kfree(f); + kfree(g); + kfree(h); + mutex_unlock(&mutex); + + return success; +} + +#undef test_negative +#undef test +#undef remove 
+#undef insert +#undef init_peer + +#endif diff --git a/net/wireguard/selftest/counter.c b/net/wireguard/selftest/counter.c new file mode 100644 index 000000000000..ec3c156bf91b --- /dev/null +++ b/net/wireguard/selftest/counter.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifdef DEBUG +bool __init wg_packet_counter_selftest(void) +{ + struct noise_replay_counter *counter; + unsigned int test_num = 0, i; + bool success = true; + + counter = kmalloc(sizeof(*counter), GFP_KERNEL); + if (unlikely(!counter)) { + pr_err("nonce counter self-test malloc: FAIL\n"); + return false; + } + +#define T_INIT do { \ + memset(counter, 0, sizeof(*counter)); \ + spin_lock_init(&counter->lock); \ + } while (0) +#define T_LIM (COUNTER_WINDOW_SIZE + 1) +#define T(n, v) do { \ + ++test_num; \ + if (counter_validate(counter, n) != (v)) { \ + pr_err("nonce counter self-test %u: FAIL\n", \ + test_num); \ + success = false; \ + } \ + } while (0) + + T_INIT; + /* 1 */ T(0, true); + /* 2 */ T(1, true); + /* 3 */ T(1, false); + /* 4 */ T(9, true); + /* 5 */ T(8, true); + /* 6 */ T(7, true); + /* 7 */ T(7, false); + /* 8 */ T(T_LIM, true); + /* 9 */ T(T_LIM - 1, true); + /* 10 */ T(T_LIM - 1, false); + /* 11 */ T(T_LIM - 2, true); + /* 12 */ T(2, true); + /* 13 */ T(2, false); + /* 14 */ T(T_LIM + 16, true); + /* 15 */ T(3, false); + /* 16 */ T(T_LIM + 16, false); + /* 17 */ T(T_LIM * 4, true); + /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true); + /* 19 */ T(10, false); + /* 20 */ T(T_LIM * 4 - T_LIM, false); + /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false); + /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true); + /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false); + /* 24 */ T(0, false); + /* 25 */ T(REJECT_AFTER_MESSAGES, false); + /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true); + /* 27 */ T(REJECT_AFTER_MESSAGES, false); + /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false); + /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true); + /* 30 */ 
T(REJECT_AFTER_MESSAGES + 1, false); + /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false); + /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false); + /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true); + /* 34 */ T(0, false); + + T_INIT; + for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i) + T(i, true); + T(0, true); + T(0, false); + + T_INIT; + for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i) + T(i, true); + T(1, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;) + T(i, true); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;) + T(i, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) + T(i, true); + T(COUNTER_WINDOW_SIZE + 1, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) + T(i, true); + T(0, true); + T(COUNTER_WINDOW_SIZE + 1, true); + +#undef T +#undef T_LIM +#undef T_INIT + + if (success) + pr_info("nonce counter self-tests: pass\n"); + kfree(counter); + return success; +} +#endif diff --git a/net/wireguard/selftest/ratelimiter.c b/net/wireguard/selftest/ratelimiter.c new file mode 100644 index 000000000000..007cd4457c5f --- /dev/null +++ b/net/wireguard/selftest/ratelimiter.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifdef DEBUG + +#include + +static const struct { + bool result; + unsigned int msec_to_sleep_before; +} expected_results[] __initconst = { + [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, + [PACKETS_BURSTABLE] = { false, 0 }, + [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, + [PACKETS_BURSTABLE + 2] = { false, 0 }, + [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, + [PACKETS_BURSTABLE + 4] = { true, 0 }, + [PACKETS_BURSTABLE + 5] = { false, 0 } +}; + +static __init unsigned int maximum_jiffies_at_index(int index) +{ + unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; + int i; + + for (i = 0; i <= index; ++i) + total_msecs += expected_results[i].msec_to_sleep_before; + return msecs_to_jiffies(total_msecs); +} + +static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, + struct sk_buff *skb6, struct ipv6hdr *hdr6, + int *test) +{ + unsigned long loop_start_time; + int i; + + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + loop_start_time = jiffies; + + for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { + if (expected_results[i].msec_to_sleep_before) + msleep(expected_results[i].msec_to_sleep_before); + + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (wg_ratelimiter_allow(skb4, &init_net) != + expected_results[i].result) + return -EXFULL; + ++(*test); + + hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1); + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (!wg_ratelimiter_allow(skb4, &init_net)) + return -EXFULL; + ++(*test); + + hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1); + +#if IS_ENABLED(CONFIG_IPV6) + hdr6->saddr.in6_u.u6_addr32[2] = htonl(i); + hdr6->saddr.in6_u.u6_addr32[3] = htonl(i); + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (wg_ratelimiter_allow(skb6, &init_net) != + expected_results[i].result) + return -EXFULL; + ++(*test); + + hdr6->saddr.in6_u.u6_addr32[0] = + htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1); + if 
(time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (!wg_ratelimiter_allow(skb6, &init_net)) + return -EXFULL; + ++(*test); + + hdr6->saddr.in6_u.u6_addr32[0] = + htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1); + + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; +#endif + } + return 0; +} + +static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4, + int *test) +{ + int i; + + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + + if (atomic_read(&total_entries)) + return -EXFULL; + ++(*test); + + for (i = 0; i <= max_entries; ++i) { + hdr4->saddr = htonl(i); + if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries)) + return -EXFULL; + ++(*test); + } + return 0; +} + +bool __init wg_ratelimiter_selftest(void) +{ + enum { TRIALS_BEFORE_GIVING_UP = 5000 }; + bool success = false; + int test = 0, trials; + struct sk_buff *skb4, *skb6 = NULL; + struct iphdr *hdr4; + struct ipv6hdr *hdr6 = NULL; + + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) + return true; + + BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); + + if (wg_ratelimiter_init()) + goto out; + ++test; + if (wg_ratelimiter_init()) { + wg_ratelimiter_uninit(); + goto out; + } + ++test; + if (wg_ratelimiter_init()) { + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + goto out; + } + ++test; + + skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL); + if (unlikely(!skb4)) + goto err_nofree; + skb4->protocol = htons(ETH_P_IP); + hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4)); + hdr4->saddr = htonl(8182); + skb_reset_network_header(skb4); + ++test; + +#if IS_ENABLED(CONFIG_IPV6) + skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL); + if (unlikely(!skb6)) { + kfree_skb(skb4); + goto err_nofree; + } + skb6->protocol = htons(ETH_P_IPV6); + hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6)); + hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212); + 
hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188); + skb_reset_network_header(skb6); + ++test; +#endif + + for (trials = TRIALS_BEFORE_GIVING_UP;;) { + int test_count = 0, ret; + + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); + if (ret == -ETIMEDOUT) { + if (!trials--) { + test += test_count; + goto err; + } + msleep(500); + continue; + } else if (ret < 0) { + test += test_count; + goto err; + } else { + test += test_count; + break; + } + } + + for (trials = TRIALS_BEFORE_GIVING_UP;;) { + int test_count = 0; + + if (capacity_test(skb4, hdr4, &test_count) < 0) { + if (!trials--) { + test += test_count; + goto err; + } + msleep(50); + continue; + } + test += test_count; + break; + } + + success = true; + +err: + kfree_skb(skb4); +#if IS_ENABLED(CONFIG_IPV6) + kfree_skb(skb6); +#endif +err_nofree: + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + /* Uninit one extra time to check underflow detection. */ + wg_ratelimiter_uninit(); +out: + if (success) + pr_info("ratelimiter self-tests: pass\n"); + else + pr_err("ratelimiter self-test %d: FAIL\n", test); + + return success; +} +#endif diff --git a/net/wireguard/send.c b/net/wireguard/send.c new file mode 100644 index 000000000000..55bb0c9313d7 --- /dev/null +++ b/net/wireguard/send.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include "queueing.h" +#include "timers.h" +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "messages.h" +#include "cookie.h" + +#include +#include +#include +#include +#include +#include +#include + +static void wg_packet_send_handshake_initiation(struct wg_peer *peer) +{ + struct message_handshake_initiation packet; + + if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), + REKEY_TIMEOUT)) + return; /* This function is rate limited. 
*/ + + atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); + net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) { + wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + atomic64_set(&peer->last_sent_handshake, + ktime_get_coarse_boottime_ns()); + wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet), + HANDSHAKE_DSCP); + wg_timers_handshake_initiated(peer); + } +} + +void wg_packet_handshake_send_worker(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, + transmit_handshake_work); + + wg_packet_send_handshake_initiation(peer); + wg_peer_put(peer); +} + +void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, + bool is_retry) +{ + if (!is_retry) + peer->timer_handshake_attempts = 0; + + rcu_read_lock_bh(); + /* We check last_sent_handshake here in addition to the actual function + * we're queueing up, so that we don't queue things if not strictly + * necessary: + */ + if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), + REKEY_TIMEOUT) || + unlikely(READ_ONCE(peer->is_dead))) + goto out; + + wg_peer_get(peer); + /* Queues up calling packet_send_queued_handshakes(peer), where we do a + * peer_put(peer) after: + */ + if (!queue_work(peer->device->handshake_send_wq, + &peer->transmit_handshake_work)) + /* If the work was already queued, we want to drop the + * extra reference: + */ + wg_peer_put(peer); +out: + rcu_read_unlock_bh(); +} + +void wg_packet_send_handshake_response(struct wg_peer *peer) +{ + struct message_handshake_response packet; + + atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); + net_dbg_ratelimited("%s: Sending handshake response to peer %llu 
(%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + if (wg_noise_handshake_create_response(&packet, &peer->handshake)) { + wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); + if (wg_noise_handshake_begin_session(&peer->handshake, + &peer->keypairs)) { + wg_timers_session_derived(peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + atomic64_set(&peer->last_sent_handshake, + ktime_get_coarse_boottime_ns()); + wg_socket_send_buffer_to_peer(peer, &packet, + sizeof(packet), + HANDSHAKE_DSCP); + } + } +} + +void wg_packet_send_handshake_cookie(struct wg_device *wg, + struct sk_buff *initiating_skb, + __le32 sender_index) +{ + struct message_handshake_cookie packet; + + net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", + wg->dev->name, initiating_skb); + wg_cookie_message_create(&packet, initiating_skb, sender_index, + &wg->cookie_checker); + wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, + sizeof(packet)); +} + +static void keep_key_fresh(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + bool send; + + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); + send = keypair && READ_ONCE(keypair->sending.is_valid) && + (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || + (keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); + rcu_read_unlock_bh(); + + if (unlikely(send)) + wg_packet_send_queued_handshake_initiation(peer, false); +} + +static unsigned int calculate_skb_padding(struct sk_buff *skb) +{ + unsigned int padded_size, last_unit = skb->len; + + if (unlikely(!PACKET_CB(skb)->mtu)) + return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; + + /* We do this modulo business with the MTU, just in case the networking + * layer gives us a packet that's bigger than the MTU. 
In that case, we + * wouldn't want the final subtraction to overflow in the case of the + * padded_size being clamped. Fortunately, that's very rarely the case, + * so we optimize for that not happening. + */ + if (unlikely(last_unit > PACKET_CB(skb)->mtu)) + last_unit %= PACKET_CB(skb)->mtu; + + padded_size = min(PACKET_CB(skb)->mtu, + ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); + return padded_size - last_unit; +} + +static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair, + simd_context_t *simd_context) +{ + unsigned int padding_len, plaintext_len, trailer_len; + struct scatterlist sg[MAX_SKB_FRAGS + 8]; + struct message_data *header; + struct sk_buff *trailer; + int num_frags; + + /* Force hash calculation before encryption so that flow analysis is + * consistent over the inner packet. + */ + skb_get_hash(skb); + + /* Calculate lengths. */ + padding_len = calculate_skb_padding(skb); + trailer_len = padding_len + noise_encrypted_len(0); + plaintext_len = skb->len + padding_len; + + /* Expand data section to have room for padding and auth tag. */ + num_frags = skb_cow_data(skb, trailer_len, &trailer); + if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) + return false; + + /* Set the padding to zeros, and make sure it and the auth tag are part + * of the skb. + */ + memset(skb_tail_pointer(trailer), 0, padding_len); + + /* Expand head section to have room for our header and the network + * stack's headers. + */ + if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0)) + return false; + + /* Finalize checksum calculation for the inner packet, if required. */ + if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb))) + return false; + + /* Only after checksumming can we safely add on the padding at the end + * and the header. 
+ */ + skb_set_inner_network_header(skb, 0); + header = (struct message_data *)skb_push(skb, sizeof(*header)); + header->header.type = cpu_to_le32(MESSAGE_DATA); + header->key_idx = keypair->remote_index; + header->counter = cpu_to_le64(PACKET_CB(skb)->nonce); + pskb_put(skb, trailer, trailer_len); + + /* Now we can encrypt the scattergather segments */ + sg_init_table(sg, num_frags); + if (skb_to_sgvec(skb, sg, sizeof(struct message_data), + noise_encrypted_len(plaintext_len)) <= 0) + return false; + return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0, + PACKET_CB(skb)->nonce, + keypair->sending.key, + simd_context); +} + +void wg_packet_send_keepalive(struct wg_peer *peer) +{ + struct sk_buff *skb; + + if (skb_queue_empty(&peer->staged_packet_queue)) { + skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, + GFP_ATOMIC); + if (unlikely(!skb)) + return; + skb_reserve(skb, DATA_PACKET_HEAD_ROOM); + skb->dev = peer->device->dev; + PACKET_CB(skb)->mtu = skb->dev->mtu; + skb_queue_tail(&peer->staged_packet_queue, skb); + net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + } + + wg_packet_send_staged_packets(peer); +} + +static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) +{ + struct sk_buff *skb, *next; + bool is_keepalive, data_sent = false; + + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + skb_list_walk_safe(first, skb, next) { + is_keepalive = skb->len == message_data_len(0); + if (likely(!wg_socket_send_skb_to_peer(peer, skb, + PACKET_CB(skb)->ds) && !is_keepalive)) + data_sent = true; + } + + if (likely(data_sent)) + wg_timers_data_sent(peer); + + keep_key_fresh(peer); +} + +void wg_packet_tx_worker(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); + struct noise_keypair *keypair; + enum 
packet_state state; + struct sk_buff *first; + + while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(first)->state)) != + PACKET_STATE_UNCRYPTED) { + wg_prev_queue_drop_peeked(&peer->tx_queue); + keypair = PACKET_CB(first)->keypair; + + if (likely(state == PACKET_STATE_CRYPTED)) + wg_packet_create_data_done(peer, first); + else + kfree_skb_list(first); + + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); + if (need_resched()) + cond_resched(); + } +} + +void wg_packet_encrypt_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, + work)->ptr; + struct sk_buff *first, *skb, *next; + simd_context_t simd_context; + + simd_get(&simd_context); + while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) { + enum packet_state state = PACKET_STATE_CRYPTED; + + skb_list_walk_safe(first, skb, next) { + if (likely(encrypt_packet(skb, + PACKET_CB(first)->keypair, + &simd_context))) { + wg_reset_packet(skb, true); + } else { + state = PACKET_STATE_DEAD; + break; + } + } + wg_queue_enqueue_per_peer_tx(first, state); + + simd_relax(&simd_context); + } + simd_put(&simd_context); +} + +static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) +{ + struct wg_device *wg = peer->device; + int ret = -EINVAL; + + rcu_read_lock_bh(); + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, + wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); +err: + rcu_read_unlock_bh(); + if (likely(!ret || ret == -EPIPE)) + return; + wg_noise_keypair_put(PACKET_CB(first)->keypair, false); + wg_peer_put(peer); + kfree_skb_list(first); +} + +void wg_packet_purge_staged_packets(struct wg_peer *peer) +{ + spin_lock_bh(&peer->staged_packet_queue.lock); + peer->device->dev->stats.tx_dropped += 
peer->staged_packet_queue.qlen; + __skb_queue_purge(&peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); +} + +void wg_packet_send_staged_packets(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + struct sk_buff_head packets; + struct sk_buff *skb; + + /* Steal the current queue into our local one. */ + __skb_queue_head_init(&packets); + spin_lock_bh(&peer->staged_packet_queue.lock); + skb_queue_splice_init(&peer->staged_packet_queue, &packets); + spin_unlock_bh(&peer->staged_packet_queue.lock); + if (unlikely(skb_queue_empty(&packets))) + return; + + /* First we make sure we have a valid reference to a valid key. */ + rcu_read_lock_bh(); + keypair = wg_noise_keypair_get( + rcu_dereference_bh(peer->keypairs.current_keypair)); + rcu_read_unlock_bh(); + if (unlikely(!keypair)) + goto out_nokey; + if (unlikely(!READ_ONCE(keypair->sending.is_valid))) + goto out_nokey; + if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME))) + goto out_invalid; + + /* After we know we have a somewhat valid key, we now try to assign + * nonces to all of the packets in the queue. If we can't assign nonces + * for all of them, we just consider it a failure and wait for the next + * handshake. + */ + skb_queue_walk(&packets, skb) { + /* 0 for no outer TOS: no leak. TODO: at some later point, we + * might consider using flowi->tos as outer instead. 
+ */ + PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); + PACKET_CB(skb)->nonce = + atomic64_inc_return(&keypair->sending_counter) - 1; + if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) + goto out_invalid; + } + + packets.prev->next = NULL; + wg_peer_get(keypair->entry.peer); + PACKET_CB(packets.next)->keypair = keypair; + wg_packet_create_data(peer, packets.next); + return; + +out_invalid: + WRITE_ONCE(keypair->sending.is_valid, false); +out_nokey: + wg_noise_keypair_put(keypair, false); + + /* We orphan the packets if we're waiting on a handshake, so that they + * don't block a socket's pool. + */ + skb_queue_walk(&packets, skb) + skb_orphan(skb); + /* Then we put them back on the top of the queue. We're not too + * concerned about accidentally getting things a little out of order if + * packets are being added really fast, because this queue is for before + * packets can even be sent and it's small anyway. + */ + spin_lock_bh(&peer->staged_packet_queue.lock); + skb_queue_splice(&packets, &peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); + + /* If we're exiting because there's something wrong with the key, it + * means we should initiate a new handshake. + */ + wg_packet_send_queued_handshake_initiation(peer, false); +} diff --git a/net/wireguard/socket.c b/net/wireguard/socket.c new file mode 100644 index 000000000000..9e0af9320c6b --- /dev/null +++ b/net/wireguard/socket.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "queueing.h" +#include "messages.h" + +#include +#include +#include +#include +#include +#include +#include + +static int send4(struct wg_device *wg, struct sk_buff *skb, + struct endpoint *endpoint, u8 ds, struct dst_cache *cache) +{ + struct flowi4 fl = { + .saddr = endpoint->src4.s_addr, + .daddr = endpoint->addr4.sin_addr.s_addr, + .fl4_dport = endpoint->addr4.sin_port, + .flowi4_mark = wg->fwmark, + .flowi4_proto = IPPROTO_UDP + }; + struct rtable *rt = NULL; + struct sock *sock; + int ret = 0; + + skb_mark_not_on_list(skb); + skb->dev = wg->dev; + skb->mark = wg->fwmark; + + rcu_read_lock_bh(); + sock = rcu_dereference_bh(wg->sock4); + + if (unlikely(!sock)) { + ret = -ENONET; + goto err; + } + + fl.fl4_sport = inet_sk(sock)->inet_sport; + + if (cache) + rt = dst_cache_get_ip4(cache, &fl.saddr); + + if (!rt) { + security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); + if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, + fl.saddr, RT_SCOPE_HOST))) { + endpoint->src4.s_addr = 0; + endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); + } + rt = ip_route_output_flow(sock_net(sock), &fl, sock); + if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) && + PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && + rt->dst.dev->ifindex != endpoint->src_if4)))) { + endpoint->src4.s_addr = 0; + endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); + if (!IS_ERR(rt)) + ip_rt_put(rt); + rt = ip_route_output_flow(sock_net(sock), &fl, sock); + } + if (IS_ERR(rt)) { + ret = PTR_ERR(rt); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; + } + if (cache) + dst_cache_set_ip4(cache, &rt->dst, fl.saddr); + } + + skb->ignore_df = 1; + udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, + ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, + fl.fl4_dport, false, false); + goto out; + +err: + kfree_skb(skb); 
+out: + rcu_read_unlock_bh(); + return ret; +} + +static int send6(struct wg_device *wg, struct sk_buff *skb, + struct endpoint *endpoint, u8 ds, struct dst_cache *cache) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct flowi6 fl = { + .saddr = endpoint->src6, + .daddr = endpoint->addr6.sin6_addr, + .fl6_dport = endpoint->addr6.sin6_port, + .flowi6_mark = wg->fwmark, + .flowi6_oif = endpoint->addr6.sin6_scope_id, + .flowi6_proto = IPPROTO_UDP + /* TODO: addr->sin6_flowinfo */ + }; + struct dst_entry *dst = NULL; + struct sock *sock; + int ret = 0; + + skb_mark_not_on_list(skb); + skb->dev = wg->dev; + skb->mark = wg->fwmark; + + rcu_read_lock_bh(); + sock = rcu_dereference_bh(wg->sock6); + + if (unlikely(!sock)) { + ret = -ENONET; + goto err; + } + + fl.fl6_sport = inet_sk(sock)->inet_sport; + + if (cache) + dst = dst_cache_get_ip6(cache, &fl.saddr); + + if (!dst) { + security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); + if (unlikely(!ipv6_addr_any(&fl.saddr) && + !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { + endpoint->src6 = fl.saddr = in6addr_any; + if (cache) + dst_cache_reset(cache); + } + dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, + NULL); + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; + } + if (cache) + dst_cache_set_ip6(cache, dst, &fl.saddr); + } + + skb->ignore_df = 1; + udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, + ip6_dst_hoplimit(dst), 0, fl.fl6_sport, + fl.fl6_dport, false); + goto out; + +err: + kfree_skb(skb); +out: + rcu_read_unlock_bh(); + return ret; +#else + kfree_skb(skb); + return -EAFNOSUPPORT; +#endif +} + +int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds) +{ + size_t skb_len = skb->len; + int ret = -EAFNOSUPPORT; + + read_lock_bh(&peer->endpoint_lock); + if (peer->endpoint.addr.sa_family == AF_INET) + ret = send4(peer->device, skb, 
&peer->endpoint, ds, + &peer->endpoint_cache); + else if (peer->endpoint.addr.sa_family == AF_INET6) + ret = send6(peer->device, skb, &peer->endpoint, ds, + &peer->endpoint_cache); + else + dev_kfree_skb(skb); + if (likely(!ret)) + peer->tx_bytes += skb_len; + read_unlock_bh(&peer->endpoint_lock); + + return ret; +} + +int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer, + size_t len, u8 ds) +{ + struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); + + if (unlikely(!skb)) + return -ENOMEM; + + skb_reserve(skb, SKB_HEADER_LEN); + skb_set_inner_network_header(skb, 0); + skb_put_data(skb, buffer, len); + return wg_socket_send_skb_to_peer(peer, skb, ds); +} + +int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, + struct sk_buff *in_skb, void *buffer, + size_t len) +{ + int ret = 0; + struct sk_buff *skb; + struct endpoint endpoint; + + if (unlikely(!in_skb)) + return -EINVAL; + ret = wg_socket_endpoint_from_skb(&endpoint, in_skb); + if (unlikely(ret < 0)) + return ret; + + skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + skb_reserve(skb, SKB_HEADER_LEN); + skb_set_inner_network_header(skb, 0); + skb_put_data(skb, buffer, len); + + if (endpoint.addr.sa_family == AF_INET) + ret = send4(wg, skb, &endpoint, 0, NULL); + else if (endpoint.addr.sa_family == AF_INET6) + ret = send6(wg, skb, &endpoint, 0, NULL); + /* No other possibilities if the endpoint is valid, which it is, + * as we checked above. 
+ */ + + return ret; +} + +int wg_socket_endpoint_from_skb(struct endpoint *endpoint, + const struct sk_buff *skb) +{ + memset(endpoint, 0, sizeof(*endpoint)); + if (skb->protocol == htons(ETH_P_IP)) { + endpoint->addr4.sin_family = AF_INET; + endpoint->addr4.sin_port = udp_hdr(skb)->source; + endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; + endpoint->src4.s_addr = ip_hdr(skb)->daddr; + endpoint->src_if4 = skb->skb_iif; + } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { + endpoint->addr6.sin6_family = AF_INET6; + endpoint->addr6.sin6_port = udp_hdr(skb)->source; + endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; + endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id( + &ipv6_hdr(skb)->saddr, skb->skb_iif); + endpoint->src6 = ipv6_hdr(skb)->daddr; + } else { + return -EINVAL; + } + return 0; +} + +static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b) +{ + return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET && + a->addr4.sin_port == b->addr4.sin_port && + a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr && + a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) || + (a->addr.sa_family == AF_INET6 && + b->addr.sa_family == AF_INET6 && + a->addr6.sin6_port == b->addr6.sin6_port && + ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) && + a->addr6.sin6_scope_id == b->addr6.sin6_scope_id && + ipv6_addr_equal(&a->src6, &b->src6)) || + unlikely(!a->addr.sa_family && !b->addr.sa_family); +} + +void wg_socket_set_peer_endpoint(struct wg_peer *peer, + const struct endpoint *endpoint) +{ + /* First we check unlocked, in order to optimize, since it's pretty rare + * that an endpoint will change. If we happen to be mid-write, and two + * CPUs wind up writing the same thing or something slightly different, + * it doesn't really matter much either. 
+ */ + if (endpoint_eq(endpoint, &peer->endpoint)) + return; + write_lock_bh(&peer->endpoint_lock); + if (endpoint->addr.sa_family == AF_INET) { + peer->endpoint.addr4 = endpoint->addr4; + peer->endpoint.src4 = endpoint->src4; + peer->endpoint.src_if4 = endpoint->src_if4; + } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) { + peer->endpoint.addr6 = endpoint->addr6; + peer->endpoint.src6 = endpoint->src6; + } else { + goto out; + } + dst_cache_reset(&peer->endpoint_cache); +out: + write_unlock_bh(&peer->endpoint_lock); +} + +void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, + const struct sk_buff *skb) +{ + struct endpoint endpoint; + + if (!wg_socket_endpoint_from_skb(&endpoint, skb)) + wg_socket_set_peer_endpoint(peer, &endpoint); +} + +void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) +{ + write_lock_bh(&peer->endpoint_lock); + memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); + dst_cache_reset_now(&peer->endpoint_cache); + write_unlock_bh(&peer->endpoint_lock); +} + +static int wg_receive(struct sock *sk, struct sk_buff *skb) +{ + struct wg_device *wg; + + if (unlikely(!sk)) + goto err; + wg = sk->sk_user_data; + if (unlikely(!wg)) + goto err; + skb_mark_not_on_list(skb); + wg_packet_receive(wg, skb); + return 0; + +err: + kfree_skb(skb); + return 0; +} + +static void sock_free(struct sock *sock) +{ + if (unlikely(!sock)) + return; + sk_clear_memalloc(sock); + udp_tunnel_sock_release(sock->sk_socket); +} + +static void set_sock_opts(struct socket *sock) +{ + sock->sk->sk_allocation = GFP_ATOMIC; + sock->sk->sk_sndbuf = INT_MAX; + sk_set_memalloc(sock->sk); +} + +int wg_socket_init(struct wg_device *wg, u16 port) +{ + struct net *net; + int ret; + struct udp_tunnel_sock_cfg cfg = { + .sk_user_data = wg, + .encap_type = 1, + .encap_rcv = wg_receive + }; + struct socket *new4 = NULL, *new6 = NULL; + struct udp_port_cfg port4 = { + .family = AF_INET, + .local_ip.s_addr = htonl(INADDR_ANY), + 
.local_udp_port = htons(port), + .use_udp_checksums = true + }; +#if IS_ENABLED(CONFIG_IPV6) + int retries = 0; + struct udp_port_cfg port6 = { + .family = AF_INET6, + .local_ip6 = IN6ADDR_ANY_INIT, + .use_udp6_tx_checksums = true, + .use_udp6_rx_checksums = true, + .ipv6_v6only = true + }; +#endif + + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + net = net ? maybe_get_net(net) : NULL; + rcu_read_unlock(); + if (unlikely(!net)) + return -ENONET; + +#if IS_ENABLED(CONFIG_IPV6) +retry: +#endif + + ret = udp_sock_create(net, &port4, &new4); + if (ret < 0) { + pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); + goto out; + } + set_sock_opts(new4); + setup_udp_tunnel_sock(net, new4, &cfg); + +#if IS_ENABLED(CONFIG_IPV6) + if (ipv6_mod_enabled()) { + port6.local_udp_port = inet_sk(new4->sk)->inet_sport; + ret = udp_sock_create(net, &port6, &new6); + if (ret < 0) { + udp_tunnel_sock_release(new4); + if (ret == -EADDRINUSE && !port && retries++ < 100) + goto retry; + pr_err("%s: Could not create IPv6 socket\n", + wg->dev->name); + goto out; + } + set_sock_opts(new6); + setup_udp_tunnel_sock(net, new6, &cfg); + } +#endif + + wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); + ret = 0; +out: + put_net(net); + return ret; +} + +void wg_socket_reinit(struct wg_device *wg, struct sock *new4, + struct sock *new6) +{ + struct sock *old4, *old6; + + mutex_lock(&wg->socket_update_lock); + old4 = rcu_dereference_protected(wg->sock4, + lockdep_is_held(&wg->socket_update_lock)); + old6 = rcu_dereference_protected(wg->sock6, + lockdep_is_held(&wg->socket_update_lock)); + rcu_assign_pointer(wg->sock4, new4); + rcu_assign_pointer(wg->sock6, new6); + if (new4) + wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); + mutex_unlock(&wg->socket_update_lock); + synchronize_net(); + sock_free(old4); + sock_free(old6); +} diff --git a/net/wireguard/socket.h b/net/wireguard/socket.h new file mode 100644 index 000000000000..bab5848efbcd --- /dev/null +++ b/net/wireguard/socket.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#ifndef _WG_SOCKET_H +#define _WG_SOCKET_H + +#include +#include +#include +#include + +int wg_socket_init(struct wg_device *wg, u16 port); +void wg_socket_reinit(struct wg_device *wg, struct sock *new4, + struct sock *new6); +int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data, + size_t len, u8 ds); +int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, + u8 ds); +int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, + struct sk_buff *in_skb, + void *out_buffer, size_t len); + +int wg_socket_endpoint_from_skb(struct endpoint *endpoint, + const struct sk_buff *skb); +void wg_socket_set_peer_endpoint(struct wg_peer *peer, + const struct endpoint *endpoint); +void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, + const struct sk_buff *skb); +void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer); + +#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) +#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) 
do { \ + struct endpoint __endpoint; \ + wg_socket_endpoint_from_skb(&__endpoint, skb); \ + net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \ + ##__VA_ARGS__); \ + } while (0) +#else +#define net_dbg_skb_ratelimited(fmt, skb, ...) +#endif + +#endif /* _WG_SOCKET_H */ diff --git a/net/wireguard/timers.c b/net/wireguard/timers.c new file mode 100644 index 000000000000..d54d32ac9bc4 --- /dev/null +++ b/net/wireguard/timers.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + */ + +#include "timers.h" +#include "device.h" +#include "peer.h" +#include "queueing.h" +#include "socket.h" + +/* + * - Timer for retransmitting the handshake if we don't hear back after + * `REKEY_TIMEOUT + jitter` ms. + * + * - Timer for sending empty packet if we have received a packet but after have + * not sent one for `KEEPALIVE_TIMEOUT` ms. + * + * - Timer for initiating new handshake if we have sent a packet but after have + * not received one (even empty) for `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + + * jitter` ms. + * + * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms + * if no new keys have been received. + * + * - Timer for, if enabled, sending an empty authenticated packet every user- + * specified seconds. 
+ */ + +static inline void mod_peer_timer(struct wg_peer *peer, + struct timer_list *timer, + unsigned long expires) +{ + rcu_read_lock_bh(); + if (likely(netif_running(peer->device->dev) && + !READ_ONCE(peer->is_dead))) + mod_timer(timer, expires); + rcu_read_unlock_bh(); +} + +static void wg_expired_retransmit_handshake(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, + timer_retransmit_handshake); + + if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { + pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); + + del_timer(&peer->timer_send_keepalive); + /* We drop all packets without a keypair and don't try again, + * if we try unsuccessfully for too long to make a handshake. + */ + wg_packet_purge_staged_packets(peer); + + /* We set a timer for destroying any residue that might be left + * of a partial exchange. + */ + if (!timer_pending(&peer->timer_zero_key_material)) + mod_peer_timer(peer, &peer->timer_zero_key_material, + jiffies + REJECT_AFTER_TIME * 3 * HZ); + } else { + ++peer->timer_handshake_attempts; + pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, REKEY_TIMEOUT, + peer->timer_handshake_attempts + 1); + + /* We clear the endpoint address src address, in case this is + * the cause of trouble. 
+ */ + wg_socket_clear_peer_endpoint_src(peer); + + wg_packet_send_queued_handshake_initiation(peer, true); + } +} + +static void wg_expired_send_keepalive(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive); + + wg_packet_send_keepalive(peer); + if (peer->timer_need_another_keepalive) { + peer->timer_need_another_keepalive = false; + mod_peer_timer(peer, &peer->timer_send_keepalive, + jiffies + KEEPALIVE_TIMEOUT * HZ); + } +} + +static void wg_expired_new_handshake(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake); + + pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); + /* We clear the endpoint address src address, in case this is the cause + * of trouble. + */ + wg_socket_clear_peer_endpoint_src(peer); + wg_packet_send_queued_handshake_initiation(peer, false); +} + +static void wg_expired_zero_key_material(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material); + + rcu_read_lock_bh(); + if (!READ_ONCE(peer->is_dead)) { + wg_peer_get(peer); + if (!queue_work(peer->device->handshake_send_wq, + &peer->clear_peer_work)) + /* If the work was already on the queue, we want to drop + * the extra reference. 
+ */ + wg_peer_put(peer); + } + rcu_read_unlock_bh(); +} + +static void wg_queued_expired_zero_key_material(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, + clear_peer_work); + + pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, REJECT_AFTER_TIME * 3); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + wg_peer_put(peer); +} + +static void wg_expired_send_persistent_keepalive(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, + timer_persistent_keepalive); + + if (likely(peer->persistent_keepalive_interval)) + wg_packet_send_keepalive(peer); +} + +/* Should be called after an authenticated data packet is sent. */ +void wg_timers_data_sent(struct wg_peer *peer) +{ + if (!timer_pending(&peer->timer_new_handshake)) + mod_peer_timer(peer, &peer->timer_new_handshake, + jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ + + prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); +} + +/* Should be called after an authenticated data packet is received. */ +void wg_timers_data_received(struct wg_peer *peer) +{ + if (likely(netif_running(peer->device->dev))) { + if (!timer_pending(&peer->timer_send_keepalive)) + mod_peer_timer(peer, &peer->timer_send_keepalive, + jiffies + KEEPALIVE_TIMEOUT * HZ); + else + peer->timer_need_another_keepalive = true; + } +} + +/* Should be called after any type of authenticated packet is sent, whether + * keepalive, data, or handshake. + */ +void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) +{ + del_timer(&peer->timer_send_keepalive); +} + +/* Should be called after any type of authenticated packet is received, whether + * keepalive, data, or handshake. 
+ */
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
+{
+	del_timer(&peer->timer_new_handshake);
+}
+
+/* Should be called after a handshake initiation message is sent. */
+void wg_timers_handshake_initiated(struct wg_peer *peer)
+{
+	mod_peer_timer(peer, &peer->timer_retransmit_handshake,
+		       jiffies + REKEY_TIMEOUT * HZ +
+		       prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after a handshake response message is received and processed
+ * or when getting key confirmation via the first data message.
+ */
+void wg_timers_handshake_complete(struct wg_peer *peer)
+{
+	del_timer(&peer->timer_retransmit_handshake);
+	peer->timer_handshake_attempts = 0;
+	peer->sent_lastminute_handshake = false;
+	ktime_get_real_ts64(&peer->walltime_last_handshake);
+}
+
+/* Should be called after an ephemeral key is created, which is before sending a
+ * handshake response or after receiving a handshake response.
+ */
+void wg_timers_session_derived(struct wg_peer *peer)
+{
+	mod_peer_timer(peer, &peer->timer_zero_key_material,
+		       jiffies + REJECT_AFTER_TIME * 3 * HZ);
+}
+
+/* Should be called before a packet with authentication, whether
+ * keepalive, data, or handshake is sent, or after one is received.
+ */ +void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer) +{ + if (peer->persistent_keepalive_interval) + mod_peer_timer(peer, &peer->timer_persistent_keepalive, + jiffies + peer->persistent_keepalive_interval * HZ); +} + +void wg_timers_init(struct wg_peer *peer) +{ + timer_setup(&peer->timer_retransmit_handshake, + wg_expired_retransmit_handshake, 0); + timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0); + timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0); + timer_setup(&peer->timer_zero_key_material, + wg_expired_zero_key_material, 0); + timer_setup(&peer->timer_persistent_keepalive, + wg_expired_send_persistent_keepalive, 0); + INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material); + peer->timer_handshake_attempts = 0; + peer->sent_lastminute_handshake = false; + peer->timer_need_another_keepalive = false; +} + +void wg_timers_stop(struct wg_peer *peer) +{ + del_timer_sync(&peer->timer_retransmit_handshake); + del_timer_sync(&peer->timer_send_keepalive); + del_timer_sync(&peer->timer_new_handshake); + del_timer_sync(&peer->timer_zero_key_material); + del_timer_sync(&peer->timer_persistent_keepalive); + flush_work(&peer->clear_peer_work); +} diff --git a/net/wireguard/timers.h b/net/wireguard/timers.h new file mode 100644 index 000000000000..f0653dcb1326 --- /dev/null +++ b/net/wireguard/timers.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _WG_TIMERS_H +#define _WG_TIMERS_H + +#include + +struct wg_peer; + +void wg_timers_init(struct wg_peer *peer); +void wg_timers_stop(struct wg_peer *peer); +void wg_timers_data_sent(struct wg_peer *peer); +void wg_timers_data_received(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_received(struct wg_peer *peer); +void wg_timers_handshake_initiated(struct wg_peer *peer); +void wg_timers_handshake_complete(struct wg_peer *peer); +void wg_timers_session_derived(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer); + +static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds, + u64 expiration_seconds) +{ + return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC) + <= (s64)ktime_get_coarse_boottime_ns(); +} + +#endif /* _WG_TIMERS_H */ diff --git a/net/wireguard/uapi/wireguard.h b/net/wireguard/uapi/wireguard.h new file mode 100644 index 000000000000..ae88be14c947 --- /dev/null +++ b/net/wireguard/uapi/wireguard.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * Documentation + * ============= + * + * The below enums and macros are for interfacing with WireGuard, using generic + * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two + * methods: get and set. Note that while they share many common attributes, + * these two functions actually accept a slightly different set of inputs and + * outputs. + * + * WG_CMD_GET_DEVICE + * ----------------- + * + * May only be called via NLM_F_REQUEST | NLM_F_DUMP. 
The command should contain + * one but not both of: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * + * The kernel will then return several messages (NLM_F_MULTI) containing the + * following tree of nested items: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_LISTEN_PORT: NLA_U16 + * WGDEVICE_A_FWMARK: NLA_U32 + * WGDEVICE_A_PEERS: NLA_NESTED + * 0: NLA_NESTED + * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6 + * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16 + * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec + * WGPEER_A_RX_BYTES: NLA_U64 + * WGPEER_A_TX_BYTES: NLA_U64 + * WGPEER_A_ALLOWEDIPS: NLA_NESTED + * 0: NLA_NESTED + * WGALLOWEDIP_A_FAMILY: NLA_U16 + * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr + * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 + * 0: NLA_NESTED + * ... + * 0: NLA_NESTED + * ... + * ... + * WGPEER_A_PROTOCOL_VERSION: NLA_U32 + * 0: NLA_NESTED + * ... + * ... + * + * It is possible that all of the allowed IPs of a single peer will not + * fit within a single netlink message. In that case, the same peer will + * be written in the following message, except it will only contain + * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several + * times in a row for the same peer. It is then up to the receiver to + * coalesce adjacent peers. Likewise, it is possible that all peers will + * not fit within a single message. So, subsequent peers will be sent + * in following messages, except those will only contain WGDEVICE_A_IFNAME + * and WGDEVICE_A_PEERS. 
It is then up to the receiver to coalesce these + * messages to form the complete list of peers. + * + * Since this is an NLA_F_DUMP command, the final message will always be + * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message + * contains an integer error code. It is either zero or a negative error + * code corresponding to the errno. + * + * WG_CMD_SET_DEVICE + * ----------------- + * + * May only be called via NLM_F_REQUEST. The command should contain the + * following tree of nested items, containing one but not both of + * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current + * peers should be removed prior to adding the list below. + * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove + * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly + * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable + * WGDEVICE_A_PEERS: NLA_NESTED + * 0: NLA_NESTED + * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN + * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the + * specified peer should not exist at the end of the + * operation, rather than added/updated and/or + * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed + * IPs of this peer should be removed prior to adding + * the list below and/or WGPEER_F_UPDATE_ONLY if the + * peer should only be set if it already exists. + * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove + * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6 + * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable + * WGPEER_A_ALLOWEDIPS: NLA_NESTED + * 0: NLA_NESTED + * WGALLOWEDIP_A_FAMILY: NLA_U16 + * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr + * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 + * 0: NLA_NESTED + * ... + * 0: NLA_NESTED + * ... + * ... 
+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at + * all by most users of this API, as the + * most recent protocol will be used when + * this is unset. Otherwise, must be set + * to 1. + * 0: NLA_NESTED + * ... + * ... + * + * It is possible that the amount of configuration data exceeds that of + * the maximum message length accepted by the kernel. In that case, several + * messages should be sent one after another, with each successive one + * filling in information not contained in the prior. Note that if + * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably + * should not be specified in fragments that come after, so that the list + * of peers is only cleared the first time but appended after. Likewise for + * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message + * of a peer, it likely should not be specified in subsequent fragments. + * + * If an error occurs, NLMSG_ERROR will reply containing an errno. + */ + +#ifndef _WG_UAPI_WIREGUARD_H +#define _WG_UAPI_WIREGUARD_H + +#define WG_GENL_NAME "wireguard" +#define WG_GENL_VERSION 1 + +#define WG_KEY_LEN 32 + +enum wg_cmd { + WG_CMD_GET_DEVICE, + WG_CMD_SET_DEVICE, + __WG_CMD_MAX +}; +#define WG_CMD_MAX (__WG_CMD_MAX - 1) + +enum wgdevice_flag { + WGDEVICE_F_REPLACE_PEERS = 1U << 0, + __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS +}; +enum wgdevice_attribute { + WGDEVICE_A_UNSPEC, + WGDEVICE_A_IFINDEX, + WGDEVICE_A_IFNAME, + WGDEVICE_A_PRIVATE_KEY, + WGDEVICE_A_PUBLIC_KEY, + WGDEVICE_A_FLAGS, + WGDEVICE_A_LISTEN_PORT, + WGDEVICE_A_FWMARK, + WGDEVICE_A_PEERS, + __WGDEVICE_A_LAST +}; +#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1) + +enum wgpeer_flag { + WGPEER_F_REMOVE_ME = 1U << 0, + WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1, + WGPEER_F_UPDATE_ONLY = 1U << 2, + __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS | + WGPEER_F_UPDATE_ONLY +}; +enum wgpeer_attribute { + WGPEER_A_UNSPEC, + WGPEER_A_PUBLIC_KEY, + WGPEER_A_PRESHARED_KEY, + 
WGPEER_A_FLAGS, + WGPEER_A_ENDPOINT, + WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, + WGPEER_A_LAST_HANDSHAKE_TIME, + WGPEER_A_RX_BYTES, + WGPEER_A_TX_BYTES, + WGPEER_A_ALLOWEDIPS, + WGPEER_A_PROTOCOL_VERSION, + __WGPEER_A_LAST +}; +#define WGPEER_A_MAX (__WGPEER_A_LAST - 1) + +enum wgallowedip_attribute { + WGALLOWEDIP_A_UNSPEC, + WGALLOWEDIP_A_FAMILY, + WGALLOWEDIP_A_IPADDR, + WGALLOWEDIP_A_CIDR_MASK, + __WGALLOWEDIP_A_LAST +}; +#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1) + +#endif /* _WG_UAPI_WIREGUARD_H */ diff --git a/net/wireguard/version.h b/net/wireguard/version.h new file mode 100644 index 000000000000..c7f9028f0177 --- /dev/null +++ b/net/wireguard/version.h @@ -0,0 +1,3 @@ +#ifndef WIREGUARD_VERSION +#define WIREGUARD_VERSION "1.0.20220627" +#endif From b9e19eb28311b6f1dfcac64aa60ee49512a5eccf Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 23 Mar 2020 23:56:48 +0300 Subject: [PATCH 248/439] net: wireguard: ignore generated files Signed-off-by: Denis Efremov --- net/wireguard/crypto/zinc/chacha20/.gitignore | 1 + net/wireguard/crypto/zinc/poly1305/.gitignore | 1 + 2 files changed, 2 insertions(+) create mode 100644 net/wireguard/crypto/zinc/chacha20/.gitignore create mode 100644 net/wireguard/crypto/zinc/poly1305/.gitignore diff --git a/net/wireguard/crypto/zinc/chacha20/.gitignore b/net/wireguard/crypto/zinc/chacha20/.gitignore new file mode 100644 index 000000000000..50a214c24cdb --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/.gitignore @@ -0,0 +1 @@ +/chacha20-arm64.S diff --git a/net/wireguard/crypto/zinc/poly1305/.gitignore b/net/wireguard/crypto/zinc/poly1305/.gitignore new file mode 100644 index 000000000000..0a7e308ae2ba --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/.gitignore @@ -0,0 +1 @@ +/poly1305-arm64.S From 2073b936edfdfbddedbc24c5cbf6b89a71c86df3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 20 Feb 2020 21:53:45 +0300 Subject: [PATCH 249/439] net: wireguard: switch wireguard to n by default 
Signed-off-by: Denis Efremov --- net/wireguard/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireguard/Kconfig b/net/wireguard/Kconfig index 156e9dbfc051..e4b07763a6e5 100644 --- a/net/wireguard/Kconfig +++ b/net/wireguard/Kconfig @@ -10,7 +10,7 @@ config WIREGUARD select VFPv3 if CPU_V7 select NEON if CPU_V7 select KERNEL_MODE_NEON if CPU_V7 - default m + default n help WireGuard is a secure, fast, and easy to use replacement for IPsec that uses modern cryptography and clever networking tricks. It's @@ -24,6 +24,7 @@ config WIREGUARD config WIREGUARD_DEBUG bool "Debugging checks and verbose messages" depends on WIREGUARD + default n help This will write log messages for handshake and other events that occur for a WireGuard interface. It will also perform some From 0cab76bb05e7324948c690ae4ee9dcb78dbf6837 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 12 Jun 2020 01:41:58 +0300 Subject: [PATCH 250/439] scripts: add wireguard updater script Signed-off-by: Denis Efremov --- scripts/update_wireguard.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100755 scripts/update_wireguard.sh diff --git a/scripts/update_wireguard.sh b/scripts/update_wireguard.sh new file mode 100755 index 000000000000..fd56e21e595b --- /dev/null +++ b/scripts/update_wireguard.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +git clone git://git.zx2c4.com/wireguard-linux-compat +rm -fr net/wireguard/* + +perl -i -ne 'BEGIN{$print=1;} $print = 0 if m/cat/; print $_ if $print;' wireguard-linux-compat/kernel-tree-scripts/create-patch.sh + +wireguard-linux-compat/kernel-tree-scripts/create-patch.sh | patch -p1 + +rm -fr wireguard-linux-compat + +git checkout net/wireguard/crypto/zinc/chacha20/.gitignore \ + net/wireguard/crypto/zinc/poly1305/.gitignore \ + net/wireguard/Kconfig + From 2b7fdc23e09a7d975af39bf754e06c8223d5e377 Mon Sep 17 00:00:00 2001 From: andip71 Date: Mon, 8 Jan 2018 00:50:49 +0100 Subject: [PATCH 251/439] power: Add generic wakelock 
blocker driver v1.1.0 Based on ideas of FranciscoFranco's non-generic driver. Sysfs node: /sys/class/misc/boeffla_wakelock_blocker/wakelock_blocker - list of wakelocks to be blocked, separated by semicolons /sys/class/misc/boeffla_wakelock_blocker/debug - write: 0/1 to switch off and on debug logging into dmesg - read: get current driver internals /sys/class/misc/boeffla_wakelock_blocker/version - show driver version Signed-off-by: andip71 --- drivers/base/power/Makefile | 1 + drivers/base/power/boeffla_wl_blocker.c | 236 ++++++++++++++++++++++++ drivers/base/power/boeffla_wl_blocker.h | 23 +++ drivers/base/power/main.c | 11 ++ drivers/base/power/wakeup.c | 87 ++++++++- kernel/power/Kconfig | 5 + 6 files changed, 356 insertions(+), 7 deletions(-) create mode 100644 drivers/base/power/boeffla_wl_blocker.c create mode 100644 drivers/base/power/boeffla_wl_blocker.h diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 29cd71d8b360..4f9c632738f6 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o +obj-$(CONFIG_BOEFFLA_WL_BLOCKER) += boeffla_wl_blocker.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c new file mode 100644 index 000000000000..e9c93ce97ce9 --- /dev/null +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -0,0 +1,236 @@ +/* + * Author: andip71, 01.09.2017 + * + * Version 1.1.0 + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Change log: + * + * 1.1.0 (01.09.2017) + * - By default, the following wakelocks are blocked in an own list + * qcom_rx_wakelock, wlan, wlan_wow_wl, wlan_extscan_wl, NETLINK + * + * 1.0.1 (29.08.2017) + * - Add killing wakelock when currently active + * + * 1.0.0 (28.08.2017) + * - Initial version + * + */ + +#include +#include +#include +#include +#include +#include +#include "boeffla_wl_blocker.h" + + +/*****************************************/ +// Variables +/*****************************************/ + +char list_wl[LENGTH_LIST_WL] = {0}; +char list_wl_default[LENGTH_LIST_WL_DEFAULT] = {0}; + +extern char list_wl_search[LENGTH_LIST_WL_SEARCH]; +extern bool wl_blocker_active; +extern bool wl_blocker_debug; + + +/*****************************************/ +// internal functions +/*****************************************/ + +static void build_search_string(char *list1, char *list2) +{ + // store wakelock list and search string (with semicolons added at start and end) + sprintf(list_wl_search, ";%s;%s;", list1, list2); + + // set flag if wakelock blocker should be active (for performance reasons) + if (strlen(list_wl_search) > 5) + wl_blocker_active = true; + else + wl_blocker_active = false; +} + + +/*****************************************/ +// sysfs interface functions +/*****************************************/ + +// show list of user configured wakelocks +static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + // return list of wakelocks to be blocked + return sprintf(buf, "%s\n", list_wl); +} + + +// store list of user configured wakelocks +static ssize_t wakelock_blocker_store(struct device * dev, struct device_attribute *attr, + const 
char * buf, size_t n) +{ + int len = n; + + // check if string is too long to be stored + if (len > LENGTH_LIST_WL) + return -EINVAL; + + // store user configured wakelock list and rebuild search string + sscanf(buf, "%s", list_wl); + build_search_string(list_wl_default, list_wl); + + return n; +} + + +// show list of default, predefined wakelocks +static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + // return list of wakelocks to be blocked + return sprintf(buf, "%s\n", list_wl_default); +} + + +// store list of default, predefined wakelocks +static ssize_t wakelock_blocker_default_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + int len = n; + + // check if string is too long to be stored + if (len > LENGTH_LIST_WL_DEFAULT) + return -EINVAL; + + // store default, predefined wakelock list and rebuild search string + sscanf(buf, "%s", list_wl_default); + build_search_string(list_wl_default, list_wl); + + return n; +} + + +// show debug information of driver internals +static ssize_t debug_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return current debug status + return sprintf(buf, "Debug status: %d\n\nUser list: %s\nDefault list: %s\nSearch list: %s\nActive: %d\n", + wl_blocker_debug, list_wl, list_wl_default, list_wl_search, wl_blocker_active); +} + + +// store debug mode on/off (1/0) +static ssize_t debug_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int ret = -EINVAL; + unsigned int val; + + // check data and store if valid + ret = sscanf(buf, "%d", &val); + + if (ret != 1) + return -EINVAL; + + if (val == 1) + wl_blocker_debug = true; + else + wl_blocker_debug = false; + + return count; +} + + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return version information + return sprintf(buf, "%s\n", 
BOEFFLA_WL_BLOCKER_VERSION); +} + + + +/*****************************************/ +// Initialize sysfs objects +/*****************************************/ + +// define objects +static DEVICE_ATTR(wakelock_blocker, 0644, wakelock_blocker_show, wakelock_blocker_store); +static DEVICE_ATTR(wakelock_blocker_default, 0644, wakelock_blocker_default_show, wakelock_blocker_default_store); +static DEVICE_ATTR(debug, 0664, debug_show, debug_store); +static DEVICE_ATTR(version, 0664, version_show, NULL); + +// define attributes +static struct attribute *boeffla_wl_blocker_attributes[] = { + &dev_attr_wakelock_blocker.attr, + &dev_attr_wakelock_blocker_default.attr, + &dev_attr_debug.attr, + &dev_attr_version.attr, + NULL +}; + +// define attribute group +static struct attribute_group boeffla_wl_blocker_control_group = { + .attrs = boeffla_wl_blocker_attributes, +}; + +// define control device +static struct miscdevice boeffla_wl_blocker_control_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "boeffla_wakelock_blocker", +}; + + +/*****************************************/ +// Driver init and exit functions +/*****************************************/ + +static int boeffla_wl_blocker_init(void) +{ + // register boeffla wakelock blocker control device + misc_register(&boeffla_wl_blocker_control_device); + if (sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group) < 0) { + printk("Boeffla WL blocker: failed to create sys fs object.\n"); + return 0; + } + + // initialize default list + sprintf(list_wl_default, "%s", LIST_WL_DEFAULT); + build_search_string(list_wl_default, list_wl); + + // Print debug info + printk("Boeffla WL blocker: driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); + + return 0; +} + + +static void boeffla_wl_blocker_exit(void) +{ + // remove boeffla wakelock blocker control device + sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group); 
+ + // Print debug info + printk("Boeffla WL blocker: driver stopped\n"); +} + + +/* define driver entry points */ +module_init(boeffla_wl_blocker_init); +module_exit(boeffla_wl_blocker_exit); diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h new file mode 100644 index 000000000000..63603edc4b43 --- /dev/null +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -0,0 +1,23 @@ +/* + * Author: andip71, 01.09.2017 + * + * Version 1.1.0 + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define BOEFFLA_WL_BLOCKER_VERSION "1.1.0" + +#define LIST_WL_DEFAULT "" + +#define LENGTH_LIST_WL 255 +#define LENGTH_LIST_WL_DEFAULT 150 +#define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a3e4afff3fea..0d052b36c189 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -40,6 +40,14 @@ #include "../base.h" #include "power.h" +#ifdef CONFIG_SEC_DEBUG +#include +#endif + +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +void pm_print_active_wakeup_sources(void); +#endif + typedef int (*pm_callback_t)(struct device *); /* @@ -761,6 +769,9 @@ void dpm_resume_early(pm_message_t state) trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); dbg_snapshot_suspend("dpm_resume_early", dpm_resume_early, NULL, state.event, DSS_FLAG_IN); +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + pm_print_active_wakeup_sources(); +#endif mutex_lock(&dpm_list_mtx); pm_transition = state; diff --git a/drivers/base/power/wakeup.c 
b/drivers/base/power/wakeup.c index e9591af2e2ee..ff1396b40c02 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -26,6 +26,18 @@ #include "power.h" + +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +#include "boeffla_wl_blocker.h" + +char list_wl_search[LENGTH_LIST_WL_SEARCH] = {0}; +bool wl_blocker_active = false; +bool wl_blocker_debug = false; + +static void wakeup_source_deactivate(struct wakeup_source *ws); +#endif + + /* * If set, the suspend/hibernate code will abort transitions to a sleep state * if wakeup events are registered during or immediately before the transition. @@ -562,6 +574,57 @@ static void wakeup_source_activate(struct wakeup_source *ws) trace_wakeup_source_activate(ws->name, cec); } +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +// AP: Function to check if a wakelock is on the wakelock blocker list +static bool check_for_block(struct wakeup_source *ws) +{ + char wakelock_name[52] = {0}; + int length; + + // if debug mode on, print every wakelock requested + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s requested\n", ws->name); + + // if there is no list of wakelocks to be blocked, exit without futher checking + if (!wl_blocker_active) + return false; + + // only if ws structure is valid + if (ws) + { + // wake lock names handled have maximum length=50 and minimum=1 + length = strlen(ws->name); + if ((length > 50) || (length < 1)) + return false; + + // check if wakelock is in wake lock list to be blocked + sprintf(wakelock_name, ";%s;", ws->name); + + if(strstr(list_wl_search, wakelock_name) == NULL) + return false; + + // wake lock is in list, print it if debug mode on + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s blocked\n", ws->name); + + // if it is currently active, deactivate it immediately + log in debug mode + if (ws->active) + { + wakeup_source_deactivate(ws); + + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s killed\n", ws->name); + } + + // finally block it + return true; + } + + // there was no valid 
ws structure, do not block by default + return false; +} +#endif + /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. @@ -569,16 +632,23 @@ static void wakeup_source_activate(struct wakeup_source *ws) */ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { - ws->event_count++; - /* This is racy, but the counter is approximate anyway. */ - if (events_check_enabled) - ws->wakeup_count++; +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + if (!check_for_block(ws)) // AP: check if wakelock is on wakelock blocker list + { +#endif + ws->event_count++; + /* This is racy, but the counter is approximate anyway. */ + if (events_check_enabled) + ws->wakeup_count++; - if (!ws->active) - wakeup_source_activate(ws); + if (!ws->active) + wakeup_source_activate(ws); if (hard) pm_system_wakeup(); +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + } +#endif } /** @@ -940,7 +1010,10 @@ void pm_print_active_wakeup_sources(void) list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); - active = 1; +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + if (!check_for_block(ws)) // AP: check if wakelock is on wakelock blocker list +#endif + active = 1; } else if (!active && (!last_activity_ws || ktime_to_ns(ws->last_time) > diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index dd2b5a4d89a5..a69065e41913 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -312,3 +312,8 @@ config PM_GENERIC_DOMAINS_OF config CPU_PM bool + +config BOEFFLA_WL_BLOCKER + bool "Boeffla generic wakelock blocker driver" + depends on PM + default N From 43d226cb40109c92f1869c6e60bc193d557ece20 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 11:02:24 +0300 Subject: [PATCH 252/439] power: wl_blocker: add generic size Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 63603edc4b43..f32b51651927 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -19,5 +19,5 @@ #define LIST_WL_DEFAULT "" #define LENGTH_LIST_WL 255 -#define LENGTH_LIST_WL_DEFAULT 150 +#define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) #define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 From 00295a9942985f6e784c94151e53dba55008df3b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 11:09:19 +0300 Subject: [PATCH 253/439] power: wl_blocker: use scnprintf PAGE_SIZE Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index e9c93ce97ce9..a5a36cd528ac 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -76,7 +76,7 @@ static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute char *buf) { // return list of wakelocks to be blocked - return sprintf(buf, "%s\n", list_wl); + return scnprintf(buf, PAGE_SIZE, "%s\n", list_wl); } @@ -103,7 +103,7 @@ static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_a char *buf) { // return list of wakelocks to be blocked - return sprintf(buf, "%s\n", list_wl_default); + return scnprintf(buf, PAGE_SIZE, "%s\n", list_wl_default); } @@ -129,8 +129,11 @@ static ssize_t wakelock_blocker_default_store(struct device * dev, struct device static ssize_t debug_show(struct device *dev, struct device_attribute *attr, char *buf) { // return current debug status - return sprintf(buf, "Debug status: %d\n\nUser list: %s\nDefault list: %s\nSearch list: %s\nActive: %d\n", - wl_blocker_debug, list_wl, list_wl_default, list_wl_search, wl_blocker_active); + return scnprintf(buf, PAGE_SIZE, + "Debug 
status: %d\n\nUser list: %s\nDefault list: %s\n" + "Search list: %s\nActive: %d\n", + wl_blocker_debug, list_wl, list_wl_default, + list_wl_search, wl_blocker_active); } @@ -159,7 +162,7 @@ static ssize_t debug_store(struct device *dev, struct device_attribute *attr, static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { // return version information - return sprintf(buf, "%s\n", BOEFFLA_WL_BLOCKER_VERSION); + return scnprintf(buf, PAGE_SIZE, "%s\n", BOEFFLA_WL_BLOCKER_VERSION); } From 560964cff93d44e8943a45b48ab29796a84da4e8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 11:24:06 +0300 Subject: [PATCH 254/439] power: wl_blocker: fix permissions for version_show Use only read permissions for version_show. Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index a5a36cd528ac..b8814b16d3f1 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -172,10 +172,10 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c /*****************************************/ // define objects -static DEVICE_ATTR(wakelock_blocker, 0644, wakelock_blocker_show, wakelock_blocker_store); -static DEVICE_ATTR(wakelock_blocker_default, 0644, wakelock_blocker_default_show, wakelock_blocker_default_store); -static DEVICE_ATTR(debug, 0664, debug_show, debug_store); -static DEVICE_ATTR(version, 0664, version_show, NULL); +static DEVICE_ATTR_RW(wakelock_blocker); +static DEVICE_ATTR_RW(wakelock_blocker_default); +static DEVICE_ATTR_RW(debug); +static DEVICE_ATTR_RO(version); // define attributes static struct attribute *boeffla_wl_blocker_attributes[] = { From 514d5285cfa3354d2eed6d8ed086ed3b110f473c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 16:16:07 +0300 Subject: 
[PATCH 255/439] power: wl_blocker: remove excessive len variable It's not used. Type conversion size_t > int is not needed. Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index b8814b16d3f1..5d4af4e8217c 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -84,10 +84,8 @@ static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute static ssize_t wakelock_blocker_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { - int len = n; - // check if string is too long to be stored - if (len > LENGTH_LIST_WL) + if (n > LENGTH_LIST_WL) return -EINVAL; // store user configured wakelock list and rebuild search string @@ -111,10 +109,8 @@ static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_a static ssize_t wakelock_blocker_default_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { - int len = n; - // check if string is too long to be stored - if (len > LENGTH_LIST_WL_DEFAULT) + if (n > LENGTH_LIST_WL_DEFAULT) return -EINVAL; // store default, predefined wakelock list and rebuild search string From 077fb12d2fb20957ecfb71953a0b10fd9f95f0d0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 17:36:29 +0300 Subject: [PATCH 256/439] power: wl_blocker: use strcpy() in init Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 5d4af4e8217c..99d3637c5180 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -54,7 +54,7 @@ extern bool wl_blocker_debug; // internal functions 
/*****************************************/ -static void build_search_string(char *list1, char *list2) +static void build_search_string(const char *list1, const char *list2) { // store wakelock list and search string (with semicolons added at start and end) sprintf(list_wl_search, ";%s;%s;", list1, list2); @@ -209,7 +209,7 @@ static int boeffla_wl_blocker_init(void) } // initialize default list - sprintf(list_wl_default, "%s", LIST_WL_DEFAULT); + strcpy(list_wl_default, LIST_WL_DEFAULT); build_search_string(list_wl_default, list_wl); // Print debug info From 76726f6d3ada9bb87e70d78141e4353bfc2b8255 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:01:38 +0300 Subject: [PATCH 257/439] power: wl_blocker: mark functions with __init, __exit attrs Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 99d3637c5180..92f285a44f3f 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -198,7 +198,7 @@ static struct miscdevice boeffla_wl_blocker_control_device = { // Driver init and exit functions /*****************************************/ -static int boeffla_wl_blocker_init(void) +static int __init boeffla_wl_blocker_init(void) { // register boeffla wakelock blocker control device misc_register(&boeffla_wl_blocker_control_device); @@ -219,7 +219,7 @@ static int boeffla_wl_blocker_init(void) } -static void boeffla_wl_blocker_exit(void) +static void __exit boeffla_wl_blocker_exit(void) { // remove boeffla wakelock blocker control device sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, From 90f4e372519508c4015ba501ff688ca2e4c37a5e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:14:04 +0300 Subject: [PATCH 258/439] power: wl_blocker: add pr_fmt Signed-off-by: Denis Efremov --- 
drivers/base/power/boeffla_wl_blocker.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 92f285a44f3f..b45d03e0aa21 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -29,6 +29,8 @@ * */ +#define pr_fmt(fmt) "Boeffla WL blocker: " fmt + #include #include #include @@ -204,7 +206,7 @@ static int __init boeffla_wl_blocker_init(void) misc_register(&boeffla_wl_blocker_control_device); if (sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, &boeffla_wl_blocker_control_group) < 0) { - printk("Boeffla WL blocker: failed to create sys fs object.\n"); + pr_err("failed to create sys fs object.\n"); return 0; } @@ -213,7 +215,7 @@ static int __init boeffla_wl_blocker_init(void) build_search_string(list_wl_default, list_wl); // Print debug info - printk("Boeffla WL blocker: driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); + pr_info("driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); return 0; } @@ -226,7 +228,7 @@ static void __exit boeffla_wl_blocker_exit(void) &boeffla_wl_blocker_control_group); // Print debug info - printk("Boeffla WL blocker: driver stopped\n"); + pr_info("driver stopped\n"); } From cbe08e7f865bb04955b13bf2e182f8351a6ae980 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:15:14 +0300 Subject: [PATCH 259/439] power: wl_blocker: add misc_deregister in exit function Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index b45d03e0aa21..0da0d73bcf73 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -227,6 +227,8 @@ static void __exit boeffla_wl_blocker_exit(void) sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, 
&boeffla_wl_blocker_control_group); + misc_deregister(&boeffla_wl_blocker_control_device); + // Print debug info pr_info("driver stopped\n"); } From 77189d17184c9dbff2204e7df14097fd07689b6a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:23:11 +0300 Subject: [PATCH 260/439] power: wl_blocker: add error handling to init function Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 0da0d73bcf73..fb74ca25f854 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -202,12 +202,21 @@ static struct miscdevice boeffla_wl_blocker_control_device = { static int __init boeffla_wl_blocker_init(void) { + int err = 0; + // register boeffla wakelock blocker control device - misc_register(&boeffla_wl_blocker_control_device); - if (sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, - &boeffla_wl_blocker_control_group) < 0) { + err = misc_register(&boeffla_wl_blocker_control_device); + if (err) { + pr_err("failed register the device.\n"); + return err; + } + + err = sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group); + if (err) { pr_err("failed to create sys fs object.\n"); - return 0; + misc_deregister(&boeffla_wl_blocker_control_device); + return err; } // initialize default list From 943bd45c078167d56e4d651db00a3b5bfd952dc1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:29:07 +0300 Subject: [PATCH 261/439] power: wl_blocker: use device_store_bool() to store wl_blocker_debug Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 29 +++++-------------------- 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c 
index fb74ca25f854..40d3c5647f3c 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -135,28 +135,6 @@ static ssize_t debug_show(struct device *dev, struct device_attribute *attr, cha } -// store debug mode on/off (1/0) -static ssize_t debug_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned int ret = -EINVAL; - unsigned int val; - - // check data and store if valid - ret = sscanf(buf, "%d", &val); - - if (ret != 1) - return -EINVAL; - - if (val == 1) - wl_blocker_debug = true; - else - wl_blocker_debug = false; - - return count; -} - - static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { // return version information @@ -172,14 +150,17 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c // define objects static DEVICE_ATTR_RW(wakelock_blocker); static DEVICE_ATTR_RW(wakelock_blocker_default); -static DEVICE_ATTR_RW(debug); static DEVICE_ATTR_RO(version); +static struct dev_ext_attribute dev_attr_debug = { + __ATTR(debug, 0644, debug_show, device_store_bool), + &wl_blocker_debug +}; // define attributes static struct attribute *boeffla_wl_blocker_attributes[] = { &dev_attr_wakelock_blocker.attr, &dev_attr_wakelock_blocker_default.attr, - &dev_attr_debug.attr, + &dev_attr_debug.attr.attr, &dev_attr_version.attr, NULL }; From ce122584cac23841ebcf885386c779d21a438a9b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 13 Jun 2020 16:49:57 +0300 Subject: [PATCH 262/439] power: wl_blocker: add default list Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index f32b51651927..42ee96769fc6 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -16,7 +16,7 @@ #define 
BOEFFLA_WL_BLOCKER_VERSION "1.1.0" -#define LIST_WL_DEFAULT "" +#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" #define LENGTH_LIST_WL 255 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) From 1366caaf47491356db77fb9fcf3c2e6fb76d3400 Mon Sep 17 00:00:00 2001 From: Aner Torre Date: Wed, 17 Jun 2020 11:33:50 +0200 Subject: [PATCH 263/439] power: wl_blocker: increase blacklist max length The previous 255 limit seemed to be not enough to hold all the Samsung stock defined wakelocks that look safe to block. --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 42ee96769fc6..5ee4a755b8a1 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -18,6 +18,6 @@ #define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" -#define LENGTH_LIST_WL 255 +#define LENGTH_LIST_WL 1024 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) #define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 From 383dd2cee550dec88c57aeb57ff6381ea261f64f Mon Sep 17 00:00:00 2001 From: Aner Torre Date: Wed, 17 Jun 2020 11:57:15 +0200 Subject: [PATCH 264/439] power: wl_blocker: add some wakelocks to the blacklist Seems blocking them does not affect device stability. 
--- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 5ee4a755b8a1..753b3afe4962 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -16,7 +16,7 @@ #define BOEFFLA_WL_BLOCKER_VERSION "1.1.0" -#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" +#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect;grip_wake_lock;wlan_scan_wake;wlan_pm_wake;nfc_wake_lock" #define LENGTH_LIST_WL 1024 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) From ad36bd1b7acf1a66a6279889ea154e1fdbc771a2 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 9 Sep 2020 01:52:11 +0300 Subject: [PATCH 265/439] power: wl_blocker: drop redundant global initializers Signed-off-by: Denis Efremov --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- drivers/base/power/wakeup.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 40d3c5647f3c..9e848ba9df5a 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -44,8 +44,8 @@ // Variables /*****************************************/ -char list_wl[LENGTH_LIST_WL] = {0}; -char list_wl_default[LENGTH_LIST_WL_DEFAULT] = {0}; +char list_wl[LENGTH_LIST_WL]; +char list_wl_default[LENGTH_LIST_WL_DEFAULT]; extern char list_wl_search[LENGTH_LIST_WL_SEARCH]; extern bool wl_blocker_active; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index ff1396b40c02..5dc98b862b5a 100644 --- 
a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -30,7 +30,7 @@ #ifdef CONFIG_BOEFFLA_WL_BLOCKER #include "boeffla_wl_blocker.h" -char list_wl_search[LENGTH_LIST_WL_SEARCH] = {0}; +char list_wl_search[LENGTH_LIST_WL_SEARCH]; bool wl_blocker_active = false; bool wl_blocker_debug = false; From 2f362e9382bde34e63481404d832ef42c5bd2d82 Mon Sep 17 00:00:00 2001 From: Nico Becker Date: Sun, 22 Mar 2020 04:31:51 +0100 Subject: [PATCH 266/439] sound: add moro sound module Added Moro Sound Module Signed-off-by: Nico Becker Signed-off-by: Denis Efremov --- drivers/base/regmap/regmap.c | 39 ++ sound/soc/codecs/Kconfig | 4 + sound/soc/codecs/Makefile | 1 + sound/soc/codecs/cs47l92.c | 10 + sound/soc/codecs/madera.c | 7 + sound/soc/codecs/moro_sound.c | 770 ++++++++++++++++++++++++++++++++++ sound/soc/codecs/moro_sound.h | 63 +++ 7 files changed, 894 insertions(+) create mode 100644 sound/soc/codecs/moro_sound.c create mode 100644 sound/soc/codecs/moro_sound.h diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 8fd08023c0f5..cb409bd2cc7a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -34,6 +34,10 @@ */ #undef LOG_DEVICE +#ifdef CONFIG_MORO_SOUND +int moro_sound_write_hook(unsigned int reg, unsigned int val); +#endif + static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool force_write); @@ -1636,6 +1640,10 @@ int _regmap_write(struct regmap *map, unsigned int reg, if (!regmap_writeable(map, reg)) return -EIO; +#ifdef CONFIG_MORO_SOUND + val = moro_sound_write_hook(reg, val); +#endif + if (!map->cache_bypass && !map->defer_caching) { ret = regcache_write(map, reg, val); if (ret != 0) @@ -1656,6 +1664,37 @@ int _regmap_write(struct regmap *map, unsigned int reg, return map->reg_write(context, reg, val); } +#ifdef CONFIG_MORO_SOUND +int _regmap_write_nohook(struct regmap *map, unsigned int reg, + unsigned int val) +{ + int ret; + void 
*context = _regmap_map_get_context(map); + + if (!regmap_writeable(map, reg)) + return -EIO; + + if (!map->cache_bypass && !map->defer_caching) { + ret = regcache_write(map, reg, val); + if (ret != 0) + return ret; + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + +#ifdef LOG_DEVICE + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + dev_info(map->dev, "%x <= %x\n", reg, val); +#endif + + trace_regmap_reg_write(map, reg, val); + + return map->reg_write(context, reg, val); +} +#endif + /** * regmap_write() - Write a value to a single register * diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 9ec790676217..811e5eda4db0 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1245,4 +1245,8 @@ config SND_SOC_TPA6130A2 tristate "Texas Instruments TPA6130A2 headphone amplifier" depends on I2C +config MORO_SOUND + bool "Sound control for S10 madera driver" + default n + endmenu diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 10b1aec4addf..930652f3b7c6 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -490,6 +490,7 @@ obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o +obj-$(CONFIG_MORO_SOUND) += moro_sound.o # Amp obj-$(CONFIG_SND_SOC_DIO2125) += snd-soc-dio2125.o diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c index 253a844b5060..f03c291ea3e3 100644 --- a/sound/soc/codecs/cs47l92.c +++ b/sound/soc/codecs/cs47l92.c @@ -30,6 +30,10 @@ #include "madera.h" #include "wm_adsp.h" +#ifdef CONFIG_MORO_SOUND +#include "moro_sound.h" +#endif + #define CS47L92_NUM_ADSP 1 #define CS47L92_MONO_OUTPUTS 3 @@ -1974,6 +1978,12 @@ static int cs47l92_codec_probe(struct snd_soc_codec *codec) madera->dapm = snd_soc_codec_get_dapm(codec); +#ifdef CONFIG_MORO_SOUND + 
moro_sound_hook_madera_pcm_probe(madera->regmap); + + cs47l92->core.madera->dapm = snd_soc_codec_get_dapm(codec); +#endif + ret = madera_init_inputs(codec, cs47l92_dmic_inputs, ARRAY_SIZE(cs47l92_dmic_inputs), diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index da7037f0d300..93bb47d0c5bc 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -16,6 +16,9 @@ #include #include #include +#ifdef CONFIG_MORO_SOUND +#include "moro_sound.h" +#endif #include #include @@ -1382,6 +1385,10 @@ int madera_init_aif(struct snd_soc_codec *codec) struct madera *madera = priv->madera; int ret; +#ifdef CONFIG_MORO_SOUND + moro_sound_hook_madera_pcm_probe(madera->regmap); +#endif + /* Update Sample Rate 1 to 48kHz for cases when no AIF1 hw_params */ ret = regmap_update_bits(madera->regmap, MADERA_SAMPLE_RATE_1, MADERA_SAMPLE_RATE_1_MASK, 0x03); diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c new file mode 100644 index 000000000000..8ca5c3d97387 --- /dev/null +++ b/sound/soc/codecs/moro_sound.c @@ -0,0 +1,770 @@ +/* + * moro_sound.c -- Sound mod for Madera, S8 sound driver + * + * Author : @morogoku https://github.com/morogoku + * Edited by : @noxxxious https://github.com/noxxxious + * + * Date : March 2019 - v1.0 + * + * + * Based on the Boeffla Sound 1.6 for Galaxy S3 + * + * Credits: andip71, author of Boeffla Sound + * Supercurio, Yank555 and Gokhanmoral. 
+ * AndreiLux, for his Madera control sound mod + * Flar2, for his speaker gain mod + * + */ + + +#include "moro_sound.h" + + +/* Variables */ +static struct regmap *map; + +/* Internal moro sound variables */ +static int first = 1; +static int moro_sound = 0; + +static int headphone_gain_l, headphone_gain_r; +static int earpiece_gain; +static int out2l_mix_source, out2r_mix_source; +static int eq1_mix_source, eq2_mix_source; + +static int eq = 0; +static int eq_gains[5]; + +static unsigned int get_headphone_gain_l(void); +static unsigned int get_headphone_gain_r(void); +static void set_headphone_gain_l(int gain); +static void set_headphone_gain_r(int gain); + +static unsigned int get_earpiece_gain(void); +static void set_earpiece_gain(int gain); + +static void set_out2l_mix_source(int value); +static void set_out2r_mix_source(int value); + +static void set_eq1_mix_source(int value); +static void set_eq2_mix_source(int value); + +static void set_eq(void); +static void set_eq_gains(void); + +static void reset_moro_sound(void); +static void reset_audio_hub(void); +static void update_audio_hub(void); + +/* Internal helper functions */ + +#define madera_write(reg, val) _regmap_write_nohook(map, reg, val) + +#define madera_read(reg, val) regmap_read(map, reg, val) + +static unsigned int get_headphone_gain_l(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2L, &val); + val &= MADERA_OUT2L_VOL_MASK; + val >>= MADERA_OUT2L_VOL_SHIFT; + + return val; +} + +static unsigned int get_headphone_gain_r(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2R, &val); + val &= MADERA_OUT2R_VOL_MASK; + val >>= MADERA_OUT2R_VOL_SHIFT; + + return val; +} + +static void set_headphone_gain_l(int gain) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2L, &val); + val &= ~MADERA_OUT2L_VOL_MASK; + val |= (gain << MADERA_OUT2L_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_2L, val); +} + +static void set_headphone_gain_r(int gain) 
+{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2R, &val); + val &= ~MADERA_OUT2R_VOL_MASK; + val |= (gain << MADERA_OUT2R_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_2R, val); +} + +static unsigned int get_earpiece_gain(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_3L, &val); + val &= MADERA_OUT3L_VOL_MASK; + val >>= MADERA_OUT3L_VOL_SHIFT; + + return val; +} + +static void set_earpiece_gain(int gain) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_3L, &val); + val &= ~MADERA_OUT3L_VOL_MASK; + val |= (gain << MADERA_OUT3L_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_3L, val); +} + +static void set_out2l_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_OUT2LMIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_OUT2LMIX_INPUT_1_SOURCE, val); +} + +static void set_out2r_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_OUT2RMIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_OUT2RMIX_INPUT_1_SOURCE, val); +} + +static void set_eq1_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_EQ1MIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_EQ1MIX_INPUT_1_SOURCE, val); +} + + +static void set_eq2_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_EQ2MIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_EQ2MIX_INPUT_1_SOURCE, val); +} + +static void set_eq(void) +{ + unsigned int val; + + if (eq && moro_sound) { + madera_read(MADERA_EQ1_1, &val); + val &= ~MADERA_EQ1_ENA_MASK; + val |= 1 << MADERA_EQ1_ENA_SHIFT; + madera_write(MADERA_EQ1_1, val); + madera_read(MADERA_EQ2_1, &val); + val &= ~MADERA_EQ2_ENA_MASK; + val |= 
1 << MADERA_EQ2_ENA_SHIFT; + madera_write(MADERA_EQ2_1, val); + set_eq1_mix_source(32); + set_eq2_mix_source(33); + set_out2l_mix_source(80); + set_out2r_mix_source(81); + } else { + madera_read(MADERA_EQ1_1, &val); + val &= ~MADERA_EQ1_ENA_MASK; + val |= 0 << MADERA_EQ1_ENA_SHIFT; + madera_write(MADERA_EQ1_1, val); + madera_read(MADERA_EQ2_1, &val); + val &= ~MADERA_EQ2_ENA_MASK; + val |= 0 << MADERA_EQ2_ENA_SHIFT; + madera_write(MADERA_EQ2_1, val); + eq1_mix_source = EQ1_MIX_DEFAULT; + eq2_mix_source = EQ2_MIX_DEFAULT; + set_eq1_mix_source(eq1_mix_source); + set_eq2_mix_source(eq2_mix_source); + out2l_mix_source = OUT2L_MIX_DEFAULT; + out2r_mix_source = OUT2R_MIX_DEFAULT; + set_out2l_mix_source(out2l_mix_source); + set_out2r_mix_source(out2r_mix_source); + } + + set_eq_gains(); +} + +static void set_eq_gains(void) +{ + unsigned int val; + unsigned int gain1, gain2, gain3, gain4, gain5; + + gain1 = eq_gains[0]; + gain2 = eq_gains[1]; + gain3 = eq_gains[2]; + gain4 = eq_gains[3]; + gain5 = eq_gains[4]; + + madera_read(MADERA_EQ1_1, &val); + + val &= MADERA_EQ1_ENA_MASK; + val |= ((gain1 + EQ_GAIN_OFFSET) << MADERA_EQ1_B1_GAIN_SHIFT); + val |= ((gain2 + EQ_GAIN_OFFSET) << MADERA_EQ1_B2_GAIN_SHIFT); + val |= ((gain3 + EQ_GAIN_OFFSET) << MADERA_EQ1_B3_GAIN_SHIFT); + + madera_write(MADERA_EQ1_1, val); + madera_write(MADERA_EQ2_1, val); + + madera_read(MADERA_EQ1_2, &val); + + val &= MADERA_EQ1_B1_MODE_MASK; + val |= ((gain4 + EQ_GAIN_OFFSET) << MADERA_EQ1_B4_GAIN_SHIFT); + val |= ((gain5 + EQ_GAIN_OFFSET) << MADERA_EQ1_B5_GAIN_SHIFT); + + madera_write(MADERA_EQ1_2, val); + madera_write(MADERA_EQ2_2, val); +} + +/* Sound hook functions */ +void moro_sound_hook_madera_pcm_probe(struct regmap *pmap) +{ + map = pmap; + moro_sound = MORO_SOUND_DEFAULT; + eq = EQ_DEFAULT; + set_eq(); + + if (moro_sound) + reset_moro_sound(); +} + +unsigned int moro_sound_write_hook(unsigned int reg, unsigned int val) +{ + if (!moro_sound) + return val; + + switch (reg) { + case 
MADERA_DAC_DIGITAL_VOLUME_2L: + val &= ~MADERA_OUT2L_VOL_MASK; + val |= (headphone_gain_l << MADERA_OUT2L_VOL_SHIFT); + break; + case MADERA_DAC_DIGITAL_VOLUME_2R: + val &= ~MADERA_OUT2R_VOL_MASK; + val |= (headphone_gain_r << MADERA_OUT2R_VOL_SHIFT); + break; + case MADERA_DAC_DIGITAL_VOLUME_3L: + val &= ~MADERA_OUT3L_VOL_MASK; + val |= (earpiece_gain << MADERA_OUT3L_VOL_SHIFT); + break; + if (eq) { + case MADERA_OUT2LMIX_INPUT_1_SOURCE: + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (out2l_mix_source << MADERA_MIXER_SOURCE_SHIFT); + break; + case MADERA_OUT2RMIX_INPUT_1_SOURCE: + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (out2r_mix_source << MADERA_MIXER_SOURCE_SHIFT); + break; + } + default: + break; + } + + return val; +} + +/* Initialization functions */ + +static void reset_moro_sound(void) +{ + headphone_gain_l = HEADPHONE_DEFAULT; + headphone_gain_r = HEADPHONE_DEFAULT; + + earpiece_gain = EARPIECE_DEFAULT; + + out2l_mix_source = OUT2L_MIX_DEFAULT; + out2r_mix_source = OUT2R_MIX_DEFAULT; + + eq1_mix_source = EQ1_MIX_DEFAULT; + eq2_mix_source = EQ2_MIX_DEFAULT; +} + + +static void reset_audio_hub(void) +{ + set_headphone_gain_l(HEADPHONE_DEFAULT); + set_headphone_gain_r(HEADPHONE_DEFAULT); + + set_earpiece_gain(EARPIECE_DEFAULT); + + set_out2l_mix_source(OUT2L_MIX_DEFAULT); + set_out2r_mix_source(OUT2R_MIX_DEFAULT); + + set_eq1_mix_source(EQ1_MIX_DEFAULT); + set_eq2_mix_source(EQ2_MIX_DEFAULT); + + set_eq(); +} + +static void update_audio_hub(void) +{ + set_headphone_gain_l(headphone_gain_l); + set_headphone_gain_r(headphone_gain_r); + + set_earpiece_gain(earpiece_gain); + + set_out2l_mix_source(out2l_mix_source); + set_out2r_mix_source(out2r_mix_source); + + set_eq1_mix_source(eq1_mix_source); + set_eq2_mix_source(eq2_mix_source); + + set_eq(); +} + +/* sysfs interface functions */ +static ssize_t moro_sound_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", moro_sound); +} + + +static ssize_t 
moro_sound_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (((val == 0) || (val == 1))) { + if (moro_sound != val) { + moro_sound = val; + if (first) { + reset_moro_sound(); + first = 0; + } + + if (val == 1) + update_audio_hub(); + else if (val == 0) + reset_audio_hub(); + } + } + + return count; +} + + +/* Headphone volume */ +static ssize_t headphone_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d %d\n", headphone_gain_l, headphone_gain_r); +} + + +static ssize_t headphone_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val_l; + int val_r; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d %d", &val_l, &val_r) < 2) + return -EINVAL; + + if (val_l < HEADPHONE_MIN) + val_l = HEADPHONE_MIN; + + if (val_l > HEADPHONE_MAX) + val_l = HEADPHONE_MAX; + + if (val_r < HEADPHONE_MIN) + val_r = HEADPHONE_MIN; + + if (val_r > HEADPHONE_MAX) + val_r = HEADPHONE_MAX; + + headphone_gain_l = val_l; + headphone_gain_r = val_r; + + set_headphone_gain_l(headphone_gain_l); + set_headphone_gain_r(headphone_gain_r); + + return count; +} + +static ssize_t headphone_limits_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Min: %d Max: %d Def: %d\n", HEADPHONE_MIN, HEADPHONE_MAX, HEADPHONE_DEFAULT); +} + +/* Earpiece Volume */ + +static ssize_t earpiece_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", earpiece_gain); +} + +static ssize_t earpiece_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EARPIECE_MIN) + val = EARPIECE_MIN; + + if (val > EARPIECE_MAX) + val = EARPIECE_MAX; + + 
earpiece_gain = val; + set_earpiece_gain(earpiece_gain); + + return count; +} + +static ssize_t earpiece_limits_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return version information + return sprintf(buf, "Min: %d Max: %d Def:%d\n", EARPIECE_MIN, EARPIECE_MAX, EARPIECE_DEFAULT); +} + +/* EQ */ +static ssize_t eq_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq); +} + +static ssize_t eq_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (((val == 0) || (val == 1))) { + if (eq != val) { + eq = val; + set_eq(); + } + } + + return count; +} + + +/* EQ GAIN */ + +static ssize_t eq_gains_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d %d %d %d %d\n", eq_gains[0], eq_gains[1], eq_gains[2], eq_gains[3], eq_gains[4]); +} + +static ssize_t eq_gains_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int gains[5]; + int i; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d %d %d %d %d", &gains[0], &gains[1], &gains[2], &gains[3], &gains[4]) < 5) + return -EINVAL; + + for (i = 0; i <= 4; i++) { + if (gains[i] < EQ_GAIN_MIN) + gains[i] = EQ_GAIN_MIN; + if (gains[i] > EQ_GAIN_MAX) + gains[i] = EQ_GAIN_MAX; + eq_gains[i] = gains[i]; + } + + set_eq_gains(); + + return count; +} + +static ssize_t eq_b1_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[0]); +} + +static ssize_t eq_b1_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[0] = val; + set_eq_gains(); + + 
return count; +} + +static ssize_t eq_b2_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[1]); +} + +static ssize_t eq_b2_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[1] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b3_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[2]); +} + +static ssize_t eq_b3_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[2] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b4_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[3]); +} + +static ssize_t eq_b4_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[3] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b5_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[4]); +} + +static ssize_t eq_b5_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[4] = val; + set_eq_gains(); + + return count; +} + +static 
ssize_t reg_dump_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + unsigned int out2_ena, out2l_mix, out2r_mix, eq1_ena, eq2_ena, eq1_mix, eq2_mix, eq_b1, + eq_b2, eq_b3, eq_b4, eq_b5; + + madera_read(MADERA_OUTPUT_ENABLES_1, &out2_ena); + out2_ena = (out2_ena & MADERA_OUT2L_ENA_MASK) >> MADERA_OUT2L_ENA_SHIFT; + + madera_read(MADERA_OUT2LMIX_INPUT_1_SOURCE, &out2l_mix); + madera_read(MADERA_OUT2RMIX_INPUT_1_SOURCE, &out2r_mix); + madera_read(MADERA_EQ1_1, &eq1_ena); + eq1_ena = (eq1_ena & MADERA_EQ1_ENA_MASK) >> MADERA_EQ1_ENA_SHIFT; + madera_read(MADERA_EQ2_1, &eq2_ena); + eq2_ena = (eq2_ena & MADERA_EQ2_ENA_MASK) >> MADERA_EQ2_ENA_SHIFT; + madera_read(MADERA_EQ1MIX_INPUT_1_SOURCE, &eq1_mix); + madera_read(MADERA_EQ2MIX_INPUT_1_SOURCE, &eq2_mix); + madera_read(MADERA_EQ1_1, &eq_b1); + eq_b1 = ((eq_b1 & MADERA_EQ1_B1_GAIN_MASK) >> MADERA_EQ1_B1_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_1, &eq_b2); + eq_b2 = ((eq_b2 & MADERA_EQ1_B2_GAIN_MASK) >> MADERA_EQ1_B2_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_1, &eq_b3); + eq_b3 = ((eq_b3 & MADERA_EQ1_B3_GAIN_MASK) >> MADERA_EQ1_B3_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_2, &eq_b4); + eq_b4 = ((eq_b4 & MADERA_EQ1_B4_GAIN_MASK) >> MADERA_EQ1_B4_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_2, &eq_b5); + eq_b5 = ((eq_b5 & MADERA_EQ1_B5_GAIN_MASK) >> MADERA_EQ1_B5_GAIN_SHIFT) - EQ_GAIN_OFFSET; + + return sprintf(buf, "\ +headphone_gain_l: reg: %d, variable: %d \ +headphone_gain_r: reg: %d, variable: %d \ +earpiece_gain: %d \ +HPOUT Enabled: %d \ +HPOUT2L Source: %d \ +HPOUT2R Source: %d \ +EQ1 Enabled: %d \ +EQ2 Enabled: %d \ +EQ1MIX source: %d \ +EQ2MIX source: %d \ +EQ b1 gain: %d \ +EQ b2 gain: %d \ +EQ b3 gain: %d \ +EQ b4 gain: %d \ +EQ b5 gain: %d \ +", +get_headphone_gain_l(), +get_headphone_gain_r(), +headphone_gain_l, +headphone_gain_r, +first, +get_earpiece_gain(), +out2_ena, +out2l_mix, +out2r_mix, +eq1_ena, +eq2_ena, +eq1_mix, +eq2_mix, +eq_b1, 
+eq_b2, +eq_b3, +eq_b4, +eq_b5); +} + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", MORO_SOUND_VERSION); +} + + +/* Sysfs permissions */ +static DEVICE_ATTR(moro_sound, 0664, moro_sound_show, moro_sound_store); +static DEVICE_ATTR(headphone_gain, 0664, headphone_gain_show, headphone_gain_store); +static DEVICE_ATTR(headphone_limits, 0664, headphone_limits_show, NULL); +static DEVICE_ATTR(earpiece_gain, 0664, earpiece_gain_show, earpiece_gain_store); +static DEVICE_ATTR(earpiece_limits, 0664, earpiece_limits_show, NULL); +static DEVICE_ATTR(eq, 0664, eq_show, eq_store); +static DEVICE_ATTR(eq_gains, 0664, eq_gains_show, eq_gains_store); +static DEVICE_ATTR(eq_b1_gain, 0664, eq_b1_gain_show, eq_b1_gain_store); +static DEVICE_ATTR(eq_b2_gain, 0664, eq_b2_gain_show, eq_b2_gain_store); +static DEVICE_ATTR(eq_b3_gain, 0664, eq_b3_gain_show, eq_b3_gain_store); +static DEVICE_ATTR(eq_b4_gain, 0664, eq_b4_gain_show, eq_b4_gain_store); +static DEVICE_ATTR(eq_b5_gain, 0664, eq_b5_gain_show, eq_b5_gain_store); +static DEVICE_ATTR(version, 0664, version_show, NULL); +static DEVICE_ATTR(reg_dump, 0664, reg_dump_show, NULL); + +static struct attribute *moro_sound_attributes[] = { + &dev_attr_moro_sound.attr, + &dev_attr_headphone_gain.attr, + &dev_attr_headphone_limits.attr, + &dev_attr_earpiece_gain.attr, + &dev_attr_earpiece_limits.attr, + &dev_attr_eq.attr, + &dev_attr_eq_gains.attr, + &dev_attr_eq_b1_gain.attr, + &dev_attr_eq_b2_gain.attr, + &dev_attr_eq_b3_gain.attr, + &dev_attr_eq_b4_gain.attr, + &dev_attr_eq_b5_gain.attr, + &dev_attr_version.attr, + &dev_attr_reg_dump.attr, + NULL +}; + +static struct attribute_group moro_sound_control_group = { + .attrs = moro_sound_attributes, +}; + +static struct miscdevice moro_sound_control_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "moro_sound", +}; + +static int moro_sound_init(void) +{ + misc_register(&moro_sound_control_device); + + if 
(sysfs_create_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group) < 0) { + return 0; + } + + reset_moro_sound(); + + return 0; +} + +static void moro_sound_exit(void) +{ + sysfs_remove_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group); +} + +/* Driver init and exit functions */ +module_init(moro_sound_init); +module_exit(moro_sound_exit); diff --git a/sound/soc/codecs/moro_sound.h b/sound/soc/codecs/moro_sound.h new file mode 100644 index 000000000000..3c3594523ed4 --- /dev/null +++ b/sound/soc/codecs/moro_sound.h @@ -0,0 +1,63 @@ +/* + * moro_sound.h -- Sound mod for Madera, S10 sound driver + * + * Author : @morogoku https://github.com/morogoku + * + * + */ + + +#include +#include +#include +#include +#include +#include + +#include + +/* External function declarations */ +void moro_sound_hook_madera_pcm_probe(struct regmap *pmap); +int _regmap_write_nohook(struct regmap *map, unsigned int reg, unsigned int val); +int set_speaker_gain(int gain); +int get_speaker_gain(void); + +/* Definitions */ + +/* Moro sound general */ +#define MORO_SOUND_DEFAULT 0 +#define MORO_SOUND_VERSION "2.1.1" + +/* Headphone levels */ +#define HEADPHONE_DEFAULT 113 +#define HEADPHONE_MIN 60 +#define HEADPHONE_MAX 170 + +/* Earpiece levels */ +#define EARPIECE_DEFAULT 128 +#define EARPIECE_MIN 60 +#define EARPIECE_MAX 190 + +/* Speaker levels */ +#define SPEAKER_DEFAULT 20 +#define SPEAKER_MIN 0 +#define SPEAKER_MAX 63 + +/* Mixers sources */ +#define OUT2L_MIX_DEFAULT 32 +#define OUT2R_MIX_DEFAULT 33 +#define EQ1_MIX_DEFAULT 0 +#define EQ2_MIX_DEFAULT 0 + +/* EQ gain */ +#define EQ_DEFAULT 0 +#define EQ_GAIN_DEFAULT 0 +#define EQ_GAIN_OFFSET 12 +#define EQ_GAIN_MIN -12 +#define EQ_GAIN_MAX 12 + +/* Mixers */ +#define MADERA_MIXER_SOURCE_MASK 0xff +#define MADERA_MIXER_SOURCE_SHIFT 0 +#define MADERA_MIXER_VOLUME_MASK 0xfe +#define MADERA_MIXER_VOLUME_SHIFT 1 From a2562a5c98be14179d6d398fb39fbf8c4491f558 Mon Sep 17 00:00:00 2001 
From: Denis Efremov Date: Sun, 21 Jun 2020 18:54:53 +0300 Subject: [PATCH 267/439] sound: moro: fix reg_dump_show() output Remove first variable, because it's not used in fmt string. Swap get_headphone_gain_l(), headphone_gain_l, get_headphone_gain_r(), headphone_gain_r according to the fmt string. Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 8ca5c3d97387..03561a05d80b 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -676,11 +676,8 @@ EQ b3 gain: %d \ EQ b4 gain: %d \ EQ b5 gain: %d \ ", -get_headphone_gain_l(), -get_headphone_gain_r(), -headphone_gain_l, -headphone_gain_r, -first, +get_headphone_gain_l(), headphone_gain_l, +get_headphone_gain_r(), headphone_gain_r, get_earpiece_gain(), out2_ena, out2l_mix, From bcf50d8f1463cbfd06c183fe2e02016573d4894b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:02:22 +0300 Subject: [PATCH 268/439] sound: moro: mark functions with __init, __exit attrs Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 03561a05d80b..45f7e6d62046 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -742,7 +742,7 @@ static struct miscdevice moro_sound_control_device = { .name = "moro_sound", }; -static int moro_sound_init(void) +static int __init moro_sound_init(void) { misc_register(&moro_sound_control_device); @@ -756,7 +756,7 @@ static int moro_sound_init(void) return 0; } -static void moro_sound_exit(void) +static void __exit moro_sound_exit(void) { sysfs_remove_group(&moro_sound_control_device.this_device->kobj, &moro_sound_control_group); From a7b61a69ed9f3ed25d991ce388a91dee160d5344 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 
22:35:51 +0300 Subject: [PATCH 269/439] sound: moro: add pr_fmt macro Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 45f7e6d62046..b8d6fbd504a5 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -16,6 +16,7 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "moro_sound.h" From d0e7bb4d782b756c97cc039f40c7f635f453a838 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:37:54 +0300 Subject: [PATCH 270/439] sound: moro: add misc_deregister in exit function Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index b8d6fbd504a5..a298fe04f662 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -761,6 +761,8 @@ static void __exit moro_sound_exit(void) { sysfs_remove_group(&moro_sound_control_device.this_device->kobj, &moro_sound_control_group); + + misc_deregister(&moro_sound_control_device); } /* Driver init and exit functions */ From f8d21fbd7ca279dc7118e361bee0fecbc91d8e1c Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 22:40:25 +0300 Subject: [PATCH 271/439] sound: moro: add error handling to init function Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index a298fe04f662..0e66750e0972 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -745,11 +745,20 @@ static struct miscdevice moro_sound_control_device = { static int __init moro_sound_init(void) { - misc_register(&moro_sound_control_device); + int err = 0; - if (sysfs_create_group(&moro_sound_control_device.this_device->kobj, - &moro_sound_control_group) < 0) { - 
return 0; + err = misc_register(&moro_sound_control_device); + if (err) { + pr_err("failed register the device.\n"); + return err; + } + + err = sysfs_create_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group); + if (err) { + pr_err("failed to create sys fs object.\n"); + misc_deregister(&moro_sound_control_device); + return err; } reset_moro_sound(); From 2b18b2c831e3efc14fe3ba1ba6bee5a82a8460bc Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 21 Jun 2020 18:05:30 +0300 Subject: [PATCH 272/439] sound: moro: fix device attr permissions Signed-off-by: Denis Efremov --- sound/soc/codecs/moro_sound.c | 217 +++++++++------------------------- 1 file changed, 54 insertions(+), 163 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 0e66750e0972..37ab93ebd1b8 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -333,13 +333,6 @@ static void update_audio_hub(void) set_eq(); } -/* sysfs interface functions */ -static ssize_t moro_sound_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", moro_sound); -} - - static ssize_t moro_sound_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -414,11 +407,6 @@ static ssize_t headphone_limits_show(struct device *dev, struct device_attribute /* Earpiece Volume */ -static ssize_t earpiece_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", earpiece_gain); -} - static ssize_t earpiece_gain_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -449,10 +437,6 @@ static ssize_t earpiece_limits_show(struct device *dev, struct device_attribute } /* EQ */ -static ssize_t eq_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq); -} static ssize_t eq_store(struct device *dev, struct device_attribute *attr, const 
char *buf, size_t count) @@ -508,131 +492,6 @@ static ssize_t eq_gains_store(struct device *dev, struct device_attribute *attr, return count; } -static ssize_t eq_b1_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[0]); -} - -static ssize_t eq_b1_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[0] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b2_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[1]); -} - -static ssize_t eq_b2_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[1] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b3_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[2]); -} - -static ssize_t eq_b3_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[2] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b4_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[3]); -} - -static ssize_t eq_b4_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; 
- - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[3] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b5_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[4]); -} - -static ssize_t eq_b5_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[4] = val; - set_eq_gains(); - - return count; -} - static ssize_t reg_dump_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int out2_ena, out2l_mix, out2r_mix, eq1_ena, eq2_ena, eq1_mix, eq2_mix, eq_b1, @@ -701,34 +560,66 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c /* Sysfs permissions */ -static DEVICE_ATTR(moro_sound, 0664, moro_sound_show, moro_sound_store); -static DEVICE_ATTR(headphone_gain, 0664, headphone_gain_show, headphone_gain_store); -static DEVICE_ATTR(headphone_limits, 0664, headphone_limits_show, NULL); -static DEVICE_ATTR(earpiece_gain, 0664, earpiece_gain_show, earpiece_gain_store); -static DEVICE_ATTR(earpiece_limits, 0664, earpiece_limits_show, NULL); -static DEVICE_ATTR(eq, 0664, eq_show, eq_store); -static DEVICE_ATTR(eq_gains, 0664, eq_gains_show, eq_gains_store); -static DEVICE_ATTR(eq_b1_gain, 0664, eq_b1_gain_show, eq_b1_gain_store); -static DEVICE_ATTR(eq_b2_gain, 0664, eq_b2_gain_show, eq_b2_gain_store); -static DEVICE_ATTR(eq_b3_gain, 0664, eq_b3_gain_show, eq_b3_gain_store); -static DEVICE_ATTR(eq_b4_gain, 0664, eq_b4_gain_show, eq_b4_gain_store); -static DEVICE_ATTR(eq_b5_gain, 0664, eq_b5_gain_show, eq_b5_gain_store); -static DEVICE_ATTR(version, 0664, version_show, NULL); -static DEVICE_ATTR(reg_dump, 0664, reg_dump_show, NULL); +static DEVICE_ATTR_RW(headphone_gain); +static DEVICE_ATTR_RW(eq_gains); +static 
DEVICE_ATTR_RO(earpiece_limits); +static DEVICE_ATTR_RO(headphone_limits); +static DEVICE_ATTR_RO(version); +static DEVICE_ATTR_RO(reg_dump); + +static struct dev_ext_attribute dev_attr_moro_sound = { + __ATTR(moro_sound, 0644, device_show_bool, moro_sound_store), + &moro_sound +}; +static struct dev_ext_attribute dev_attr_earpiece_gain = { + __ATTR(earpiece_gain, 0644, device_show_int, earpiece_gain_store), + &earpiece_gain +}; +static struct dev_ext_attribute dev_attr_eq = { + __ATTR(eq, 0644, device_show_bool, eq_store), + &eq +}; + + +#define MORO_DEVICE_ATTR_EQ_B_GAIN(num) \ +static ssize_t eq_b##num##_gain_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + if (sscanf(buf, "%d", &val) < 1) \ + return -EINVAL; \ + if (val < EQ_GAIN_MIN) \ + val = EQ_GAIN_MIN; \ + if (val > EQ_GAIN_MAX) \ + val = EQ_GAIN_MAX; \ + eq_gains[num - 1] = val; \ + set_eq_gains(); \ + return count; \ +} \ +struct dev_ext_attribute dev_attr_eq_b##num##_gain = { \ + __ATTR(eq_b##num##_gain, 0644, device_show_int, eq_b##num##_gain_store), \ + &eq_gains[num - 1] \ +}; + +MORO_DEVICE_ATTR_EQ_B_GAIN(1); +MORO_DEVICE_ATTR_EQ_B_GAIN(2); +MORO_DEVICE_ATTR_EQ_B_GAIN(3); +MORO_DEVICE_ATTR_EQ_B_GAIN(4); +MORO_DEVICE_ATTR_EQ_B_GAIN(5); static struct attribute *moro_sound_attributes[] = { - &dev_attr_moro_sound.attr, + &dev_attr_moro_sound.attr.attr, &dev_attr_headphone_gain.attr, &dev_attr_headphone_limits.attr, - &dev_attr_earpiece_gain.attr, + &dev_attr_earpiece_gain.attr.attr, &dev_attr_earpiece_limits.attr, - &dev_attr_eq.attr, + &dev_attr_eq.attr.attr, &dev_attr_eq_gains.attr, - &dev_attr_eq_b1_gain.attr, - &dev_attr_eq_b2_gain.attr, - &dev_attr_eq_b3_gain.attr, - &dev_attr_eq_b4_gain.attr, - &dev_attr_eq_b5_gain.attr, + &dev_attr_eq_b1_gain.attr.attr, + &dev_attr_eq_b2_gain.attr.attr, + &dev_attr_eq_b3_gain.attr.attr, + &dev_attr_eq_b4_gain.attr.attr, + &dev_attr_eq_b5_gain.attr.attr, &dev_attr_version.attr, 
&dev_attr_reg_dump.attr, NULL From 694aef27584262591e0193a7e49e98ad8f047082 Mon Sep 17 00:00:00 2001 From: Joe Maples Date: Thu, 11 May 2017 22:36:29 -0500 Subject: [PATCH 273/439] cpufreq: Introduce fingerprint boost driver This driver, based on Sultanxda's input boost driver, boosts all available cpus to max freq after receiving an input notification from the fingerprint sensor to reduce lag. Signed-off-by: Joe Maples --- drivers/cpufreq/Kconfig | 6 + drivers/cpufreq/Makefile | 2 + drivers/cpufreq/fp-boost.c | 393 +++++++++++++++++++++++++++++++++++++ 3 files changed, 401 insertions(+) create mode 100644 drivers/cpufreq/fp-boost.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index bf084b3a6715..a8a1e16dbe7d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -242,6 +242,12 @@ config CPUFREQ_DT_PLATDEV If in doubt, say N. +config FINGERPRINT_BOOST + bool "Fingerprint Boost" + default n + help + Boosts available CPUs to max frequency on fingerprint sensor input. + if X86 source "drivers/cpufreq/Kconfig.x86" endif diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 39cc6c6e76cd..dc75e7ce3e83 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o +obj-$(CONFIG_FINGERPRINT_BOOST) += fp-boost.o + ################################################################################## # x86 drivers. # Link order matters. 
K8 is preferred to ACPI because of firmware bugs in early diff --git a/drivers/cpufreq/fp-boost.c b/drivers/cpufreq/fp-boost.c new file mode 100644 index 000000000000..9ff17a0f8525 --- /dev/null +++ b/drivers/cpufreq/fp-boost.c @@ -0,0 +1,393 @@ +/* + * Copyright (C) 2014-2017, Sultanxda + * (C) 2017, Joe Maples + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* drivers/cpufreq/fp-boost.c: Fingerprint boost driver + * + * fp-boost is a simple cpufreq driver that boosts all CPU frequencies + * to max when an input notification is recieved from the devices + * fingerprint sensor. This aims to wake speed-up device unlock, + * especially from the deep sleep state. + * + * fp-boost is based on cpu_input_boost by Sultanxda, and all copyright + * information has been retained. Huge thanks to him for writing the + * initial driver this was based on. + */ + +#define pr_fmt(fmt) "fp-boost: " fmt + +#include +#include +#include +#include + +/* Available bits for boost_policy state */ +#define DRIVER_ENABLED (1U << 0) +#define FINGERPRINT_BOOST (1U << 1) + +/* Fingerprint sensor input key */ +#define FINGERPRINT_KEY 0x2ee + +/* The duration in milliseconds for the fingerprint boost */ +#define FP_BOOST_MS (2000) + +/* + * "fp_config" = "fingerprint boost configuration". This contains the data and + * workers used for a single input-boost event. 
+ */ +struct fp_config { + struct delayed_work boost_work; + struct delayed_work unboost_work; + uint32_t adj_duration_ms; + uint32_t cpus_to_boost; + uint32_t duration_ms; + uint32_t freq[2]; +}; + +/* + * This is the struct that contains all of the data for the entire driver. It + * encapsulates all of the other structs, so all data can be accessed through + * this struct. + */ +struct boost_policy { + spinlock_t lock; + struct fp_config fp; + struct workqueue_struct *wq; + uint32_t state; +}; + +/* Global pointer to all of the data for the driver */ +static struct boost_policy *boost_policy_g; + +static uint32_t get_boost_state(struct boost_policy *b); +static void set_boost_bit(struct boost_policy *b, uint32_t state); +static void clear_boost_bit(struct boost_policy *b, uint32_t state); +static void unboost_all_cpus(struct boost_policy *b); +static void update_online_cpu_policy(void); + +/* Boolean to let us know if input is already recieved */ +static bool touched; + +static void fp_boost_main(struct work_struct *work) +{ + struct boost_policy *b = boost_policy_g; + struct fp_config *fp = &b->fp; + + /* All CPUs will be boosted to policy->max */ + set_boost_bit(b, FINGERPRINT_BOOST); + + /* Immediately boost the online CPUs */ + update_online_cpu_policy(); + + queue_delayed_work(b->wq, &fp->unboost_work, + msecs_to_jiffies(FP_BOOST_MS)); + +} + +static void fp_unboost_main(struct work_struct *work) +{ + struct boost_policy *b = boost_policy_g; + pr_info("Unboosting\n"); + touched = false; + /* This clears the wake-boost bit and unboosts everything */ + unboost_all_cpus(b); +} + +static int do_cpu_boost(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct cpufreq_policy *policy = data; + struct boost_policy *b = boost_policy_g; + uint32_t state; + + if (action != CPUFREQ_ADJUST) + return NOTIFY_OK; + + state = get_boost_state(b); + + /* + * Don't do anything when the driver is disabled, unless there are + * still CPUs that need to be 
unboosted. + */ + if (!(state & DRIVER_ENABLED) && + policy->min == policy->cpuinfo.min_freq) + return NOTIFY_OK; + + /* Boost CPU to max frequency for fingerprint boost */ + if (state & FINGERPRINT_BOOST) { + pr_info("Boosting\n"); + policy->cur = policy->max; + policy->min = policy->max; + return NOTIFY_OK; + } + + return NOTIFY_OK; +} + +static struct notifier_block do_cpu_boost_nb = { + .notifier_call = do_cpu_boost, + .priority = INT_MAX, +}; + +static void cpu_fp_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + struct boost_policy *b = boost_policy_g; + struct fp_config *fp = &b->fp; + uint32_t state; + + state = get_boost_state(b); + + if (!(state & DRIVER_ENABLED) || touched) + return; + + pr_info("Recieved input event\n"); + touched = true; + set_boost_bit(b, FINGERPRINT_BOOST); + + /* Delaying work to ensure all CPUs are online */ + queue_delayed_work(b->wq, &fp->boost_work, + msecs_to_jiffies(20)); +} + +static int cpu_fp_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int ret; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpu_fp_handle"; + + ret = input_register_handle(handle); + if (ret) + goto err2; + + ret = input_open_device(handle); + if (ret) + goto err1; + + return 0; + +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return ret; +} + +static void cpu_fp_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id cpu_fp_ids[] = { + /* fingerprint sensor */ + { + .flags = INPUT_DEVICE_ID_MATCH_KEYBIT, + .keybit = { [BIT_WORD(FINGERPRINT_KEY)] = BIT_MASK(FINGERPRINT_KEY) }, + }, + { }, +}; + +static struct input_handler cpu_fp_input_handler = { + .event = cpu_fp_input_event, 
+ .connect = cpu_fp_input_connect, + .disconnect = cpu_fp_input_disconnect, + .name = "cpu_fp_handler", + .id_table = cpu_fp_ids, +}; + +static uint32_t get_boost_state(struct boost_policy *b) +{ + uint32_t state; + + spin_lock(&b->lock); + state = b->state; + spin_unlock(&b->lock); + + return state; +} + +static void set_boost_bit(struct boost_policy *b, uint32_t state) +{ + spin_lock(&b->lock); + b->state |= state; + spin_unlock(&b->lock); +} + +static void clear_boost_bit(struct boost_policy *b, uint32_t state) +{ + spin_lock(&b->lock); + b->state &= ~state; + spin_unlock(&b->lock); +} + +static void unboost_all_cpus(struct boost_policy *b) +{ + struct fp_config *fp = &b->fp; + + /* Clear boost bit */ + clear_boost_bit(b, FINGERPRINT_BOOST); + + /* Clear cpus_to_boost bits for all CPUs */ + fp->cpus_to_boost = 0; + + /* Immediately unboost the online CPUs */ + update_online_cpu_policy(); +} + +static void update_online_cpu_policy(void) +{ + uint32_t cpu; + + /* Trigger cpufreq notifier for online CPUs */ + get_online_cpus(); + for_each_online_cpu(cpu) + cpufreq_update_policy(cpu); + put_online_cpus(); +} + +static ssize_t enabled_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct boost_policy *b = boost_policy_g; + uint32_t data; + int ret; + + ret = kstrtou32(buf, 10, &data); + if (ret) + return -EINVAL; + + if (data) { + set_boost_bit(b, DRIVER_ENABLED); + } else { + clear_boost_bit(b, DRIVER_ENABLED); + /* Stop everything */ + unboost_all_cpus(b); + } + + return size; +} + +static ssize_t enabled_read(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct boost_policy *b = boost_policy_g; + + return snprintf(buf, PAGE_SIZE, "%u\n", + get_boost_state(b) & DRIVER_ENABLED); +} + +static DEVICE_ATTR(enabled, 0644, + enabled_read, enabled_write); + +static struct attribute *cpu_fp_attr[] = { + &dev_attr_enabled.attr, + NULL +}; + +static struct attribute_group cpu_fp_attr_group = { + .attrs = 
cpu_fp_attr, +}; + +static int sysfs_fp_init(void) +{ + struct kobject *kobj; + int ret; + + kobj = kobject_create_and_add("fp_boost", kernel_kobj); + if (!kobj) { + pr_err("Failed to create kobject\n"); + return -ENOMEM; + } + + ret = sysfs_create_group(kobj, &cpu_fp_attr_group); + if (ret) { + pr_err("Failed to create sysfs interface\n"); + kobject_put(kobj); + } + + return ret; +} + +static struct boost_policy *alloc_boost_policy(void) +{ + struct boost_policy *b; + + b = kzalloc(sizeof(*b), GFP_KERNEL); + if (!b) + return NULL; + + b->wq = alloc_workqueue("cpu_fp_wq", WQ_HIGHPRI, 0); + if (!b->wq) { + pr_err("Failed to allocate workqueue\n"); + goto free_b; + } + + return b; + +free_b: + kfree(b); + return NULL; +} + +static int __init cpu_fp_init(void) +{ + struct boost_policy *b; + int ret; + touched = false; + + b = alloc_boost_policy(); + if (!b) { + pr_err("Failed to allocate boost policy\n"); + return -ENOMEM; + } + + spin_lock_init(&b->lock); + + INIT_DELAYED_WORK(&b->fp.boost_work, fp_boost_main); + INIT_DELAYED_WORK(&b->fp.unboost_work, fp_unboost_main); + + /* Allow global boost config access */ + boost_policy_g = b; + + ret = input_register_handler(&cpu_fp_input_handler); + if (ret) { + pr_err("Failed to register input handler, err: %d\n", ret); + goto free_mem; + } + + ret = sysfs_fp_init(); + if (ret) + goto input_unregister; + + cpufreq_register_notifier(&do_cpu_boost_nb, CPUFREQ_POLICY_NOTIFIER); + + return 0; + +input_unregister: + input_unregister_handler(&cpu_fp_input_handler); +free_mem: + kfree(b); + return ret; +} +late_initcall(cpu_fp_init); + From 3e019d73dcca1802dd9716446109e59038e7c41f Mon Sep 17 00:00:00 2001 From: Angheloaia Victor Date: Thu, 26 Mar 2020 21:30:28 +0200 Subject: [PATCH 274/439] block: Add Maple I/O scheduler --- block/Kconfig.iosched | 8 + block/Makefile | 1 + block/maple-iosched.c | 459 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 468 insertions(+) create mode 100644 block/maple-iosched.c diff --git 
a/block/Kconfig.iosched b/block/Kconfig.iosched index a4a8914bf7a4..9ee2932fd174 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -22,6 +22,10 @@ config IOSCHED_DEADLINE a new point in the service tree and doing a batch of IO from there in case of expiry. +config IOSCHED_MAPLE + tristate "Maple I/O scheduler" + default n + config IOSCHED_CFQ tristate "CFQ I/O scheduler" default y @@ -54,6 +58,9 @@ choice config DEFAULT_CFQ bool "CFQ" if IOSCHED_CFQ=y + config DEFAULT_MAPLE + bool "MAPLE" if IOSCHED_MAPLE=y + config DEFAULT_NOOP bool "No-op" @@ -63,6 +70,7 @@ config DEFAULT_IOSCHED string default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ + default "maple" if DEFAULT_MAPLE default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index b0249cd24633..dc1c0dfdbed1 100644 --- a/block/Makefile +++ b/block/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/maple-iosched.c b/block/maple-iosched.c new file mode 100644 index 000000000000..beb474e83ec9 --- /dev/null +++ b/block/maple-iosched.c @@ -0,0 +1,459 @@ +/* + * Maple I/O Scheduler + * Based on Zen and SIO. + * + * Copyright (C) 2016 Joe Maples + * (C) 2012 Brandon Berhent + * + * Maple uses a first come first serve style algorithm with seperated read/write + * handling to allow for read biases. By prioritizing reads, simple tasks should improve + * in performance. Maple also uses hooks for the powersuspend driver to increase + * expirations when power is suspended to decrease workload. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#define MAPLE_IOSCHED_PATCHLEVEL (8) + +enum { ASYNC, SYNC }; + +/* Tunables */ +static const int sync_read_expire = 350; /* max time before a read sync is submitted. */ +static const int sync_write_expire = 550; /* max time before a write sync is submitted. */ +static const int async_read_expire = 250; /* ditto for read async, these limits are SOFT! */ +static const int async_write_expire = 450; /* ditto for write async, these limits are SOFT! */ +static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. */ +static const int writes_starved = 4; /* max times reads can starve a write */ +static const int sleep_latency_multiple = 10; /* multple for expire time when device is asleep */ + +/* Elevator data */ +struct maple_data { + /* Request queues */ + struct list_head fifo_list[2][2]; + + /* Attributes */ + unsigned int batched; + unsigned int starved; + + /* Settings */ + int fifo_expire[2][2]; + int fifo_batch; + int writes_starved; + int sleep_latency_multiple; + + /* Display state */ + struct notifier_block fb_notifier; + bool display_on; +}; + +static inline struct maple_data * +maple_get_data(struct request_queue *q) { + return q->elevator->elevator_data; +} + +static void +maple_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. 
+ */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)rq->fifo_time)) { + list_move(&rq->queuelist, &next->queuelist); + rq->fifo_time = next->fifo_time; + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +maple_add_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int dir = rq_data_dir(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. + */ + + /* inrease expiration when device is asleep */ + unsigned int fifo_expire_suspended = mdata->fifo_expire[sync][dir] * sleep_latency_multiple; + if (mdata->display_on && mdata->fifo_expire[sync][dir]) { + rq->fifo_time = jiffies + mdata->fifo_expire[sync][dir]; + list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]); + } else if (!mdata->display_on && fifo_expire_suspended) { + rq->fifo_time = jiffies + fifo_expire_suspended; + list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]); + } +} + +static struct request * +maple_expired_request(struct maple_data *mdata, int sync, int data_dir) +{ + struct list_head *list = &mdata->fifo_list[sync][data_dir]; + struct request *rq; + + if (list_empty(list)) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(list->next); + + /* Request has expired */ + if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +static struct request * +maple_choose_expired_request(struct maple_data *mdata) +{ + struct request *rq_sync_read = maple_expired_request(mdata, SYNC, READ); + struct request *rq_sync_write = maple_expired_request(mdata, SYNC, WRITE); + struct request *rq_async_read = maple_expired_request(mdata, ASYNC, READ); + struct request *rq_async_write = maple_expired_request(mdata, ASYNC, WRITE); + + /* Reset (non-expired-)batch-counter */ + mdata->batched = 0; + + /* + * Check expired 
requests. + * Asynchronous requests have priority over synchronous. + * Read requests have priority over write. + */ + + if (rq_async_read && rq_sync_read) { + if (time_after((unsigned long)rq_sync_read->fifo_time, (unsigned long)rq_async_read->fifo_time)) + return rq_async_read; + } else if (rq_async_read) { + return rq_async_read; + } else if (rq_sync_read) { + return rq_sync_read; + } + + if (rq_async_write && rq_sync_write) { +if (time_after((unsigned long)rq_sync_write->fifo_time, (unsigned long)rq_async_write->fifo_time)) + return rq_async_write; + } else if (rq_async_write) { + return rq_async_write; + } else if (rq_sync_write) { + return rq_sync_write; + } + + return NULL; +} + +static struct request * +maple_choose_request(struct maple_data *mdata, int data_dir) +{ + struct list_head *sync = mdata->fifo_list[SYNC]; + struct list_head *async = mdata->fifo_list[ASYNC]; + + /* Increase (non-expired-)batch-counter */ + mdata->batched++; + + + /* + * Retrieve request from available fifo list. + * Asynchronous requests have priority over synchronous. + * Read requests have priority over write. + */ + if (!list_empty(&async[data_dir])) + return rq_entry_fifo(async[data_dir].next); + if (!list_empty(&sync[data_dir])) + return rq_entry_fifo(sync[data_dir].next); + + if (!list_empty(&async[!data_dir])) + return rq_entry_fifo(async[!data_dir].next); + if (!list_empty(&sync[!data_dir])) + return rq_entry_fifo(sync[!data_dir].next); + + return NULL; +} + +static inline void +maple_dispatch_request(struct maple_data *mdata, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. 
+ */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + if (rq_data_dir(rq)) { + mdata->starved = 0; + } else { + if (!list_empty(&mdata->fifo_list[SYNC][WRITE]) || + !list_empty(&mdata->fifo_list[ASYNC][WRITE])) + mdata->starved++; + } +} + +static int +maple_dispatch_requests(struct request_queue *q, int force) +{ + struct maple_data *mdata = maple_get_data(q); + struct request *rq = NULL; + int data_dir = READ; + + /* + * Retrieve any expired request after a batch of + * sequential requests. + */ + if (mdata->batched >= mdata->fifo_batch) + rq = maple_choose_expired_request(mdata); + + /* Retrieve request */ + if (!rq) { + /* Treat writes fairly while suspended, otherwise allow them to be starved */ + if (mdata->display_on && mdata->starved >= mdata->writes_starved) + data_dir = WRITE; + else if (!mdata->display_on && mdata->starved >= 1) + data_dir = WRITE; + + rq = maple_choose_request(mdata, data_dir); + if (!rq) + return 0; + } + + /* Dispatch request */ + maple_dispatch_request(mdata, rq); + + return 1; +} + +static struct request * +maple_former_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.prev == &mdata->fifo_list[sync][data_dir]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +maple_latter_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.next == &mdata->fifo_list[sync][data_dir]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static int fb_notifier_callback(struct notifier_block *self, + unsigned long event, void *data) +{ + struct maple_data *mdata = container_of(self, + 
struct maple_data, fb_notifier); + struct fb_event *evdata = data; + int *blank; + + if (evdata && evdata->data && event == FB_EVENT_BLANK) { + blank = evdata->data; + switch (*blank) { + case FB_BLANK_UNBLANK: + mdata->display_on = true; + break; + case FB_BLANK_POWERDOWN: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_NORMAL: + mdata->display_on = false; + break; + } + } + + return 0; +} + +static int maple_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct maple_data *mdata; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + /* Allocate structure */ + mdata = kmalloc_node(sizeof(*mdata), GFP_KERNEL, q->node); + if (!mdata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = mdata; + + mdata->fb_notifier.notifier_call = fb_notifier_callback; + fb_register_client(&mdata->fb_notifier); + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&mdata->fifo_list[SYNC][READ]); + INIT_LIST_HEAD(&mdata->fifo_list[SYNC][WRITE]); + INIT_LIST_HEAD(&mdata->fifo_list[ASYNC][READ]); + INIT_LIST_HEAD(&mdata->fifo_list[ASYNC][WRITE]); + + /* Initialize data */ + mdata->batched = 0; + mdata->fifo_expire[SYNC][READ] = sync_read_expire; + mdata->fifo_expire[SYNC][WRITE] = sync_write_expire; + mdata->fifo_expire[ASYNC][READ] = async_read_expire; + mdata->fifo_expire[ASYNC][WRITE] = async_write_expire; + mdata->fifo_batch = fifo_batch; + mdata->writes_starved = writes_starved; + mdata->sleep_latency_multiple = sleep_latency_multiple; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void +maple_exit_queue(struct elevator_queue *e) +{ + struct maple_data *mdata = e->elevator_data; + + fb_unregister_client(&mdata->fb_notifier); + + /* Free structure */ + kfree(mdata); +} + +/* + * sysfs code + */ + +static ssize_t +maple_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t 
+maple_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct maple_data *mdata = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return maple_var_show(__data, (page)); \ +} +SHOW_FUNCTION(maple_sync_read_expire_show, mdata->fifo_expire[SYNC][READ], 1); +SHOW_FUNCTION(maple_sync_write_expire_show, mdata->fifo_expire[SYNC][WRITE], 1); +SHOW_FUNCTION(maple_async_read_expire_show, mdata->fifo_expire[ASYNC][READ], 1); +SHOW_FUNCTION(maple_async_write_expire_show, mdata->fifo_expire[ASYNC][WRITE], 1); +SHOW_FUNCTION(maple_fifo_batch_show, mdata->fifo_batch, 0); +SHOW_FUNCTION(maple_writes_starved_show, mdata->writes_starved, 0); +SHOW_FUNCTION(maple_sleep_latency_multiple_show, mdata->sleep_latency_multiple, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct maple_data *mdata = e->elevator_data; \ + int __data; \ + int ret = maple_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(maple_sync_read_expire_store, &mdata->fifo_expire[SYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(maple_sync_write_expire_store, &mdata->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(maple_async_read_expire_store, &mdata->fifo_expire[ASYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(maple_async_write_expire_store, &mdata->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(maple_fifo_batch_store, &mdata->fifo_batch, 1, INT_MAX, 0); +STORE_FUNCTION(maple_writes_starved_store, 
&mdata->writes_starved, 1, INT_MAX, 0); +STORE_FUNCTION(maple_sleep_latency_multiple_store, &mdata->sleep_latency_multiple, 1, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, maple_##name##_show, \ + maple_##name##_store) + +static struct elv_fs_entry maple_attrs[] = { + DD_ATTR(sync_read_expire), + DD_ATTR(sync_write_expire), + DD_ATTR(async_read_expire), + DD_ATTR(async_write_expire), + DD_ATTR(fifo_batch), + DD_ATTR(writes_starved), + DD_ATTR(sleep_latency_multiple), + __ATTR_NULL +}; + +static struct elevator_type iosched_maple = { + .ops.sq = { + .elevator_merge_req_fn = maple_merged_requests, + .elevator_dispatch_fn = maple_dispatch_requests, + .elevator_add_req_fn = maple_add_request, + .elevator_former_req_fn = maple_former_request, + .elevator_latter_req_fn = maple_latter_request, + .elevator_init_fn = maple_init_queue, + .elevator_exit_fn = maple_exit_queue, + }, + + .elevator_attrs = maple_attrs, + .elevator_name = "maple", + .elevator_owner = THIS_MODULE, +}; + +static int __init maple_init(void) +{ + /* Register elevator */ + elv_register(&iosched_maple); + + return 0; +} + +static void __exit maple_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_maple); +} + +module_init(maple_init); +module_exit(maple_exit); + +MODULE_AUTHOR("Joe Maples"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Maple I/O Scheduler"); +MODULE_VERSION("1.0"); From 19fe90c023795f82c8a7964f2b541e9b07dd5f1a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 5 Apr 2020 16:54:43 +0300 Subject: [PATCH 275/439] block: Add FIOPS I/O scheduler Signed-off-by: Denis Efremov --- block/Kconfig.iosched | 12 + block/Makefile | 1 + block/fiops-iosched.c | 770 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 783 insertions(+) create mode 100644 block/fiops-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 9ee2932fd174..855fe54e5470 100644 --- a/block/Kconfig.iosched +++ 
b/block/Kconfig.iosched @@ -44,6 +44,14 @@ config CFQ_GROUP_IOSCHED ---help--- Enable group IO scheduling in CFQ. +config IOSCHED_FIOPS + tristate "IOPS based I/O scheduler" + default n + ---help--- + This is an IOPS based I/O scheduler. It will try to distribute + IOPS equally among all processes in the system. It's mainly for + Flash based storage. + choice prompt "Default I/O scheduler" @@ -61,6 +69,9 @@ choice config DEFAULT_MAPLE bool "MAPLE" if IOSCHED_MAPLE=y + config DEFAULT_FIOPS + bool "FIOPS" if IOSCHED_FIOPS=y + config DEFAULT_NOOP bool "No-op" @@ -71,6 +82,7 @@ config DEFAULT_IOSCHED default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ default "maple" if DEFAULT_MAPLE + default "fiops" if DEFAULT_FIOPS default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index dc1c0dfdbed1..2023d8e298b1 100644 --- a/block/Makefile +++ b/block/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o +obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/fiops-iosched.c b/block/fiops-iosched.c new file mode 100644 index 000000000000..72d22b5062e9 --- /dev/null +++ b/block/fiops-iosched.c @@ -0,0 +1,770 @@ +/* + * IOPS based IO scheduler. Based on CFQ. 
+ * Copyright (C) 2003 Jens Axboe + * Shaohua Li + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "blk.h" + +#define VIOS_SCALE_SHIFT 10 +#define VIOS_SCALE (1 << VIOS_SCALE_SHIFT) + +#define VIOS_READ_SCALE (1) +#define VIOS_WRITE_SCALE (1) +#define VIOS_SYNC_SCALE (2) +#define VIOS_ASYNC_SCALE (5) + +#define VIOS_PRIO_SCALE (5) + +struct fiops_rb_root { + struct rb_root rb; + struct rb_node *left; + unsigned count; + + u64 min_vios; +}; +#define FIOPS_RB_ROOT (struct fiops_rb_root) { .rb = RB_ROOT} + +enum wl_prio_t { + IDLE_WORKLOAD = 0, + BE_WORKLOAD = 1, + RT_WORKLOAD = 2, + FIOPS_PRIO_NR, +}; + +struct fiops_data { + struct request_queue *queue; + + struct fiops_rb_root service_tree[FIOPS_PRIO_NR]; + + unsigned int busy_queues; + unsigned int in_flight[2]; + + struct work_struct unplug_work; + + unsigned int read_scale; + unsigned int write_scale; + unsigned int sync_scale; + unsigned int async_scale; +}; + +struct fiops_ioc { + struct io_cq icq; + + unsigned int flags; + struct fiops_data *fiopsd; + struct rb_node rb_node; + u64 vios; /* key in service_tree */ + struct fiops_rb_root *service_tree; + + unsigned int in_flight; + + struct rb_root sort_list; + struct list_head fifo; + + pid_t pid; + unsigned short ioprio; + enum wl_prio_t wl_type; +}; + +#define ioc_service_tree(ioc) (&((ioc)->fiopsd->service_tree[(ioc)->wl_type])) +#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) + +enum ioc_state_flags { + FIOPS_IOC_FLAG_on_rr = 0, /* on round-robin busy list */ + FIOPS_IOC_FLAG_prio_changed, /* task priority has changed */ +}; + +#define FIOPS_IOC_FNS(name) \ +static inline void fiops_mark_ioc_##name(struct fiops_ioc *ioc) \ +{ \ + ioc->flags |= (1 << FIOPS_IOC_FLAG_##name); \ +} \ +static inline void fiops_clear_ioc_##name(struct fiops_ioc *ioc) \ +{ \ + ioc->flags &= ~(1 << FIOPS_IOC_FLAG_##name); \ +} \ +static inline int fiops_ioc_##name(const struct fiops_ioc *ioc) \ +{ \ + return ((ioc)->flags & (1 << 
FIOPS_IOC_FLAG_##name)) != 0; \ +} + +FIOPS_IOC_FNS(on_rr); +FIOPS_IOC_FNS(prio_changed); +#undef FIOPS_IOC_FNS + +#define fiops_log_ioc(fiopsd, ioc, fmt, args...) \ + blk_add_trace_msg((fiopsd)->queue, "ioc%d " fmt, (ioc)->pid, ##args) +#define fiops_log(fiopsd, fmt, args...) \ + blk_add_trace_msg((fiopsd)->queue, "fiops " fmt, ##args) + +enum wl_prio_t fiops_wl_type(short prio_class) +{ + if (prio_class == IOPRIO_CLASS_RT) + return RT_WORKLOAD; + if (prio_class == IOPRIO_CLASS_BE) + return BE_WORKLOAD; + return IDLE_WORKLOAD; +} + +static inline struct fiops_ioc *icq_to_cic(struct io_cq *icq) +{ + /* cic->icq is the first member, %NULL will convert to %NULL */ + return container_of(icq, struct fiops_ioc, icq); +} + +static inline struct fiops_ioc *fiops_cic_lookup(struct fiops_data *fiopsd, + struct io_context *ioc) +{ + if (ioc) + return icq_to_cic(ioc_lookup_icq(ioc, fiopsd->queue)); + return NULL; +} + +/* + * The below is leftmost cache rbtree addon + */ +static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root) +{ + /* Service tree is empty */ + if (!root->count) + return NULL; + + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry(root->left, struct fiops_ioc, rb_node); + + return NULL; +} + +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + +static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root) +{ + if (root->left == n) + root->left = NULL; + rb_erase_init(n, &root->rb); + --root->count; +} + +static inline u64 max_vios(u64 min_vios, u64 vios) +{ + s64 delta = (s64)(vios - min_vios); + if (delta > 0) + min_vios = vios; + + return min_vios; +} + +static void fiops_update_min_vios(struct fiops_rb_root *service_tree) +{ + struct fiops_ioc *ioc; + + ioc = fiops_rb_first(service_tree); + if (!ioc) + return; + service_tree->min_vios = max_vios(service_tree->min_vios, ioc->vios); +} + +/* + * The fiopsd->service_trees holds all 
pending fiops_ioc's that have + * requests waiting to be processed. It is sorted in the order that + * we will service the queues. + */ +static void fiops_service_tree_add(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + struct rb_node **p, *parent; + struct fiops_ioc *__ioc; + struct fiops_rb_root *service_tree = ioc_service_tree(ioc); + u64 vios; + int left; + + /* New added IOC */ + if (RB_EMPTY_NODE(&ioc->rb_node)) { + if (ioc->in_flight > 0) + vios = ioc->vios; + else + vios = max_vios(service_tree->min_vios, ioc->vios); + } else { + vios = ioc->vios; + /* ioc->service_tree might not equal to service_tree */ + fiops_rb_erase(&ioc->rb_node, ioc->service_tree); + ioc->service_tree = NULL; + } + + fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios); + + left = 1; + parent = NULL; + ioc->service_tree = service_tree; + p = &service_tree->rb.rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + __ioc = rb_entry(parent, struct fiops_ioc, rb_node); + + /* + * sort by key, that represents service time. + */ + if (vios < __ioc->vios) + n = &(*p)->rb_left; + else { + n = &(*p)->rb_right; + left = 0; + } + + p = n; + } + + if (left) + service_tree->left = &ioc->rb_node; + + ioc->vios = vios; + rb_link_node(&ioc->rb_node, parent, p); + rb_insert_color(&ioc->rb_node, &service_tree->rb); + service_tree->count++; + + fiops_update_min_vios(service_tree); +} + +/* + * Update ioc's position in the service tree. + */ +static void fiops_resort_rr_list(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + /* + * Resorting requires the ioc to be on the RR list already. 
+ */ + if (fiops_ioc_on_rr(ioc)) + fiops_service_tree_add(fiopsd, ioc); +} + +/* + * add to busy list of queues for service, trying to be fair in ordering + * the pending list according to last request service + */ +static void fiops_add_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) +{ + BUG_ON(fiops_ioc_on_rr(ioc)); + fiops_mark_ioc_on_rr(ioc); + + fiopsd->busy_queues++; + + fiops_resort_rr_list(fiopsd, ioc); +} + +/* + * Called when the ioc no longer has requests pending, remove it from + * the service tree. + */ +static void fiops_del_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) +{ + BUG_ON(!fiops_ioc_on_rr(ioc)); + fiops_clear_ioc_on_rr(ioc); + + if (!RB_EMPTY_NODE(&ioc->rb_node)) { + fiops_rb_erase(&ioc->rb_node, ioc->service_tree); + ioc->service_tree = NULL; + } + + BUG_ON(!fiopsd->busy_queues); + fiopsd->busy_queues--; +} + +/* + * rb tree support functions + */ +static void fiops_del_rq_rb(struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + + elv_rb_del(&ioc->sort_list, rq); +} + +static void fiops_add_rq_rb(struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + struct fiops_data *fiopsd = ioc->fiopsd; + + elv_rb_add(&ioc->sort_list, rq); + + if (!fiops_ioc_on_rr(ioc)) + fiops_add_ioc_rr(fiopsd, ioc); +} + +static void fiops_reposition_rq_rb(struct fiops_ioc *ioc, struct request *rq) +{ + elv_rb_del(&ioc->sort_list, rq); + fiops_add_rq_rb(rq); +} + +static void fiops_remove_request(struct request *rq) +{ + list_del_init(&rq->queuelist); + fiops_del_rq_rb(rq); +} + +static u64 fiops_scaled_vios(struct fiops_data *fiopsd, + struct fiops_ioc *ioc, struct request *rq) +{ + int vios = VIOS_SCALE; + + if (rq_data_dir(rq) == WRITE) + vios = vios * fiopsd->write_scale / fiopsd->read_scale; + + if (!rq_is_sync(rq)) + vios = vios * fiopsd->async_scale / fiopsd->sync_scale; + + vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE; + + return vios; +} + +/* return vios dispatched */ +static u64 
fiops_dispatch_request(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + struct request *rq; + struct request_queue *q = fiopsd->queue; + + rq = rq_entry_fifo(ioc->fifo.next); + + fiops_remove_request(rq); + elv_dispatch_add_tail(q, rq); + + fiopsd->in_flight[rq_is_sync(rq)]++; + ioc->in_flight++; + + return fiops_scaled_vios(fiopsd, ioc, rq); +} + +static int fiops_forced_dispatch(struct fiops_data *fiopsd) +{ + struct fiops_ioc *ioc; + int dispatched = 0; + int i; + + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { + while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { + ioc = fiops_rb_first(&fiopsd->service_tree[i]); + + while (!list_empty(&ioc->fifo)) { + fiops_dispatch_request(fiopsd, ioc); + dispatched++; + } + if (fiops_ioc_on_rr(ioc)) + fiops_del_ioc_rr(fiopsd, ioc); + } + } + return dispatched; +} + +static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd) +{ + struct fiops_ioc *ioc; + struct fiops_rb_root *service_tree = NULL; + int i; + struct request *rq; + + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { + if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { + service_tree = &fiopsd->service_tree[i]; + break; + } + } + + if (!service_tree) + return NULL; + + ioc = fiops_rb_first(service_tree); + + rq = rq_entry_fifo(ioc->fifo.next); + /* + * we are the only async task and sync requests are in flight, delay a + * moment. 
If there are other tasks coming, sync tasks have no chance + * to be starved, don't delay + */ + if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 && + service_tree->count == 1) { + fiops_log_ioc(fiopsd, ioc, + "postpone async, in_flight async %d sync %d", + fiopsd->in_flight[0], fiopsd->in_flight[1]); + return NULL; + } + + return ioc; +} + +static void fiops_charge_vios(struct fiops_data *fiopsd, + struct fiops_ioc *ioc, u64 vios) +{ + struct fiops_rb_root *service_tree = ioc->service_tree; + ioc->vios += vios; + + fiops_log_ioc(fiopsd, ioc, "charge vios %lld, new vios %lld", vios, ioc->vios); + + if (RB_EMPTY_ROOT(&ioc->sort_list)) + fiops_del_ioc_rr(fiopsd, ioc); + else + fiops_resort_rr_list(fiopsd, ioc); + + fiops_update_min_vios(service_tree); +} + +static int fiops_dispatch_requests(struct request_queue *q, int force) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *ioc; + u64 vios; + + if (unlikely(force)) + return fiops_forced_dispatch(fiopsd); + + ioc = fiops_select_ioc(fiopsd); + if (!ioc) + return 0; + + vios = fiops_dispatch_request(fiopsd, ioc); + + fiops_charge_vios(fiopsd, ioc, vios); + return 1; +} + +static void fiops_init_prio_data(struct fiops_ioc *cic) +{ + struct task_struct *tsk = current; + struct io_context *ioc = cic->icq.ioc; + int ioprio_class; + + if (!fiops_ioc_prio_changed(cic)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "fiops: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, inherit CPU scheduling settings + */ + cic->ioprio = task_nice_ioprio(tsk); + cic->wl_type = fiops_wl_type(task_nice_ioclass(tsk)); + break; + case IOPRIO_CLASS_RT: + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio); + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_RT); + break; + case IOPRIO_CLASS_BE: + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio); + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_BE); + break; + case IOPRIO_CLASS_IDLE: + 
cic->wl_type = fiops_wl_type(IOPRIO_CLASS_IDLE); + cic->ioprio = 7; + break; + } + + fiops_clear_ioc_prio_changed(cic); +} + +static void fiops_insert_request(struct request_queue *q, struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + + fiops_init_prio_data(ioc); + + list_add_tail(&rq->queuelist, &ioc->fifo); + + fiops_add_rq_rb(rq); +} + +/* + * scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing + */ +static inline void fiops_schedule_dispatch(struct fiops_data *fiopsd) +{ + if (fiopsd->busy_queues) + kblockd_schedule_work(&fiopsd->unplug_work); +} + +static void fiops_completed_request(struct request_queue *q, struct request *rq) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *ioc = RQ_CIC(rq); + + fiopsd->in_flight[rq_is_sync(rq)]--; + ioc->in_flight--; + + fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d", + ioc->in_flight, fiopsd->busy_queues); + + if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0) + fiops_schedule_dispatch(fiopsd); +} + +static struct request * +fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio) +{ + struct task_struct *tsk = current; + struct fiops_ioc *cic; + + cic = fiops_cic_lookup(fiopsd, tsk->io_context); + + if (cic) { + return elv_rb_find(&cic->sort_list, bio_end_sector(bio)); + } + + return NULL; +} + +static enum elv_merge +fiops_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct request *__rq; + + __rq = fiops_find_rq_fmerge(fiopsd, bio); + if (__rq && elv_bio_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void fiops_merged_request(struct request_queue *q, struct request *req, + enum elv_merge type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct fiops_ioc *ioc = RQ_CIC(req); + + fiops_reposition_rq_rb(ioc, req); + } +} + +static void 
+fiops_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + struct fiops_data *fiopsd = q->elevator->elevator_data; + + fiops_remove_request(next); + + ioc = RQ_CIC(next); + /* + * all requests of this task are merged to other tasks, delete it + * from the service tree. + */ + if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list)) + fiops_del_ioc_rr(fiopsd, ioc); +} + +static int fiops_allow_bio_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *cic; + + /* + * Lookup the ioc that this bio will be queued with. Allow + * merge only if rq is queued there. + */ + cic = fiops_cic_lookup(fiopsd, current->io_context); + + return cic == RQ_CIC(rq); +} + +static int fiops_allow_rq_merge(struct request_queue *q, struct request *rq, + struct request *next) +{ + return RQ_CIC(rq) == RQ_CIC(next); +} + +static void fiops_exit_queue(struct elevator_queue *e) +{ + struct fiops_data *fiopsd = e->elevator_data; + + cancel_work_sync(&fiopsd->unplug_work); + + kfree(fiopsd); +} + +static void fiops_kick_queue(struct work_struct *work) +{ + struct fiops_data *fiopsd = + container_of(work, struct fiops_data, unplug_work); + struct request_queue *q = fiopsd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(q); + spin_unlock_irq(q->queue_lock); +} + +static int fiops_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct fiops_data *fiopsd; + int i; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node); + if (!fiopsd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = fiopsd; + + fiopsd->queue = q; + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++) + fiopsd->service_tree[i] = 
FIOPS_RB_ROOT; + + INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue); + + fiopsd->read_scale = VIOS_READ_SCALE; + fiopsd->write_scale = VIOS_WRITE_SCALE; + fiopsd->sync_scale = VIOS_SYNC_SCALE; + fiopsd->async_scale = VIOS_ASYNC_SCALE; + + return 0; +} + +static void fiops_init_icq(struct io_cq *icq) +{ + struct fiops_data *fiopsd = icq->q->elevator->elevator_data; + struct fiops_ioc *ioc = icq_to_cic(icq); + + RB_CLEAR_NODE(&ioc->rb_node); + INIT_LIST_HEAD(&ioc->fifo); + ioc->sort_list = RB_ROOT; + + ioc->fiopsd = fiopsd; + + ioc->pid = current->pid; + fiops_mark_ioc_prio_changed(ioc); +} + +/* + * sysfs parts below --> + */ +static ssize_t +fiops_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +fiops_var_store(unsigned int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct fiops_data *fiopsd = e->elevator_data; \ + return fiops_var_show(__VAR, (page)); \ +} +SHOW_FUNCTION(fiops_read_scale_show, fiopsd->read_scale); +SHOW_FUNCTION(fiops_write_scale_show, fiopsd->write_scale); +SHOW_FUNCTION(fiops_sync_scale_show, fiopsd->sync_scale); +SHOW_FUNCTION(fiops_async_scale_show, fiopsd->async_scale); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct fiops_data *fiopsd = e->elevator_data; \ + unsigned int __data; \ + int ret = fiops_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(fiops_read_scale_store, &fiopsd->read_scale, 1, 100); +STORE_FUNCTION(fiops_write_scale_store, &fiopsd->write_scale, 1, 100); +STORE_FUNCTION(fiops_sync_scale_store, &fiopsd->sync_scale, 1, 100); 
+STORE_FUNCTION(fiops_async_scale_store, &fiopsd->async_scale, 1, 100); +#undef STORE_FUNCTION + +#define FIOPS_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, fiops_##name##_show, fiops_##name##_store) + +static struct elv_fs_entry fiops_attrs[] = { + FIOPS_ATTR(read_scale), + FIOPS_ATTR(write_scale), + FIOPS_ATTR(sync_scale), + FIOPS_ATTR(async_scale), + __ATTR_NULL +}; + +static struct elevator_type iosched_fiops = { + .ops.sq = { + .elevator_merge_fn = fiops_merge, + .elevator_merged_fn = fiops_merged_request, + .elevator_merge_req_fn = fiops_merged_requests, + .elevator_allow_bio_merge_fn = fiops_allow_bio_merge, + .elevator_allow_rq_merge_fn = fiops_allow_rq_merge, + .elevator_dispatch_fn = fiops_dispatch_requests, + .elevator_add_req_fn = fiops_insert_request, + .elevator_completed_req_fn = fiops_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_icq_fn = fiops_init_icq, + .elevator_init_fn = fiops_init_queue, + .elevator_exit_fn = fiops_exit_queue, + }, + .icq_size = sizeof(struct fiops_ioc), + .icq_align = __alignof__(struct fiops_ioc), + .elevator_attrs = fiops_attrs, + .elevator_name = "fiops", + .elevator_owner = THIS_MODULE, +}; + +static int __init fiops_init(void) +{ + return elv_register(&iosched_fiops); +} + +static void __exit fiops_exit(void) +{ + elv_unregister(&iosched_fiops); +} + +module_init(fiops_init); +module_exit(fiops_exit); + +MODULE_AUTHOR("Jens Axboe, Shaohua Li "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IOPS based IO scheduler"); From a3ee26e692e135fff98cd358e8bf34d66f420b04 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 6 Apr 2020 10:25:27 +0300 Subject: [PATCH 276/439] block: Add SIO I/O scheduler Signed-off-by: Denis Efremov --- block/Kconfig.iosched | 14 ++ block/Makefile | 1 + block/sio-iosched.c | 412 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 427 insertions(+) create mode 100644 block/sio-iosched.c diff --git 
a/block/Kconfig.iosched b/block/Kconfig.iosched index 855fe54e5470..02ace3ceb2b0 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -52,6 +52,16 @@ config IOSCHED_FIOPS IOPS equally among all processes in the system. It's mainly for Flash based storage. +config IOSCHED_SIO + tristate "Simple I/O scheduler" + default n + ---help--- + The Simple I/O scheduler is an extremely simple scheduler, + based on noop and deadline, that relies on deadlines to + ensure fairness. The algorithm does not do any sorting but + basic merging, trying to keep a minimum overhead. It is aimed + mainly for aleatory access devices (eg: flash devices). + choice prompt "Default I/O scheduler" @@ -72,6 +82,9 @@ choice config DEFAULT_FIOPS bool "FIOPS" if IOSCHED_FIOPS=y + config DEFAULT_SIO + bool "SIO" if IOSCHED_SIO=y + config DEFAULT_NOOP bool "No-op" @@ -83,6 +96,7 @@ config DEFAULT_IOSCHED default "cfq" if DEFAULT_CFQ default "maple" if DEFAULT_MAPLE default "fiops" if DEFAULT_FIOPS + default "sio" if DEFAULT_SIO default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 2023d8e298b1..25320b87d941 100644 --- a/block/Makefile +++ b/block/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/sio-iosched.c b/block/sio-iosched.c new file mode 100644 index 000000000000..45b97b8db612 --- /dev/null +++ b/block/sio-iosched.c @@ -0,0 +1,412 @@ +/* + * Simple IO scheduler + * Based on Noop, Deadline and V(R) IO schedulers. 
+ * + * Copyright (C) 2012 Miguel Boton + * + * + * This algorithm does not do any kind of sorting, as it is aimed for + * aleatory access devices, but it does some basic merging. We try to + * keep minimum overhead to achieve low latency. + * + * Asynchronous and synchronous requests are not treated separately, but + * we relay on deadlines to ensure fairness. + * + */ +#include +#include +#include +#include +#include +#include +#include + +enum { ASYNC, SYNC }; + +/* Tunables */ +static const int sync_read_expire = HZ / 2; /* max time before a sync read is submitted. */ +static const int sync_write_expire = 2 * HZ; /* max time before a sync write is submitted. */ + +static const int async_read_expire = 4 * HZ; /* ditto for async, these limits are SOFT! */ +static const int async_write_expire = 16 * HZ; /* ditto for async, these limits are SOFT! */ + +static const int writes_starved = 2; /* max times reads can starve a write */ +static const int fifo_batch = 8; /* # of sequential requests treated as one + by the above parameters. For throughput. */ + +/* Elevator data */ +struct sio_data { + /* Request queues */ + struct list_head fifo_list[2][2]; + + /* Attributes */ + unsigned int batched; + unsigned int starved; + + /* Settings */ + int fifo_expire[2][2]; + int fifo_batch; + int writes_starved; +}; + +static void +sio_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. 
+ */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)rq->fifo_time)) { + list_move(&rq->queuelist, &next->queuelist); + rq->fifo_time = next->fifo_time; + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +sio_add_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. + */ + rq->fifo_time = jiffies + sd->fifo_expire[sync][data_dir]; + list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) +static int +sio_queue_empty(struct request_queue *q) +{ + struct sio_data *sd = q->elevator->elevator_data; + + /* Check if fifo lists are empty */ + return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) && + list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]); +} +#endif + +static struct request * +sio_expired_request(struct sio_data *sd, int sync, int data_dir) +{ + struct list_head *list = &sd->fifo_list[sync][data_dir]; + struct request *rq; + + if (list_empty(list)) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(list->next); + + /* Request has expired */ + if (time_after(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +static struct request * +sio_choose_expired_request(struct sio_data *sd) +{ + struct request *rq; + + /* + * Check expired requests. + * Asynchronous requests have priority over synchronous. + * Write requests have priority over read. 
+ */ + rq = sio_expired_request(sd, ASYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, ASYNC, READ); + if (rq) + return rq; + + rq = sio_expired_request(sd, SYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, SYNC, READ); + if (rq) + return rq; + + return NULL; +} + +static struct request * +sio_choose_request(struct sio_data *sd, int data_dir) +{ + struct list_head *sync = sd->fifo_list[SYNC]; + struct list_head *async = sd->fifo_list[ASYNC]; + + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. + * Read requests have priority over write. + */ + if (!list_empty(&sync[data_dir])) + return rq_entry_fifo(sync[data_dir].next); + if (!list_empty(&async[data_dir])) + return rq_entry_fifo(async[data_dir].next); + + if (!list_empty(&sync[!data_dir])) + return rq_entry_fifo(sync[!data_dir].next); + if (!list_empty(&async[!data_dir])) + return rq_entry_fifo(async[!data_dir].next); + + return NULL; +} + +static inline void +sio_dispatch_request(struct sio_data *sd, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. + */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + sd->batched++; + + if (rq_data_dir(rq)) + sd->starved = 0; + else + sd->starved++; +} + +static int +sio_dispatch_requests(struct request_queue *q, int force) +{ + struct sio_data *sd = q->elevator->elevator_data; + struct request *rq = NULL; + int data_dir = READ; + + /* + * Retrieve any expired request after a batch of + * sequential requests. 
+ */ + if (sd->batched > sd->fifo_batch) { + sd->batched = 0; + rq = sio_choose_expired_request(sd); + } + + /* Retrieve request */ + if (!rq) { + if (sd->starved > sd->writes_starved) + data_dir = WRITE; + + rq = sio_choose_request(sd, data_dir); + if (!rq) + return 0; + } + + /* Dispatch request */ + sio_dispatch_request(sd, rq); + + return 1; +} + +static struct request * +sio_former_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +sio_latter_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.next == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static int sio_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct sio_data *sd; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + /* Allocate structure */ + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node); + if (!sd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = sd; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]); + + /* Initialize data */ + sd->batched = 0; + sd->fifo_expire[SYNC][READ] = sync_read_expire; + sd->fifo_expire[SYNC][WRITE] = sync_write_expire; + 
sd->fifo_expire[ASYNC][READ] = async_read_expire; + sd->fifo_expire[ASYNC][WRITE] = async_write_expire; + sd->fifo_batch = fifo_batch; + + return 0; +} + +static void +sio_exit_queue(struct elevator_queue *e) +{ + struct sio_data *sd = e->elevator_data; + + BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE])); + + /* Free structure */ + kfree(sd); +} + +/* + * sysfs code + */ + +static ssize_t +sio_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +sio_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return sio_var_show(__data, (page)); \ +} +SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1); +SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1); +SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1); +SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1); +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0); +SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data; \ + int ret = sio_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} 
+STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0); +STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \ + sio_##name##_store) + +static struct elv_fs_entry sio_attrs[] = { + DD_ATTR(sync_read_expire), + DD_ATTR(sync_write_expire), + DD_ATTR(async_read_expire), + DD_ATTR(async_write_expire), + DD_ATTR(fifo_batch), + DD_ATTR(writes_starved), + __ATTR_NULL +}; + +static struct elevator_type iosched_sio = { + .ops.sq = { + .elevator_merge_req_fn = sio_merged_requests, + .elevator_dispatch_fn = sio_dispatch_requests, + .elevator_add_req_fn = sio_add_request, +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) + .elevator_queue_empty_fn = sio_queue_empty, +#endif + .elevator_former_req_fn = sio_former_request, + .elevator_latter_req_fn = sio_latter_request, + .elevator_init_fn = sio_init_queue, + .elevator_exit_fn = sio_exit_queue, + }, + + .elevator_attrs = sio_attrs, + .elevator_name = "sio", + .elevator_owner = THIS_MODULE, +}; + +static int __init sio_init(void) +{ + /* Register elevator */ + elv_register(&iosched_sio); + + return 0; +} + +static void __exit sio_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_sio); +} + +module_init(sio_init); +module_exit(sio_exit); + +MODULE_AUTHOR("Miguel Boton"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple IO scheduler"); +MODULE_VERSION("0.2"); + From 13da1291f0f63a3bdfe596e714b9bf113c6d44f3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 6 Apr 2020 10:35:16 +0300 Subject: [PATCH 277/439] 
block: Add ZEN I/O Scheduler Signed-off-by: Denis Efremov --- block/Kconfig.iosched | 11 ++ block/Makefile | 1 + block/zen-iosched.c | 288 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 300 insertions(+) create mode 100644 block/zen-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 02ace3ceb2b0..ff662fa22fe1 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -62,6 +62,13 @@ config IOSCHED_SIO basic merging, trying to keep a minimum overhead. It is aimed mainly for aleatory access devices (eg: flash devices). +config IOSCHED_ZEN + tristate "Zen I/O scheduler" + default n + ---help--- + FCFS, dispatches are back-inserted, deadlines ensure fairness. + Should work best with devices where there is no travel delay. + choice prompt "Default I/O scheduler" @@ -85,6 +92,9 @@ choice config DEFAULT_SIO bool "SIO" if IOSCHED_SIO=y + config DEFAULT_ZEN + bool "ZEN" if IOSCHED_ZEN=y + config DEFAULT_NOOP bool "No-op" @@ -97,6 +107,7 @@ config DEFAULT_IOSCHED default "maple" if DEFAULT_MAPLE default "fiops" if DEFAULT_FIOPS default "sio" if DEFAULT_SIO + default "zen" if DEFAULT_ZEN default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 25320b87d941..96179eddd688 100644 --- a/block/Makefile +++ b/block/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o +obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/zen-iosched.c b/block/zen-iosched.c new file mode 100644 index 000000000000..80af3ef83243 --- /dev/null +++ b/block/zen-iosched.c @@ -0,0 +1,288 @@ +/* + * Zen IO scheduler + * Primarily based on Noop, deadline, and SIO IO schedulers. 
+ * + * Copyright (C) 2012 Brandon Berhent + * + * FCFS, dispatches are back-inserted, deadlines ensure fairness. + * Should work best with devices where there is no travel delay. + */ +#include +#include +#include +#include +#include +#include + +enum zen_data_dir { ASYNC, SYNC }; + +static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */ +static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */ +static const int fifo_batch = 1; + +struct zen_data { + /* Runtime Data */ + /* Requests are only present on fifo_list */ + struct list_head fifo_list[2]; + + unsigned int batching; /* number of sequential requests made */ + + /* tunables */ + int fifo_expire[2]; + int fifo_batch; +}; + +static inline struct zen_data * +zen_get_data(struct request_queue *q) { + return q->elevator->elevator_data; +} + +static void zen_dispatch(struct zen_data *, struct request *); + +static void +zen_merged_requests(struct request_queue *q, struct request *req, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to arq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)req->fifo_time)) { + list_move(&req->queuelist, &next->queuelist); + req->fifo_time = next->fifo_time; + } + } + + /* next request is gone */ + rq_fifo_clear(next); +} + +static void zen_add_request(struct request_queue *q, struct request *rq) +{ + struct zen_data *zdata = zen_get_data(q); + const int sync = rq_is_sync(rq); + + if (zdata->fifo_expire[sync]) { + rq->fifo_time = jiffies + zdata->fifo_expire[sync]; + list_add_tail(&rq->queuelist, &zdata->fifo_list[sync]); + } +} + +static void zen_dispatch(struct zen_data *zdata, struct request *rq) +{ + /* Remove request from list and dispatch it */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + /* Increment # of sequential 
requests */ + zdata->batching++; +} + +/* + * get the first expired request in direction ddir + */ +static struct request * +zen_expired_request(struct zen_data *zdata, int ddir) +{ + struct request *rq; + + if (list_empty(&zdata->fifo_list[ddir])) + return NULL; + + rq = rq_entry_fifo(zdata->fifo_list[ddir].next); + if (time_after(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +/* + * zen_check_fifo returns 0 if there are no expired requests on the fifo, + * otherwise it returns the next expired request + */ +static struct request * +zen_check_fifo(struct zen_data *zdata) +{ + struct request *rq_sync = zen_expired_request(zdata, SYNC); + struct request *rq_async = zen_expired_request(zdata, ASYNC); + + if (rq_async && rq_sync) { + if (time_after((unsigned long)rq_async->fifo_time, (unsigned long)rq_sync->fifo_time)) + return rq_sync; + } else if (rq_sync) { + return rq_sync; + } else if (rq_async) { + return rq_async; + } + + return 0; +} + +static struct request * +zen_choose_request(struct zen_data *zdata) +{ + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. 
+ */ + if (!list_empty(&zdata->fifo_list[SYNC])) + return rq_entry_fifo(zdata->fifo_list[SYNC].next); + if (!list_empty(&zdata->fifo_list[ASYNC])) + return rq_entry_fifo(zdata->fifo_list[ASYNC].next); + + return NULL; +} + +static int zen_dispatch_requests(struct request_queue *q, int force) +{ + struct zen_data *zdata = zen_get_data(q); + struct request *rq = NULL; + + /* Check for and issue expired requests */ + if (zdata->batching > zdata->fifo_batch) { + zdata->batching = 0; + rq = zen_check_fifo(zdata); + } + + if (!rq) { + rq = zen_choose_request(zdata); + if (!rq) + return 0; + } + + zen_dispatch(zdata, rq); + + return 1; +} + +static int zen_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct zen_data *zdata; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + zdata = kmalloc_node(sizeof(*zdata), GFP_KERNEL, q->node); + if (!zdata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = zdata; + + INIT_LIST_HEAD(&zdata->fifo_list[SYNC]); + INIT_LIST_HEAD(&zdata->fifo_list[ASYNC]); + zdata->fifo_expire[SYNC] = sync_expire; + zdata->fifo_expire[ASYNC] = async_expire; + zdata->fifo_batch = fifo_batch; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void zen_exit_queue(struct elevator_queue *e) +{ + struct zen_data *zdata = e->elevator_data; + + BUG_ON(!list_empty(&zdata->fifo_list[SYNC])); + BUG_ON(!list_empty(&zdata->fifo_list[ASYNC])); + kfree(zdata); +} + +/* Sysfs */ +static ssize_t +zen_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +zen_var_store(int *var, const char *page, size_t count) +{ + *var = simple_strtol(page, NULL, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = 
jiffies_to_msecs(__data); \ + return zen_var_show(__data, (page)); \ +} +SHOW_FUNCTION(zen_sync_expire_show, zdata->fifo_expire[SYNC], 1); +SHOW_FUNCTION(zen_async_expire_show, zdata->fifo_expire[ASYNC], 1); +SHOW_FUNCTION(zen_fifo_batch_show, zdata->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data; \ + int ret = zen_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(zen_sync_expire_store, &zdata->fifo_expire[SYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_async_expire_store, &zdata->fifo_expire[ASYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_fifo_batch_store, &zdata->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, zen_##name##_show, \ + zen_##name##_store) + +static struct elv_fs_entry zen_attrs[] = { + DD_ATTR(sync_expire), + DD_ATTR(async_expire), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_zen = { + .ops.sq = { + .elevator_merge_req_fn = zen_merged_requests, + .elevator_dispatch_fn = zen_dispatch_requests, + .elevator_add_req_fn = zen_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = zen_init_queue, + .elevator_exit_fn = zen_exit_queue, + }, + .elevator_attrs = zen_attrs, + .elevator_name = "zen", + .elevator_owner = THIS_MODULE, +}; + +static int __init zen_init(void) +{ + return elv_register(&iosched_zen); +} + +static void __exit zen_exit(void) +{ + elv_unregister(&iosched_zen); +} + +module_init(zen_init); +module_exit(zen_exit); + + +MODULE_AUTHOR("Brandon Berhent"); +MODULE_LICENSE("GPL"); 
+MODULE_DESCRIPTION("Zen IO scheduler"); +MODULE_VERSION("1.0"); From 76c68be33e0346f0608875fb618fed436b9e5335 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 9 Apr 2020 00:59:33 +0300 Subject: [PATCH 278/439] block: Add Anxiety I/O scheduler Signed-off-by: Denis Efremov --- block/Kconfig.iosched | 12 ++ block/Makefile | 1 + block/anxiety-iosched.c | 255 ++++++++++++++++++++++++++++++++++++ 3 files changed, 268 insertions(+) create mode 100644 block/anxiety-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index ff662fa22fe1..57b77ecca2a5 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -69,6 +69,14 @@ config IOSCHED_ZEN FCFS, dispatches are back-inserted, deadlines ensure fairness. Should work best with devices where there is no travel delay. +config IOSCHED_ANXIETY + tristate "Anxiety I/O scheduler" + default n + ---help--- + The Anxiety I/O scheduler prioritizes latency over everything + else. When a request comes in, it will use a lightweight + selection algorithm to swiftly process the current pending task. 
+ choice prompt "Default I/O scheduler" @@ -95,6 +103,9 @@ choice config DEFAULT_ZEN bool "ZEN" if IOSCHED_ZEN=y + config DEFAULT_ANXIETY + bool "Anxiety" if IOSCHED_ANXIETY=y + config DEFAULT_NOOP bool "No-op" @@ -108,6 +119,7 @@ config DEFAULT_IOSCHED default "fiops" if DEFAULT_FIOPS default "sio" if DEFAULT_SIO default "zen" if DEFAULT_ZEN + default "anxiety" if DEFAULT_ANXIETY default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 96179eddd688..f9009a1aed6e 100644 --- a/block/Makefile +++ b/block/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o +obj-$(CONFIG_IOSCHED_ANXIETY) += anxiety-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/anxiety-iosched.c b/block/anxiety-iosched.c new file mode 100644 index 000000000000..c7d818eff167 --- /dev/null +++ b/block/anxiety-iosched.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Anxiety I/O Scheduler + * + * Copyright (c) 2020, Tyler Nijmeh + */ + +#include +#include +#include +#include +#include +#include + +/* Batch this many synchronous requests at a time */ +#define DEFAULT_SYNC_RATIO (8) + +/* Run each batch this many times*/ +#define DEFAULT_BATCH_COUNT (4) + +struct anxiety_data { + struct list_head sync_queue; + struct list_head async_queue; + + /* Tunables */ + uint8_t sync_ratio; + uint8_t batch_count; +}; + +static inline struct request *anxiety_next_entry(struct list_head *queue) +{ + return list_first_entry(queue, struct request, + queuelist); +} + +static void anxiety_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + list_del_init(&next->queuelist); +} + +static inline int __anxiety_dispatch(struct request_queue *q, + 
struct request *rq) +{ + if (unlikely(!rq)) + return -EINVAL; + + list_del_init(&rq->queuelist); + elv_dispatch_add_tail(q, rq); + + return 0; +} + +static uint16_t anxiety_dispatch_batch(struct request_queue *q) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + uint8_t i, j; + uint16_t dispatched = 0; + int ret; + + /* Perform each batch adata->batch_count many times */ + for (i = 0; i < adata->batch_count; i++) { + /* Batch sync requests according to tunables */ + for (j = 0; j < adata->sync_ratio; j++) { + if (list_empty(&adata->sync_queue)) + break; + + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->sync_queue)); + + if (!ret) + dispatched++; + } + + /* Submit one async request after the sync batch to avoid starvation */ + if (!list_empty(&adata->async_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->async_queue)); + + if (!ret) + dispatched++; + } + + /* If we didn't have anything to dispatch; don't batch again */ + if (!dispatched) + break; + } + + return dispatched; +} + +static uint16_t anxiety_dispatch_drain(struct request_queue *q) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + uint16_t dispatched = 0; + int ret; + + /* + * Drain out all of the synchronous requests first, + * then drain the asynchronous requests. + */ + while (!list_empty(&adata->sync_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->sync_queue)); + + if (!ret) + dispatched++; + } + + while (!list_empty(&adata->async_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->async_queue)); + + if (!ret) + dispatched++; + } + + return dispatched; +} + +static int anxiety_dispatch(struct request_queue *q, int force) +{ + /* + * When requested by the elevator, a full queue drain can be + * performed in one scheduler dispatch. 
+ */ + if (unlikely(force)) + return anxiety_dispatch_drain(q); + + return anxiety_dispatch_batch(q); +} + +static void anxiety_add_request(struct request_queue *q, struct request *rq) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + + list_add_tail(&rq->queuelist, + rq_is_sync(rq) ? &adata->sync_queue : &adata->async_queue); +} + +static int anxiety_init_queue(struct request_queue *q, + struct elevator_type *elv) +{ + struct anxiety_data *adata; + struct elevator_queue *eq = elevator_alloc(q, elv); + + if (!eq) + return -ENOMEM; + + /* Allocate the data */ + adata = kmalloc_node(sizeof(*adata), GFP_KERNEL, q->node); + if (!adata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + + /* Set the elevator data */ + eq->elevator_data = adata; + + /* Initialize */ + INIT_LIST_HEAD(&adata->sync_queue); + INIT_LIST_HEAD(&adata->async_queue); + adata->sync_ratio = DEFAULT_SYNC_RATIO; + adata->batch_count = DEFAULT_BATCH_COUNT; + + /* Set elevator to Anxiety */ + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + return 0; +} + +/* Sysfs access */ +static ssize_t anxiety_sync_ratio_show(struct elevator_queue *e, char *page) +{ + struct anxiety_data *adata = e->elevator_data; + + return snprintf(page, PAGE_SIZE, "%u\n", adata->sync_ratio); +} + +static ssize_t anxiety_sync_ratio_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct anxiety_data *adata = e->elevator_data; + int ret; + + ret = kstrtou8(page, 0, &adata->sync_ratio); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t anxiety_batch_count_show(struct elevator_queue *e, char *page) +{ + struct anxiety_data *adata = e->elevator_data; + + return snprintf(page, PAGE_SIZE, "%u\n", adata->batch_count); +} + +static ssize_t anxiety_batch_count_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct anxiety_data *adata = e->elevator_data; + int ret; + + ret = kstrtou8(page, 0, &adata->batch_count); + if (ret 
< 0) + return ret; + + if (adata->batch_count < 1) + adata->batch_count = 1; + + return count; +} + +static struct elv_fs_entry anxiety_attrs[] = { + __ATTR(sync_ratio, 0644, anxiety_sync_ratio_show, + anxiety_sync_ratio_store), + __ATTR(batch_count, 0644, anxiety_batch_count_show, + anxiety_batch_count_store), + __ATTR_NULL +}; + +static struct elevator_type elevator_anxiety = { + .ops.sq = { + .elevator_merge_req_fn = anxiety_merged_requests, + .elevator_dispatch_fn = anxiety_dispatch, + .elevator_add_req_fn = anxiety_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = anxiety_init_queue, + }, + .elevator_name = "anxiety", + .elevator_attrs = anxiety_attrs, + .elevator_owner = THIS_MODULE, +}; + +static int __init anxiety_init(void) +{ + return elv_register(&elevator_anxiety); +} + +static void __exit anxiety_exit(void) +{ + elv_unregister(&elevator_anxiety); +} + +module_init(anxiety_init); +module_exit(anxiety_exit); + +MODULE_AUTHOR("Tyler Nijmeh"); +MODULE_LICENSE("GPLv3"); +MODULE_DESCRIPTION("Anxiety I/O scheduler"); From cbaf9c0c0e040fc2b06033517ac3b0cfe8304d61 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 17:42:44 +0300 Subject: [PATCH 279/439] fs: add reiser4 filesystem Signed-off-by: Denis Efremov --- Documentation/filesystems/reiser4.txt | 75 + Documentation/process/changes.rst | 12 + fs/Kconfig | 1 + fs/Makefile | 1 + fs/fs-writeback.c | 105 +- fs/read_write.c | 18 +- fs/reiser4/Kconfig | 36 + fs/reiser4/Makefile | 105 + fs/reiser4/README | 128 + fs/reiser4/as_ops.c | 348 ++ fs/reiser4/block_alloc.c | 1176 +++++ fs/reiser4/block_alloc.h | 177 + fs/reiser4/blocknrlist.c | 336 ++ fs/reiser4/blocknrset.c | 399 ++ fs/reiser4/carry.c | 1408 ++++++ fs/reiser4/carry.h | 445 ++ fs/reiser4/carry_ops.c | 2136 ++++++++++ fs/reiser4/carry_ops.h | 43 + fs/reiser4/checksum.c | 33 + fs/reiser4/checksum.h | 39 + fs/reiser4/context.c | 288 ++ fs/reiser4/context.h 
| 233 + fs/reiser4/coord.c | 928 ++++ fs/reiser4/coord.h | 399 ++ fs/reiser4/debug.c | 309 ++ fs/reiser4/debug.h | 353 ++ fs/reiser4/dformat.h | 73 + fs/reiser4/discard.c | 179 + fs/reiser4/discard.h | 42 + fs/reiser4/dscale.c | 192 + fs/reiser4/dscale.h | 28 + fs/reiser4/entd.c | 361 ++ fs/reiser4/entd.h | 90 + fs/reiser4/eottl.c | 510 +++ fs/reiser4/estimate.c | 129 + fs/reiser4/export_ops.c | 325 ++ fs/reiser4/flush.c | 3522 +++++++++++++++ fs/reiser4/flush.h | 290 ++ fs/reiser4/flush_queue.c | 677 +++ fs/reiser4/forward.h | 259 ++ fs/reiser4/fsdata.c | 801 ++++ fs/reiser4/fsdata.h | 203 + fs/reiser4/init_super.c | 806 ++++ fs/reiser4/inode.c | 711 +++ fs/reiser4/inode.h | 506 +++ fs/reiser4/ioctl.h | 41 + fs/reiser4/jnode.c | 1905 +++++++++ fs/reiser4/jnode.h | 704 +++ fs/reiser4/kassign.c | 677 +++ fs/reiser4/kassign.h | 111 + fs/reiser4/key.c | 138 + fs/reiser4/key.h | 392 ++ fs/reiser4/ktxnmgrd.c | 215 + fs/reiser4/ktxnmgrd.h | 52 + fs/reiser4/lock.c | 1237 ++++++ fs/reiser4/lock.h | 250 ++ fs/reiser4/oid.c | 141 + fs/reiser4/page_cache.c | 691 +++ fs/reiser4/page_cache.h | 64 + fs/reiser4/plugin/Makefile | 26 + fs/reiser4/plugin/cluster.c | 72 + fs/reiser4/plugin/cluster.h | 410 ++ fs/reiser4/plugin/compress/Makefile | 5 + fs/reiser4/plugin/compress/compress.c | 521 +++ fs/reiser4/plugin/compress/compress.h | 44 + fs/reiser4/plugin/compress/compress_mode.c | 162 + fs/reiser4/plugin/compress/lzoconf.h | 216 + fs/reiser4/plugin/compress/minilzo.c | 1967 +++++++++ fs/reiser4/plugin/compress/minilzo.h | 70 + fs/reiser4/plugin/crypto/cipher.c | 37 + fs/reiser4/plugin/crypto/cipher.h | 55 + fs/reiser4/plugin/crypto/digest.c | 58 + fs/reiser4/plugin/dir/Makefile | 5 + fs/reiser4/plugin/dir/dir.h | 36 + fs/reiser4/plugin/dir/hashed_dir.c | 81 + fs/reiser4/plugin/dir/seekable_dir.c | 46 + fs/reiser4/plugin/dir_plugin_common.c | 865 ++++ fs/reiser4/plugin/disk_format/Makefile | 5 + fs/reiser4/plugin/disk_format/disk_format.c | 38 + 
fs/reiser4/plugin/disk_format/disk_format.h | 27 + fs/reiser4/plugin/disk_format/disk_format40.c | 664 +++ fs/reiser4/plugin/disk_format/disk_format40.h | 111 + fs/reiser4/plugin/fibration.c | 175 + fs/reiser4/plugin/fibration.h | 37 + fs/reiser4/plugin/file/Makefile | 7 + fs/reiser4/plugin/file/cryptcompress.c | 3797 +++++++++++++++++ fs/reiser4/plugin/file/cryptcompress.h | 619 +++ fs/reiser4/plugin/file/file.c | 2796 ++++++++++++ fs/reiser4/plugin/file/file.h | 322 ++ fs/reiser4/plugin/file/file_conversion.c | 755 ++++ fs/reiser4/plugin/file/invert.c | 493 +++ fs/reiser4/plugin/file/symfile.c | 87 + fs/reiser4/plugin/file/symlink.c | 95 + fs/reiser4/plugin/file/tail_conversion.c | 763 ++++ fs/reiser4/plugin/file_ops.c | 119 + fs/reiser4/plugin/file_ops_readdir.c | 658 +++ fs/reiser4/plugin/file_plugin_common.c | 1004 +++++ fs/reiser4/plugin/hash.c | 352 ++ fs/reiser4/plugin/inode_ops.c | 891 ++++ fs/reiser4/plugin/inode_ops_rename.c | 958 +++++ fs/reiser4/plugin/item/Makefile | 18 + fs/reiser4/plugin/item/acl.h | 66 + fs/reiser4/plugin/item/blackbox.c | 142 + fs/reiser4/plugin/item/blackbox.h | 33 + fs/reiser4/plugin/item/cde.c | 1004 +++++ fs/reiser4/plugin/item/cde.h | 87 + fs/reiser4/plugin/item/ctail.c | 1769 ++++++++ fs/reiser4/plugin/item/ctail.h | 102 + fs/reiser4/plugin/item/extent.c | 197 + fs/reiser4/plugin/item/extent.h | 231 + fs/reiser4/plugin/item/extent_file_ops.c | 1434 +++++++ fs/reiser4/plugin/item/extent_flush_ops.c | 686 +++ fs/reiser4/plugin/item/extent_item_ops.c | 887 ++++ fs/reiser4/plugin/item/internal.c | 404 ++ fs/reiser4/plugin/item/internal.h | 57 + fs/reiser4/plugin/item/item.c | 719 ++++ fs/reiser4/plugin/item/item.h | 398 ++ fs/reiser4/plugin/item/sde.c | 186 + fs/reiser4/plugin/item/sde.h | 66 + fs/reiser4/plugin/item/static_stat.c | 1114 +++++ fs/reiser4/plugin/item/static_stat.h | 224 + fs/reiser4/plugin/item/tail.c | 810 ++++ fs/reiser4/plugin/item/tail.h | 59 + fs/reiser4/plugin/node/Makefile | 6 + 
fs/reiser4/plugin/node/node.c | 170 + fs/reiser4/plugin/node/node.h | 275 ++ fs/reiser4/plugin/node/node40.c | 3073 +++++++++++++ fs/reiser4/plugin/node/node40.h | 130 + fs/reiser4/plugin/node/node41.c | 137 + fs/reiser4/plugin/node/node41.h | 50 + fs/reiser4/plugin/object.c | 553 +++ fs/reiser4/plugin/object.h | 117 + fs/reiser4/plugin/plugin.c | 569 +++ fs/reiser4/plugin/plugin.h | 999 +++++ fs/reiser4/plugin/plugin_header.h | 150 + fs/reiser4/plugin/plugin_set.c | 387 ++ fs/reiser4/plugin/plugin_set.h | 78 + fs/reiser4/plugin/regular.c | 44 + fs/reiser4/plugin/security/Makefile | 4 + fs/reiser4/plugin/security/perm.c | 33 + fs/reiser4/plugin/security/perm.h | 38 + fs/reiser4/plugin/space/Makefile | 4 + fs/reiser4/plugin/space/bitmap.c | 1609 +++++++ fs/reiser4/plugin/space/bitmap.h | 47 + fs/reiser4/plugin/space/space_allocator.h | 80 + fs/reiser4/plugin/tail_policy.c | 113 + fs/reiser4/plugin/txmod.c | 1238 ++++++ fs/reiser4/pool.c | 231 + fs/reiser4/pool.h | 57 + fs/reiser4/readahead.c | 140 + fs/reiser4/readahead.h | 42 + fs/reiser4/reiser4.h | 260 ++ fs/reiser4/safe_link.c | 354 ++ fs/reiser4/safe_link.h | 29 + fs/reiser4/seal.c | 219 + fs/reiser4/seal.h | 49 + fs/reiser4/search.c | 1612 +++++++ fs/reiser4/status_flags.c | 180 + fs/reiser4/status_flags.h | 47 + fs/reiser4/super.c | 306 ++ fs/reiser4/super.h | 472 ++ fs/reiser4/super_ops.c | 783 ++++ fs/reiser4/tap.c | 376 ++ fs/reiser4/tap.h | 70 + fs/reiser4/tree.c | 1884 ++++++++ fs/reiser4/tree.h | 577 +++ fs/reiser4/tree_mod.c | 387 ++ fs/reiser4/tree_mod.h | 29 + fs/reiser4/tree_walk.c | 927 ++++ fs/reiser4/tree_walk.h | 125 + fs/reiser4/txnmgr.c | 3163 ++++++++++++++ fs/reiser4/txnmgr.h | 755 ++++ fs/reiser4/type_safe_hash.h | 320 ++ fs/reiser4/vfs_ops.c | 260 ++ fs/reiser4/vfs_ops.h | 60 + fs/reiser4/wander.c | 1757 ++++++++ fs/reiser4/wander.h | 135 + fs/reiser4/writeout.h | 21 + fs/reiser4/znode.c | 1027 +++++ fs/reiser4/znode.h | 435 ++ include/linux/fs.h | 21 + include/linux/mm.h | 1 + 
include/linux/sched.h | 1 + include/linux/writeback.h | 26 + mm/filemap.c | 1 + mm/page-writeback.c | 29 + mm/vmscan.c | 6 + 187 files changed, 83026 insertions(+), 47 deletions(-) create mode 100644 Documentation/filesystems/reiser4.txt create mode 100644 fs/reiser4/Kconfig create mode 100644 fs/reiser4/Makefile create mode 100644 fs/reiser4/README create mode 100644 fs/reiser4/as_ops.c create mode 100644 fs/reiser4/block_alloc.c create mode 100644 fs/reiser4/block_alloc.h create mode 100644 fs/reiser4/blocknrlist.c create mode 100644 fs/reiser4/blocknrset.c create mode 100644 fs/reiser4/carry.c create mode 100644 fs/reiser4/carry.h create mode 100644 fs/reiser4/carry_ops.c create mode 100644 fs/reiser4/carry_ops.h create mode 100644 fs/reiser4/checksum.c create mode 100644 fs/reiser4/checksum.h create mode 100644 fs/reiser4/context.c create mode 100644 fs/reiser4/context.h create mode 100644 fs/reiser4/coord.c create mode 100644 fs/reiser4/coord.h create mode 100644 fs/reiser4/debug.c create mode 100644 fs/reiser4/debug.h create mode 100644 fs/reiser4/dformat.h create mode 100644 fs/reiser4/discard.c create mode 100644 fs/reiser4/discard.h create mode 100644 fs/reiser4/dscale.c create mode 100644 fs/reiser4/dscale.h create mode 100644 fs/reiser4/entd.c create mode 100644 fs/reiser4/entd.h create mode 100644 fs/reiser4/eottl.c create mode 100644 fs/reiser4/estimate.c create mode 100644 fs/reiser4/export_ops.c create mode 100644 fs/reiser4/flush.c create mode 100644 fs/reiser4/flush.h create mode 100644 fs/reiser4/flush_queue.c create mode 100644 fs/reiser4/forward.h create mode 100644 fs/reiser4/fsdata.c create mode 100644 fs/reiser4/fsdata.h create mode 100644 fs/reiser4/init_super.c create mode 100644 fs/reiser4/inode.c create mode 100644 fs/reiser4/inode.h create mode 100644 fs/reiser4/ioctl.h create mode 100644 fs/reiser4/jnode.c create mode 100644 fs/reiser4/jnode.h create mode 100644 fs/reiser4/kassign.c create mode 100644 fs/reiser4/kassign.h create mode 
100644 fs/reiser4/key.c create mode 100644 fs/reiser4/key.h create mode 100644 fs/reiser4/ktxnmgrd.c create mode 100644 fs/reiser4/ktxnmgrd.h create mode 100644 fs/reiser4/lock.c create mode 100644 fs/reiser4/lock.h create mode 100644 fs/reiser4/oid.c create mode 100644 fs/reiser4/page_cache.c create mode 100644 fs/reiser4/page_cache.h create mode 100644 fs/reiser4/plugin/Makefile create mode 100644 fs/reiser4/plugin/cluster.c create mode 100644 fs/reiser4/plugin/cluster.h create mode 100644 fs/reiser4/plugin/compress/Makefile create mode 100644 fs/reiser4/plugin/compress/compress.c create mode 100644 fs/reiser4/plugin/compress/compress.h create mode 100644 fs/reiser4/plugin/compress/compress_mode.c create mode 100644 fs/reiser4/plugin/compress/lzoconf.h create mode 100644 fs/reiser4/plugin/compress/minilzo.c create mode 100644 fs/reiser4/plugin/compress/minilzo.h create mode 100644 fs/reiser4/plugin/crypto/cipher.c create mode 100644 fs/reiser4/plugin/crypto/cipher.h create mode 100644 fs/reiser4/plugin/crypto/digest.c create mode 100644 fs/reiser4/plugin/dir/Makefile create mode 100644 fs/reiser4/plugin/dir/dir.h create mode 100644 fs/reiser4/plugin/dir/hashed_dir.c create mode 100644 fs/reiser4/plugin/dir/seekable_dir.c create mode 100644 fs/reiser4/plugin/dir_plugin_common.c create mode 100644 fs/reiser4/plugin/disk_format/Makefile create mode 100644 fs/reiser4/plugin/disk_format/disk_format.c create mode 100644 fs/reiser4/plugin/disk_format/disk_format.h create mode 100644 fs/reiser4/plugin/disk_format/disk_format40.c create mode 100644 fs/reiser4/plugin/disk_format/disk_format40.h create mode 100644 fs/reiser4/plugin/fibration.c create mode 100644 fs/reiser4/plugin/fibration.h create mode 100644 fs/reiser4/plugin/file/Makefile create mode 100644 fs/reiser4/plugin/file/cryptcompress.c create mode 100644 fs/reiser4/plugin/file/cryptcompress.h create mode 100644 fs/reiser4/plugin/file/file.c create mode 100644 fs/reiser4/plugin/file/file.h create mode 100644 
fs/reiser4/plugin/file/file_conversion.c create mode 100644 fs/reiser4/plugin/file/invert.c create mode 100644 fs/reiser4/plugin/file/symfile.c create mode 100644 fs/reiser4/plugin/file/symlink.c create mode 100644 fs/reiser4/plugin/file/tail_conversion.c create mode 100644 fs/reiser4/plugin/file_ops.c create mode 100644 fs/reiser4/plugin/file_ops_readdir.c create mode 100644 fs/reiser4/plugin/file_plugin_common.c create mode 100644 fs/reiser4/plugin/hash.c create mode 100644 fs/reiser4/plugin/inode_ops.c create mode 100644 fs/reiser4/plugin/inode_ops_rename.c create mode 100644 fs/reiser4/plugin/item/Makefile create mode 100644 fs/reiser4/plugin/item/acl.h create mode 100644 fs/reiser4/plugin/item/blackbox.c create mode 100644 fs/reiser4/plugin/item/blackbox.h create mode 100644 fs/reiser4/plugin/item/cde.c create mode 100644 fs/reiser4/plugin/item/cde.h create mode 100644 fs/reiser4/plugin/item/ctail.c create mode 100644 fs/reiser4/plugin/item/ctail.h create mode 100644 fs/reiser4/plugin/item/extent.c create mode 100644 fs/reiser4/plugin/item/extent.h create mode 100644 fs/reiser4/plugin/item/extent_file_ops.c create mode 100644 fs/reiser4/plugin/item/extent_flush_ops.c create mode 100644 fs/reiser4/plugin/item/extent_item_ops.c create mode 100644 fs/reiser4/plugin/item/internal.c create mode 100644 fs/reiser4/plugin/item/internal.h create mode 100644 fs/reiser4/plugin/item/item.c create mode 100644 fs/reiser4/plugin/item/item.h create mode 100644 fs/reiser4/plugin/item/sde.c create mode 100644 fs/reiser4/plugin/item/sde.h create mode 100644 fs/reiser4/plugin/item/static_stat.c create mode 100644 fs/reiser4/plugin/item/static_stat.h create mode 100644 fs/reiser4/plugin/item/tail.c create mode 100644 fs/reiser4/plugin/item/tail.h create mode 100644 fs/reiser4/plugin/node/Makefile create mode 100644 fs/reiser4/plugin/node/node.c create mode 100644 fs/reiser4/plugin/node/node.h create mode 100644 fs/reiser4/plugin/node/node40.c create mode 100644 
fs/reiser4/plugin/node/node40.h create mode 100644 fs/reiser4/plugin/node/node41.c create mode 100644 fs/reiser4/plugin/node/node41.h create mode 100644 fs/reiser4/plugin/object.c create mode 100644 fs/reiser4/plugin/object.h create mode 100644 fs/reiser4/plugin/plugin.c create mode 100644 fs/reiser4/plugin/plugin.h create mode 100644 fs/reiser4/plugin/plugin_header.h create mode 100644 fs/reiser4/plugin/plugin_set.c create mode 100644 fs/reiser4/plugin/plugin_set.h create mode 100644 fs/reiser4/plugin/regular.c create mode 100644 fs/reiser4/plugin/security/Makefile create mode 100644 fs/reiser4/plugin/security/perm.c create mode 100644 fs/reiser4/plugin/security/perm.h create mode 100644 fs/reiser4/plugin/space/Makefile create mode 100644 fs/reiser4/plugin/space/bitmap.c create mode 100644 fs/reiser4/plugin/space/bitmap.h create mode 100644 fs/reiser4/plugin/space/space_allocator.h create mode 100644 fs/reiser4/plugin/tail_policy.c create mode 100644 fs/reiser4/plugin/txmod.c create mode 100644 fs/reiser4/pool.c create mode 100644 fs/reiser4/pool.h create mode 100644 fs/reiser4/readahead.c create mode 100644 fs/reiser4/readahead.h create mode 100644 fs/reiser4/reiser4.h create mode 100644 fs/reiser4/safe_link.c create mode 100644 fs/reiser4/safe_link.h create mode 100644 fs/reiser4/seal.c create mode 100644 fs/reiser4/seal.h create mode 100644 fs/reiser4/search.c create mode 100644 fs/reiser4/status_flags.c create mode 100644 fs/reiser4/status_flags.h create mode 100644 fs/reiser4/super.c create mode 100644 fs/reiser4/super.h create mode 100644 fs/reiser4/super_ops.c create mode 100644 fs/reiser4/tap.c create mode 100644 fs/reiser4/tap.h create mode 100644 fs/reiser4/tree.c create mode 100644 fs/reiser4/tree.h create mode 100644 fs/reiser4/tree_mod.c create mode 100644 fs/reiser4/tree_mod.h create mode 100644 fs/reiser4/tree_walk.c create mode 100644 fs/reiser4/tree_walk.h create mode 100644 fs/reiser4/txnmgr.c create mode 100644 fs/reiser4/txnmgr.h create mode 
100644 fs/reiser4/type_safe_hash.h create mode 100644 fs/reiser4/vfs_ops.c create mode 100644 fs/reiser4/vfs_ops.h create mode 100644 fs/reiser4/wander.c create mode 100644 fs/reiser4/wander.h create mode 100644 fs/reiser4/writeout.h create mode 100644 fs/reiser4/znode.c create mode 100644 fs/reiser4/znode.h diff --git a/Documentation/filesystems/reiser4.txt b/Documentation/filesystems/reiser4.txt new file mode 100644 index 000000000000..8e07c9e24aa0 --- /dev/null +++ b/Documentation/filesystems/reiser4.txt @@ -0,0 +1,75 @@ +Reiser4 filesystem +================== +Reiser4 is a file system based on dancing tree algorithms, and is +described at http://www.namesys.com + + +References +========== +web page http://namesys.com/v4/v4.html +source code ftp://ftp.namesys.com/pub/reiser4-for-2.6/ +userland tools ftp://ftp.namesys.com/pub/reiser4progs/ +install page http://www.namesys.com/install_v4.html + +Compile options +=============== +Enable reiser4 debug mode + This checks everything imaginable while reiser4 + runs + +Mount options +============= +tmgr.atom_max_size=N + Atoms containing more than N blocks will be forced to commit. + N is decimal. + Default is nr_free_pagecache_pages() / 2 at mount time. + +tmgr.atom_max_age=N + Atoms older than N seconds will be forced to commit. N is decimal. + Default is 600. + +tmgr.atom_max_flushers=N + Limit of concurrent flushers for one atom. 0 means no limit. + Default is 0. + +tree.cbk_cache.nr_slots=N + Number of slots in the cbk cache. + +flush.relocate_threshold=N + If flush finds more than N adjacent dirty leaf-level blocks it + will force them to be relocated. + Default is 64. + +flush.relocate_distance=N + If flush finds can find a block allocation closer than at most + N from the preceder it will relocate to that position. + Default is 64. + +flush.scan_maxnodes=N + The maximum number of nodes to scan left on a level during + flush. + Default is 10000. + +optimal_io_size=N + Preferred IO size. 
This value is used to set st_blksize of + struct stat. + Default is 65536. + +bsdgroups + Turn on BSD-style gid assignment. + +32bittimes + By default file in reiser4 have 64 bit timestamps. Files + created when filesystem is mounted with 32bittimes mount + option will get 32 bit timestamps. + +mtflush + Turn off concurrent flushing. + +nopseudo + Disable pseudo files support. See + http://namesys.com/v4/pseudo.html for more about pseudo files. + +dont_load_bitmap + Don't load all bitmap blocks at mount time, it is useful for + machines with tiny RAM and large disks. diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 73fcdcd52b87..2591b7571b85 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -174,6 +174,13 @@ The reiserfsprogs package should be used for reiserfs-3.6.x versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and ``reiserfsck``. These utils work on both i386 and alpha platforms. +Reiser4progs +------------ + +The reiser4progs package contains utilities for the reiser4 file system. +Detailed instructions are provided in the README file located at: +. 
+ Xfsprogs -------- @@ -371,6 +378,11 @@ Reiserfsprogs - +Reiser4progs +------------ + +- + Xfsprogs -------- diff --git a/fs/Kconfig b/fs/Kconfig index b42d356344ab..b7b75d07be2c 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -42,6 +42,7 @@ config FS_MBCACHE default y if EXT4_FS=y default m if EXT2_FS_XATTR || EXT4_FS +source "fs/reiser4/Kconfig" source "fs/reiserfs/Kconfig" source "fs/jfs/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 08fe716e57c7..12c076b80834 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_DLM) += dlm/ # Do not add any filesystems before this line obj-$(CONFIG_FSCACHE) += fscache/ obj-$(CONFIG_REISERFS_FS) += reiserfs/ +obj-$(CONFIG_REISER4_FS) += reiser4/ obj-$(CONFIG_EXT4_FS) += ext4/ # We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the # ext2 driver, which doesn't know about journalling! Explicitly request ext2 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d3f57eb0ebe..be633c836b79 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -39,25 +39,6 @@ struct wb_completion { atomic_t cnt; }; -/* - * Passed into wb_writeback(), essentially a subset of writeback_control - */ -struct wb_writeback_work { - long nr_pages; - struct super_block *sb; - enum writeback_sync_modes sync_mode; - unsigned int tagged_writepages:1; - unsigned int for_kupdate:1; - unsigned int range_cyclic:1; - unsigned int for_background:1; - unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ - unsigned int auto_free:1; /* free on completion */ - enum wb_reason reason; /* why was writeback initiated? 
*/ - - struct list_head list; /* pending work list */ - struct wb_completion *done; /* set if the caller waits */ -}; - /* * If one wants to wait for one or more wb_writeback_works, each work's * ->done should be set to a wb_completion defined using the following @@ -270,6 +251,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) wb_put(wb); } +EXPORT_SYMBOL_GPL(__inode_attach_wb); /** * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it @@ -1531,20 +1513,12 @@ static long writeback_chunk_size(struct bdi_writeback *wb, * unlock and relock that for each inode it ends up doing * IO for. */ -static long writeback_sb_inodes(struct super_block *sb, - struct bdi_writeback *wb, - struct wb_writeback_work *work) +long generic_writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all) { - struct writeback_control wbc = { - .sync_mode = work->sync_mode, - .tagged_writepages = work->tagged_writepages, - .for_kupdate = work->for_kupdate, - .for_background = work->for_background, - .for_sync = work->for_sync, - .range_cyclic = work->range_cyclic, - .range_start = 0, - .range_end = LLONG_MAX, - }; unsigned long start_time = jiffies; long write_chunk; long wrote = 0; /* count both pages and inodes */ @@ -1583,7 +1557,7 @@ static long writeback_sb_inodes(struct super_block *sb, spin_unlock(&inode->i_lock); continue; } - if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { + if ((inode->i_state & I_SYNC) && wbc->sync_mode != WB_SYNC_ALL) { /* * If this inode is locked for writeback and we are not * doing writeback-for-data-integrity, move it to @@ -1613,21 +1587,21 @@ static long writeback_sb_inodes(struct super_block *sb, continue; } inode->i_state |= I_SYNC; - wbc_attach_and_unlock_inode(&wbc, inode); + wbc_attach_and_unlock_inode(wbc, inode); write_chunk = writeback_chunk_size(wb, 
work); - wbc.nr_to_write = write_chunk; - wbc.pages_skipped = 0; + wbc->nr_to_write = write_chunk; + wbc->pages_skipped = 0; /* * We use I_SYNC to pin the inode in memory. While it is set * evict_inode() will wait so the inode cannot be freed. */ - __writeback_single_inode(inode, &wbc); + __writeback_single_inode(inode, wbc); - wbc_detach_inode(&wbc); - work->nr_pages -= write_chunk - wbc.nr_to_write; - wrote += write_chunk - wbc.nr_to_write; + wbc_detach_inode(wbc); + work->nr_pages -= write_chunk - wbc->nr_to_write; + wrote += write_chunk - wbc->nr_to_write; if (need_resched()) { /* @@ -1650,7 +1624,7 @@ static long writeback_sb_inodes(struct super_block *sb, spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) wrote++; - requeue_inode(inode, tmp_wb, &wbc); + requeue_inode(inode, tmp_wb, wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); @@ -1664,7 +1638,7 @@ static long writeback_sb_inodes(struct super_block *sb, * background threshold and other termination conditions. 
*/ if (wrote) { - if (time_is_before_jiffies(start_time + HZ / 10UL)) + if (!flush_all && time_is_before_jiffies(start_time + HZ / 10UL)) break; if (work->nr_pages <= 0) break; @@ -1672,6 +1646,26 @@ static long writeback_sb_inodes(struct super_block *sb, } return wrote; } +EXPORT_SYMBOL(generic_writeback_sb_inodes); + +long writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct wb_writeback_work *work) +{ + struct writeback_control wbc = { + .sync_mode = work->sync_mode, + .tagged_writepages = work->tagged_writepages, + .for_kupdate = work->for_kupdate, + .for_background = work->for_background, + .range_cyclic = work->range_cyclic, + .range_start = 0, + .range_end = LLONG_MAX, + }; + if (sb->s_op->writeback_inodes) + return sb->s_op->writeback_inodes(sb, wb, &wbc, work, false); + else + return generic_writeback_sb_inodes(sb, wb, &wbc, work, false); +} static long __writeback_inodes_wb(struct bdi_writeback *wb, struct wb_writeback_work *work) @@ -1942,6 +1936,31 @@ static long wb_do_writeback(struct bdi_writeback *wb) return wrote; } +/* + * This function is for file systems which have their + * own means of periodical write-out of old data. + * NOTE: inode_lock should be hold. + * + * Skip a portion of b_io inodes which belong to @sb + * and go sequentially in reverse order. + */ +void writeback_skip_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb) +{ + while (1) { + struct inode *inode; + + if (list_empty(&wb->b_io)) + break; + inode = wb_inode(wb->b_io.prev); + if (sb != inode->i_sb) + break; + redirty_tail(inode, wb); + } +} +EXPORT_SYMBOL(writeback_skip_sb_inodes); + + /* * Handle writeback of dirty data for the device backed by this bdi. Also * reschedules periodically and does kupdated style flushing. 
@@ -1953,7 +1972,7 @@ void wb_workfn(struct work_struct *work) long pages_written; set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); - current->flags |= PF_SWAPWRITE; + current->flags |= PF_FLUSHER | PF_SWAPWRITE; if (likely(!current_is_workqueue_rescuer() || !test_bit(WB_registered, &wb->state))) { diff --git a/fs/read_write.c b/fs/read_write.c index ee66fa47b0c1..032fdb0040f2 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -241,12 +241,11 @@ loff_t no_llseek(struct file *file, loff_t offset, int whence) } EXPORT_SYMBOL(no_llseek); -loff_t default_llseek(struct file *file, loff_t offset, int whence) +loff_t default_llseek_unlocked(struct file *file, loff_t offset, int whence) { struct inode *inode = file_inode(file); loff_t retval; - inode_lock(inode); switch (whence) { case SEEK_END: offset += i_size_read(inode); @@ -291,9 +290,19 @@ loff_t default_llseek(struct file *file, loff_t offset, int whence) retval = offset; } out: - inode_unlock(inode); return retval; } +EXPORT_SYMBOL(default_llseek_unlocked); + +loff_t default_llseek(struct file *file, loff_t offset, int origin) +{ + loff_t retval; + + inode_lock(file_inode(file)); + retval = default_llseek_unlocked(file, offset, origin); + inode_unlock(file_inode(file)); + return retval; +} EXPORT_SYMBOL(default_llseek); loff_t vfs_llseek(struct file *file, loff_t offset, int whence) @@ -395,7 +404,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t read_write == READ ? 
MAY_READ : MAY_WRITE); } -static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) +ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { struct iovec iov = { .iov_base = buf, .iov_len = len }; struct kiocb kiocb; @@ -411,6 +420,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo *ppos = kiocb.ki_pos; return ret; } +EXPORT_SYMBOL(new_sync_read); ssize_t __vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) diff --git a/fs/reiser4/Kconfig b/fs/reiser4/Kconfig new file mode 100644 index 000000000000..e55f8bc51caf --- /dev/null +++ b/fs/reiser4/Kconfig @@ -0,0 +1,36 @@ +config REISER4_FS + tristate "Reiser4 (EXPERIMENTAL)" + select ZLIB_INFLATE + select ZLIB_DEFLATE + select LZO_COMPRESS + select LZO_DECOMPRESS + select ZSTD_COMPRESS + select ZSTD_DECOMPRESS + select CRYPTO + select CRYPTO_CRC32C + help + Reiser4 is a filesystem that performs all filesystem operations + as atomic transactions, which means that it either performs a + write, or it does not, and in the event of a crash it does not + partially perform it or corrupt it. + + It stores files in dancing trees, which are like balanced trees but + faster. It packs small files together so that they share blocks + without wasting space. This means you can use it to store really + small files. It also means that it saves you disk space. It avoids + hassling you with anachronisms like having a maximum number of + inodes, and wasting space if you use less than that number. + + Reiser4 is a distinct filesystem type from reiserfs (V3). + It's therefore not possible to use reiserfs file systems + with reiser4. + + To learn more about reiser4, go to http://www.namesys.com + +config REISER4_DEBUG + bool "Enable reiser4 debug mode" + depends on REISER4_FS + help + Don't use this unless you are debugging reiser4. + + If unsure, say N. 
diff --git a/fs/reiser4/Makefile b/fs/reiser4/Makefile new file mode 100644 index 000000000000..8917789dc106 --- /dev/null +++ b/fs/reiser4/Makefile @@ -0,0 +1,105 @@ +# +# reiser4/Makefile +# + +obj-$(CONFIG_REISER4_FS) += reiser4.o + +ccflags-$(CONFIG_REISER4_FS) += -Wno-incompatible-pointer-types + +reiser4-y := \ + debug.o \ + jnode.o \ + znode.o \ + key.o \ + pool.o \ + tree_mod.o \ + estimate.o \ + carry.o \ + carry_ops.o \ + lock.o \ + tree.o \ + context.o \ + tap.o \ + coord.o \ + block_alloc.o \ + txnmgr.o \ + kassign.o \ + flush.o \ + wander.o \ + eottl.o \ + search.o \ + page_cache.o \ + seal.o \ + dscale.o \ + flush_queue.o \ + ktxnmgrd.o \ + blocknrset.o \ + super.o \ + super_ops.o \ + fsdata.o \ + export_ops.o \ + oid.o \ + tree_walk.o \ + inode.o \ + vfs_ops.o \ + as_ops.o \ + entd.o\ + readahead.o \ + status_flags.o \ + init_super.o \ + safe_link.o \ + blocknrlist.o \ + discard.o \ + checksum.o \ + \ + plugin/plugin.o \ + plugin/plugin_set.o \ + plugin/node/node.o \ + plugin/object.o \ + plugin/cluster.o \ + plugin/txmod.o \ + plugin/inode_ops.o \ + plugin/inode_ops_rename.o \ + plugin/file_ops.o \ + plugin/file_ops_readdir.o \ + plugin/file_plugin_common.o \ + plugin/file/file.o \ + plugin/file/tail_conversion.o \ + plugin/file/file_conversion.o \ + plugin/file/symlink.o \ + plugin/file/cryptcompress.o \ + plugin/dir_plugin_common.o \ + plugin/dir/hashed_dir.o \ + plugin/dir/seekable_dir.o \ + plugin/node/node40.o \ + plugin/node/node41.o \ + \ + plugin/crypto/cipher.o \ + plugin/crypto/digest.o \ + \ + plugin/compress/compress.o \ + plugin/compress/compress_mode.o \ + \ + plugin/item/static_stat.o \ + plugin/item/sde.o \ + plugin/item/cde.o \ + plugin/item/blackbox.o \ + plugin/item/internal.o \ + plugin/item/tail.o \ + plugin/item/ctail.o \ + plugin/item/extent.o \ + plugin/item/extent_item_ops.o \ + plugin/item/extent_file_ops.o \ + plugin/item/extent_flush_ops.o \ + \ + plugin/hash.o \ + plugin/fibration.o \ + plugin/tail_policy.o \ + 
plugin/item/item.o \ + \ + plugin/security/perm.o \ + plugin/space/bitmap.o \ + \ + plugin/disk_format/disk_format40.o \ + plugin/disk_format/disk_format.o + diff --git a/fs/reiser4/README b/fs/reiser4/README new file mode 100644 index 000000000000..80c5efe15c2d --- /dev/null +++ b/fs/reiser4/README @@ -0,0 +1,128 @@ +[LICENSING] + +Reiser4 is hereby licensed under the GNU General +Public License version 2. + +Source code files that contain the phrase "licensing governed by +reiser4/README" are "governed files" throughout this file. Governed +files are licensed under the GPL. The portions of them owned by Hans +Reiser, or authorized to be licensed by him, have been in the past, +and likely will be in the future, licensed to other parties under +other licenses. If you add your code to governed files, and don't +want it to be owned by Hans Reiser, put your copyright label on that +code so the poor blight and his customers can keep things straight. +All portions of governed files not labeled otherwise are owned by Hans +Reiser, and by adding your code to it, widely distributing it to +others or sending us a patch, and leaving the sentence in stating that +licensing is governed by the statement in this file, you accept this. +It will be a kindness if you identify whether Hans Reiser is allowed +to license code labeled as owned by you on your behalf other than +under the GPL, because he wants to know if it is okay to do so and put +a check in the mail to you (for non-trivial improvements) when he +makes his next sale. He makes no guarantees as to the amount if any, +though he feels motivated to motivate contributors, and you can surely +discuss this with him before or after contributing. You have the +right to decline to allow him to license your code contribution other +than under the GPL. + +Further licensing options are available for commercial and/or other +interests directly from Hans Reiser: reiser@namesys.com. 
If you interpret
+the GPL as not allowing those additional licensing options, you read
+it wrongly, and Richard Stallman agrees with me, when carefully read
+you can see that those restrictions on additional terms do not apply
+to the owner of the copyright, and my interpretation of this shall
+govern for this license.
+
+[END LICENSING]
+
+Reiser4 is a file system based on dancing tree algorithms, and is
+described at http://www.namesys.com
+
+mkfs.reiser4 and other utilities are on our webpage or wherever your
+Linux provider put them. You really want to be running the latest
+version off the website if you use fsck.
+
+Yes, if you update your reiser4 kernel module you do have to
+recompile your kernel, most of the time. The errors you get will be
+quite cryptic if you forget to do so.
+
+Hideous Commercial Pitch: Spread your development costs across other OS
+vendors. Select from the best in the world, not the best in your
+building, by buying from third party OS component suppliers. Leverage
+the software component development power of the internet. Be the most
+aggressive in taking advantage of the commercial possibilities of
+decentralized internet development, and add value through your branded
+integration that you sell as an operating system. Let your competitors
+be the ones to compete against the entire internet by themselves. Be
+hip, get with the new economic trend, before your competitors do. Send
+email to reiser@namesys.com
+
+Hans Reiser was the primary architect of Reiser4, but a whole team
+chipped their ideas in. He invested everything he had into Namesys
+for 5.5 dark years of no money before Reiser3 finally started to work well
+enough to bring in money. He owns the copyright.
+
+DARPA was the primary sponsor of Reiser4. DARPA does not endorse
+Reiser4, it merely sponsors it. 
DARPA is, in solely Hans's personal +opinion, unique in its willingness to invest into things more +theoretical than the VC community can readily understand, and more +longterm than allows them to be sure that they will be the ones to +extract the economic benefits from. DARPA also integrated us into a +security community that transformed our security worldview. + +Vladimir Saveliev is our lead programmer, with us from the beginning, +and he worked long hours writing the cleanest code. This is why he is +now the lead programmer after years of commitment to our work. He +always made the effort to be the best he could be, and to make his +code the best that it could be. What resulted was quite remarkable. I +don't think that money can ever motivate someone to work the way he +did, he is one of the most selfless men I know. + +Alexander Lyamin was our sysadmin, and helped to educate us in +security issues. Moscow State University and IMT were very generous +in the internet access they provided us, and in lots of other little +ways that a generous institution can be. + +Alexander Zarochentcev (sometimes known as zam, or sasha), wrote the +locking code, the block allocator, and finished the flushing code. +His code is always crystal clean and well structured. + +Nikita Danilov wrote the core of the balancing code, the core of the +plugins code, and the directory code. He worked a steady pace of long +hours that produced a whole lot of well abstracted code. He is our +senior computer scientist. + +Vladimir Demidov wrote the parser. Writing an in kernel parser is +something very few persons have the skills for, and it is thanks to +him that we can say that the parser is really not so big compared to +various bits of our other code, and making a parser work in the kernel +was not so complicated as everyone would imagine mainly because it was +him doing it... + +Joshua McDonald wrote the transaction manager, and the flush code. 
+The flush code unexpectedly turned out to be extremely hairy for reasons
+you can read about on our web page, and he did a great job on an
+extremely difficult task.
+
+Nina Reiser handled our accounting, government relations, and much
+more.
+
+Ramon Reiser developed our website.
+
+Beverly Palmer drew our graphics.
+
+Vitaly Fertman developed librepair, userspace plugins repair code, fsck
+and worked with Umka on developing libreiser4 and userspace plugins.
+
+Yury Umanets (aka Umka) developed libreiser4, userspace plugins and
+userspace tools (reiser4progs).
+
+Oleg Drokin (aka Green) is the release manager who fixes everything.
+It is so nice to have someone like that on the team. He (plus Chris
+and Jeff) make it possible for the entire rest of the Namesys team to
+focus on Reiser4, and he fixed a whole lot of Reiser4 bugs also. It
+is just amazing to watch his talent for spotting bugs in action.
+
+Edward Shishkin wrote cryptcompress file plugin (which manages files
+built of encrypted and(or) compressed bodies) and other plugins related
+to transparent encryption and compression support.
diff --git a/fs/reiser4/as_ops.c b/fs/reiser4/as_ops.c
new file mode 100644
index 000000000000..393e9d123c88
--- /dev/null
+++ b/fs/reiser4/as_ops.c
@@ -0,0 +1,348 @@
+/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */
+
+/* Interface to VFS. Reiser4 address_space_operations are defined here. 
*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/file/file.h" +#include "plugin/security/perm.h" +#include "plugin/disk_format/disk_format.h" +#include "plugin/plugin.h" +#include "plugin/plugin_set.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" +#include "entd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* address space operations */ + +/** + * reiser4_set_page_dirty - set dirty bit, tag in page tree, dirty accounting + * @page: page to be dirtied + * + * Operation of struct address_space_operations. This implementation is used by + * unix and cryptcompress file plugins. + * + * This is called when reiser4 page gets dirtied outside of reiser4, for + * example, when dirty bit is moved from pte to physical page. + * + * Tags page in the mapping's page tree with special tag so that it is possible + * to do all the reiser4 specific work wrt dirty pages (jnode creation, + * capturing by an atom) later because it can not be done in the contexts where + * set_page_dirty is called. 
+ */ +int reiser4_set_page_dirty(struct page *page) +{ + /* this page can be unformatted only */ + assert("vs-1734", (page->mapping && + page->mapping->host && + reiser4_get_super_fake(page->mapping->host->i_sb) != + page->mapping->host && + reiser4_get_cc_fake(page->mapping->host->i_sb) != + page->mapping->host && + reiser4_get_bitmap_fake(page->mapping->host->i_sb) != + page->mapping->host)); + return __set_page_dirty_nobuffers(page); +} + +/* ->invalidatepage method for reiser4 */ + +/* + * this is called for each truncated page from + * truncate_inode_pages()->truncate_{complete,partial}_page(). + * + * At the moment of call, page is under lock, and outstanding io (if any) has + * completed. + */ + +/** + * reiser4_invalidatepage + * @page: page to invalidate + * @offset: starting offset for partial invalidation + * + */ +void reiser4_invalidatepage(struct page *page, unsigned int offset, unsigned int length) +{ + int ret = 0; + int partial_page = (offset || length < PAGE_SIZE); + reiser4_context *ctx; + struct inode *inode; + jnode *node; + + /* + * This is called to truncate file's page. + * + * Originally, reiser4 implemented truncate in a standard way + * (vmtruncate() calls ->invalidatepage() on all truncated pages + * first, then file system ->truncate() call-back is invoked). + * + * This lead to the problem when ->invalidatepage() was called on a + * page with jnode that was captured into atom in ASTAGE_PRE_COMMIT + * process. That is, truncate was bypassing transactions. To avoid + * this, try_capture_page_to_invalidate() call was added here. + * + * After many troubles with vmtruncate() based truncate (including + * races with flush, tail conversion, etc.) it was re-written in the + * top-to-bottom style: items are killed in reiser4_cut_tree_object() + * and pages belonging to extent are invalidated in kill_hook_extent(). + * So probably now additional call to capture is not needed here. 
+ */ + + assert("nikita-3137", PageLocked(page)); + assert("nikita-3138", !PageWriteback(page)); + inode = page->mapping->host; + + /* + * ->invalidatepage() should only be called for the unformatted + * jnodes. Destruction of all other types of jnodes is performed + * separately. But, during some corner cases (like handling errors + * during mount) it is simpler to let ->invalidatepage to be called on + * them. Check for this, and do nothing. + */ + if (reiser4_get_super_fake(inode->i_sb) == inode) + return; + if (reiser4_get_cc_fake(inode->i_sb) == inode) + return; + if (reiser4_get_bitmap_fake(inode->i_sb) == inode) + return; + assert("vs-1426", PagePrivate(page)); + assert("vs-1427", + page->mapping == jnode_get_mapping(jnode_by_page(page))); + assert("", jprivate(page) != NULL); + assert("", ergo(inode_file_plugin(inode) != + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID), + offset == 0)); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return; + + node = jprivate(page); + spin_lock_jnode(node); + if (!(node->state & ((1 << JNODE_DIRTY) | (1 << JNODE_FLUSH_QUEUED) | + (1 << JNODE_WRITEBACK) | (1 << JNODE_OVRWR)))) { + /* there is not need to capture */ + jref(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + page_clear_jnode(page, node); + reiser4_uncapture_jnode(node); + unhash_unformatted_jnode(node); + jput(node); + reiser4_exit_context(ctx); + return; + } + spin_unlock_jnode(node); + + /* capture page being truncated. */ + ret = try_capture_page_to_invalidate(page); + if (ret != 0) + warning("nikita-3141", "Cannot capture: %i", ret); + + if (!partial_page) { + /* remove jnode from transaction and detach it from page. 
*/ + jref(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + /* page cannot be detached from jnode concurrently, because it + * is locked */ + reiser4_uncapture_page(page); + + /* this detaches page from jnode, so that jdelete will not try + * to lock page which is already locked */ + spin_lock_jnode(node); + page_clear_jnode(page, node); + spin_unlock_jnode(node); + unhash_unformatted_jnode(node); + + jput(node); + } + + reiser4_exit_context(ctx); +} + +/* help function called from reiser4_releasepage(). It returns true if jnode + * can be detached from its page and page released. */ +int jnode_is_releasable(jnode * node/* node to check */) +{ + assert("nikita-2781", node != NULL); + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(node->load)); + + /* is some thread is currently using jnode page, later cannot be + * detached */ + if (atomic_read(&node->d_count) != 0) + return 0; + + assert("vs-1214", !jnode_is_loaded(node)); + + /* + * can only release page if real block number is assigned to it. Simple + * check for ->atom wouldn't do, because it is possible for node to be + * clean, not it atom yet, and still having fake block number. For + * example, node just created in jinit_new(). + */ + if (reiser4_blocknr_is_fake(jnode_get_block(node))) + return 0; + + /* + * pages prepared for write can not be released anyway, so avoid + * detaching jnode from the page + */ + if (JF_ISSET(node, JNODE_WRITE_PREPARED)) + return 0; + + /* + * dirty jnode cannot be released. It can however be submitted to disk + * as part of early flushing, but only after getting flush-prepped. + */ + if (JF_ISSET(node, JNODE_DIRTY)) + return 0; + + /* overwrite set is only written by log writer. 
*/ + if (JF_ISSET(node, JNODE_OVRWR)) + return 0; + + /* jnode is already under writeback */ + if (JF_ISSET(node, JNODE_WRITEBACK)) + return 0; + + /* don't flush bitmaps or journal records */ + if (!jnode_is_znode(node) && !jnode_is_unformatted(node)) + return 0; + + return 1; +} + +/* + * ->releasepage method for reiser4 + * + * This is called by VM scanner when it comes across clean page. What we have + * to do here is to check whether page can really be released (freed that is) + * and if so, detach jnode from it and remove page from the page cache. + * + * Check for releasability is done by releasable() function. + */ +int reiser4_releasepage(struct page *page, gfp_t gfp UNUSED_ARG) +{ + jnode *node; + + assert("nikita-2257", PagePrivate(page)); + assert("nikita-2259", PageLocked(page)); + assert("nikita-2892", !PageWriteback(page)); + assert("nikita-3019", reiser4_schedulable()); + + /* NOTE-NIKITA: this can be called in the context of reiser4 call. It + is not clear what to do in this case. A lot of deadlocks seems be + possible. */ + + node = jnode_by_page(page); + assert("nikita-2258", node != NULL); + assert("reiser4-4", page->mapping != NULL); + assert("reiser4-5", page->mapping->host != NULL); + + if (PageDirty(page)) + return 0; + + /* extra page reference is used by reiser4 to protect + * jnode<->page link from this ->releasepage(). */ + if (page_count(page) > 3) + return 0; + + /* releasable() needs jnode lock, because it looks at the jnode fields + * and we need jload_lock here to avoid races with jload(). */ + spin_lock_jnode(node); + spin_lock(&(node->load)); + if (jnode_is_releasable(node)) { + struct address_space *mapping; + + mapping = page->mapping; + jref(node); + /* there is no need to synchronize against + * jnode_extent_write() here, because pages seen by + * jnode_extent_write() are !releasable(). 
*/ + page_clear_jnode(page, node); + spin_unlock(&(node->load)); + spin_unlock_jnode(node); + + /* we are under memory pressure so release jnode also. */ + jput(node); + + return 1; + } else { + spin_unlock(&(node->load)); + spin_unlock_jnode(node); + assert("nikita-3020", reiser4_schedulable()); + return 0; + } +} + +#ifdef CONFIG_MIGRATION +int reiser4_migratepage(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode) +{ + /* TODO: implement movable mapping + */ + return -EIO; +} +#endif /* CONFIG_MIGRATION */ + +int reiser4_readpage_dispatch(struct file *file, struct page *page) +{ + assert("edward-1533", PageLocked(page)); + assert("edward-1534", !PageUptodate(page)); + assert("edward-1535", page->mapping && page->mapping->host); + + return inode_file_plugin(page->mapping->host)->readpage(file, page); +} + +int reiser4_readpages_dispatch(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + return inode_file_plugin(mapping->host)->readpages(file, mapping, + pages, nr_pages); +} + +int reiser4_writepages_dispatch(struct address_space *mapping, + struct writeback_control *wbc) +{ + return inode_file_plugin(mapping->host)->writepages(mapping, wbc); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/block_alloc.c b/fs/reiser4/block_alloc.c new file mode 100644 index 000000000000..fee1c185e0b2 --- /dev/null +++ b/fs/reiser4/block_alloc.c @@ -0,0 +1,1176 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by +reiser4/README */ + +#include "debug.h" +#include "dformat.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "super.h" +#include "discard.h" + +#include /* for __u?? 
*/
+#include <linux/fs.h>	/* for struct super_block */
+#include <linux/spinlock.h>
+
+/* THE REISER4 DISK SPACE RESERVATION SCHEME. */
+
+/* We need to be able to reserve enough disk space to ensure that an atomic
+   operation will have enough disk space to flush (see flush.c and
+   http://namesys.com/v4/v4.html) and commit it once it is started.
+
+   In our design a call for reserving disk space may fail but not an actual
+   block allocation.
+
+   All free blocks, already allocated blocks, and all kinds of reserved blocks
+   are counted in different per-fs block counters.
+
+   A reiser4 super block's set of block counters currently is:
+
+   free -- free blocks,
+   used -- already allocated blocks,
+
+   grabbed -- initially reserved for performing an fs operation, those blocks
+   are taken from free blocks, then grabbed disk space leaks from grabbed
+   blocks counter to other counters like "fake allocated", "flush
+   reserved", "used", the rest of not used grabbed space is returned to
+   free space at the end of fs operation;
+
+   fake allocated -- counts all nodes without real disk block numbers assigned,
+   we have separate accounting for formatted and unformatted
+   nodes (for easier debugging);
+
+   flush reserved -- disk space needed for flushing and committing an atom.
+   Each dirty already allocated block could be written as a
+   part of atom's overwrite set or as a part of atom's
+   relocate set. In both case one additional block is needed,
+   it is used as a wandered block if we do overwrite or as a
+   new location for a relocated block.
+
+   In addition, blocks in some states are counted on per-thread and per-atom
+   basis. A reiser4 context has a counter of blocks grabbed by this transaction
+   and the sb's grabbed blocks counter is a sum of grabbed blocks counter values
+   of each reiser4 context. Each reiser4 atom has a counter of "flush reserved"
+   blocks, which are reserved for flush processing and atom commit. */
+
+/* AN EXAMPLE: suppose we insert new item to the reiser4 tree. 
We estimate + number of blocks to grab for most expensive case of balancing when the leaf + node we insert new item to gets split and new leaf node is allocated. + + So, we need to grab blocks for + + 1) one block for possible dirtying the node we insert an item to. That block + would be used for node relocation at flush time or for allocating of a + wandered one, it depends what will be a result (what set, relocate or + overwrite the node gets assigned to) of the node processing by the flush + algorithm. + + 2) one block for either allocating a new node, or dirtying of right or left + clean neighbor, only one case may happen. + + VS-FIXME-HANS: why can only one case happen? I would expect to see dirtying + of left neighbor, right neighbor, current node, and creation of new node. + Have I forgotten something? email me. + + These grabbed blocks are counted in both reiser4 context "grabbed blocks" + counter and in the fs-wide one (both ctx->grabbed_blocks and + sbinfo->blocks_grabbed get incremented by 2), sb's free blocks counter is + decremented by 2. + + Suppose both two blocks were spent for dirtying of an already allocated clean + node (one block went from "grabbed" to "flush reserved") and for new block + allocating (one block went from "grabbed" to "fake allocated formatted"). + + Inserting of a child pointer to the parent node caused parent node to be + split, the balancing code takes care about this grabbing necessary space + immediately by calling reiser4_grab with BA_RESERVED flag set which means + "can use the 5% reserved disk space". + + At this moment insertion completes and grabbed blocks (if they were not used) + should be returned to the free space counter. + + However the atom life-cycle is not completed. The atom had one "flush + reserved" block added by our insertion and the new fake allocated node is + counted as a "fake allocated formatted" one. The atom has to be fully + processed by flush before commit. 
Suppose that the flush moved the first, + already allocated node to the atom's overwrite list, the new fake allocated + node, obviously, went into the atom relocate set. The reiser4 flush + allocates the new node using one unit from "fake allocated formatted" + counter, the log writer uses one from "flush reserved" for wandered block + allocation. + + And, it is not the end. When the wandered block is deallocated after the + atom gets fully played (see wander.c for term description), the disk space + occupied for it is returned to free blocks. */ + +/* BLOCK NUMBERS */ + +/* Any reiser4 node has a block number assigned to it. We use these numbers for + indexing in hash tables, so if a block has not yet been assigned a location + on disk we need to give it a temporary fake block number. + + Current implementation of reiser4 uses 64-bit integers for block numbers. We + use highest bit in 64-bit block number to distinguish fake and real block + numbers. So, only 63 bits may be used to addressing of real device + blocks. That "fake" block numbers space is divided into subspaces of fake + block numbers for data blocks and for shadow (working) bitmap blocks. + + Fake block numbers for data blocks are generated by a cyclic counter, which + gets incremented after each real block allocation. We assume that it is + impossible to overload this counter during one transaction life. */ + +/* Initialize a blocknr hint. */ +void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint) +{ + memset(hint, 0, sizeof(reiser4_blocknr_hint)); +} + +/* Release any resources of a blocknr hint. */ +void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint UNUSED_ARG) +{ +/* No resources should be freed in current blocknr_hint implementation. */ +} + +/* see above for explanation of fake block number. 
*/
+/* Audited by: green(2002.06.11) */
+int reiser4_blocknr_is_fake(const reiser4_block_nr * da)
+{
+	/* The reason for not simply returning result of '&' operation is that
+	   while return value is (possibly 32bit) int, the reiser4_block_nr is
+	   at least 64 bits long, and high bit (which is the only possible
+	   non zero bit after the masking) would be stripped off */
+	return (*da & REISER4_FAKE_BLOCKNR_BIT_MASK) ? 1 : 0;
+}
+
+/* Static functions for <reiser4 super block>/<reiser4 context> block counters
+   arithmetic. Mostly, they are isolated so as not to repeat the same
+   assertions in several places. */
+static void sub_from_ctx_grabbed(reiser4_context * ctx, __u64 count)
+{
+	BUG_ON(ctx->grabbed_blocks < count);
+	assert("zam-527", ctx->grabbed_blocks >= count);
+	ctx->grabbed_blocks -= count;
+}
+
+static void add_to_ctx_grabbed(reiser4_context * ctx, __u64 count)
+{
+	ctx->grabbed_blocks += count;
+}
+
+static void sub_from_sb_grabbed(reiser4_super_info_data * sbinfo, __u64 count)
+{
+	assert("zam-525", sbinfo->blocks_grabbed >= count);
+	sbinfo->blocks_grabbed -= count;
+}
+
+/* Decrease the counter of block reserved for flush in super block. 
*/ +static void +sub_from_sb_flush_reserved(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("vpf-291", sbinfo->blocks_flush_reserved >= count); + sbinfo->blocks_flush_reserved -= count; +} + +static void +sub_from_sb_fake_allocated(reiser4_super_info_data * sbinfo, __u64 count, + reiser4_ba_flags_t flags) +{ + if (flags & BA_FORMATTED) { + assert("zam-806", sbinfo->blocks_fake_allocated >= count); + sbinfo->blocks_fake_allocated -= count; + } else { + assert("zam-528", + sbinfo->blocks_fake_allocated_unformatted >= count); + sbinfo->blocks_fake_allocated_unformatted -= count; + } +} + +static void sub_from_sb_used(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("zam-530", + sbinfo->blocks_used >= count + sbinfo->min_blocks_used); + sbinfo->blocks_used -= count; +} + +static void +sub_from_cluster_reserved(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("edward-501", sbinfo->blocks_clustered >= count); + sbinfo->blocks_clustered -= count; +} + +/* Increase the counter of block reserved for flush in atom. */ +static void add_to_atom_flush_reserved_nolock(txn_atom * atom, __u32 count) +{ + assert("zam-772", atom != NULL); + assert_spin_locked(&(atom->alock)); + atom->flush_reserved += count; +} + +/* Decrease the counter of block reserved for flush in atom. */ +static void sub_from_atom_flush_reserved_nolock(txn_atom * atom, __u32 count) +{ + assert("zam-774", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("nikita-2790", atom->flush_reserved >= count); + atom->flush_reserved -= count; +} + +/* super block has 6 counters: free, used, grabbed, fake allocated + (formatted and unformatted) and flush reserved. Their sum must be + number of blocks on a device. 
This function checks this */ +int reiser4_check_block_counters(const struct super_block *super) +{ + __u64 sum; + + sum = reiser4_grabbed_blocks(super) + reiser4_free_blocks(super) + + reiser4_data_blocks(super) + reiser4_fake_allocated(super) + + reiser4_fake_allocated_unformatted(super) + reiser4_flush_reserved(super) + + reiser4_clustered_blocks(super); + if (reiser4_block_count(super) != sum) { + printk("super block counters: " + "used %llu, free %llu, " + "grabbed %llu, fake allocated (formatetd %llu, unformatted %llu), " + "reserved %llu, clustered %llu, sum %llu, must be (block count) %llu\n", + (unsigned long long)reiser4_data_blocks(super), + (unsigned long long)reiser4_free_blocks(super), + (unsigned long long)reiser4_grabbed_blocks(super), + (unsigned long long)reiser4_fake_allocated(super), + (unsigned long long) + reiser4_fake_allocated_unformatted(super), + (unsigned long long)reiser4_flush_reserved(super), + (unsigned long long)reiser4_clustered_blocks(super), + (unsigned long long)sum, + (unsigned long long)reiser4_block_count(super)); + return 0; + } + return 1; +} + +/* Adjust "working" free blocks counter for number of blocks we are going to + allocate. Record number of grabbed blocks in fs-wide and per-thread + counters. This function should be called before bitmap scanning or + allocating fake block numbers + + @super -- pointer to reiser4 super block; + @count -- number of blocks we reserve; + + @return -- 0 if success, -ENOSPC, if all + free blocks are preserved or already allocated. +*/ + +static int +reiser4_grab(reiser4_context * ctx, __u64 count, reiser4_ba_flags_t flags) +{ + __u64 free_blocks; + int ret = 0, use_reserved = flags & BA_RESERVED; + reiser4_super_info_data *sbinfo; + + assert("vs-1276", ctx == get_current_context()); + + /* Do not grab anything on ro-mounted fs. 
*/ + if (rofs_super(ctx->super)) { + ctx->grab_enabled = 0; + return 0; + } + + sbinfo = get_super_private(ctx->super); + + spin_lock_reiser4_super(sbinfo); + + free_blocks = sbinfo->blocks_free; + + if ((use_reserved && free_blocks < count) || + (!use_reserved && free_blocks < count + sbinfo->blocks_reserved)) { + ret = RETERR(-ENOSPC); + goto unlock_and_ret; + } + + add_to_ctx_grabbed(ctx, count); + + sbinfo->blocks_grabbed += count; + sbinfo->blocks_free -= count; + +#if REISER4_DEBUG + if (ctx->grabbed_initially == 0) + ctx->grabbed_initially = count; +#endif + + assert("nikita-2986", reiser4_check_block_counters(ctx->super)); + + /* disable grab space in current context */ + ctx->grab_enabled = 0; + +unlock_and_ret: + spin_unlock_reiser4_super(sbinfo); + + return ret; +} + +int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags) +{ + int ret; + reiser4_context *ctx; + + assert("nikita-2964", ergo(flags & BA_CAN_COMMIT, + lock_stack_isclean(get_current_lock_stack + ()))); + ctx = get_current_context(); + if (!(flags & BA_FORCE) && !is_grab_enabled(ctx)) + return 0; + + ret = reiser4_grab(ctx, count, flags); + if (ret == -ENOSPC) { + + /* Trying to commit the all transactions if BA_CAN_COMMIT flag + present */ + if (flags & BA_CAN_COMMIT) { + txnmgr_force_commit_all(ctx->super, 0); + ctx->grab_enabled = 1; + ret = reiser4_grab(ctx, count, flags); + } + } + /* + * allocation from reserved pool cannot fail. This is severe error. + */ + assert("nikita-3005", ergo(flags & BA_RESERVED, ret == 0)); + return ret; +} + +/* + * SPACE RESERVED FOR UNLINK/TRUNCATE + * + * Unlink and truncate require space in transaction (to update stat data, at + * least). But we don't want rm(1) to fail with "No space on device" error. + * + * Solution is to reserve 5% of disk space for truncates and + * unlinks. Specifically, normal space grabbing requests don't grab space from + * reserved area. Only requests with BA_RESERVED bit in flags are allowed to + * drain it. 
Per super block delete mutex is used to allow only one + * thread at a time to grab from reserved area. + * + * Grabbing from reserved area should always be performed with BA_CAN_COMMIT + * flag. + * + */ + +int reiser4_grab_reserved(struct super_block *super, + __u64 count, reiser4_ba_flags_t flags) +{ + reiser4_super_info_data *sbinfo = get_super_private(super); + + assert("nikita-3175", flags & BA_CAN_COMMIT); + + /* Check the delete mutex already taken by us, we assume that + * reading of machine word is atomic. */ + if (sbinfo->delete_mutex_owner == current) { + if (reiser4_grab_space + (count, (flags | BA_RESERVED) & ~BA_CAN_COMMIT)) { + warning("zam-1003", + "nested call of grab_reserved fails count=(%llu)", + (unsigned long long)count); + reiser4_release_reserved(super); + return RETERR(-ENOSPC); + } + return 0; + } + + if (reiser4_grab_space(count, flags)) { + mutex_lock(&sbinfo->delete_mutex); + assert("nikita-2929", sbinfo->delete_mutex_owner == NULL); + sbinfo->delete_mutex_owner = current; + + if (reiser4_grab_space(count, flags | BA_RESERVED)) { + warning("zam-833", + "reserved space is not enough (%llu)", + (unsigned long long)count); + reiser4_release_reserved(super); + return RETERR(-ENOSPC); + } + } + return 0; +} + +void reiser4_release_reserved(struct super_block *super) +{ + reiser4_super_info_data *info; + + info = get_super_private(super); + if (info->delete_mutex_owner == current) { + info->delete_mutex_owner = NULL; + mutex_unlock(&info->delete_mutex); + } +} + +static reiser4_super_info_data *grabbed2fake_allocated_head(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sub_from_ctx_grabbed(ctx, count); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + /* return sbinfo locked */ + return sbinfo; +} + +/* is called after @count fake block numbers are allocated and pointer to + those blocks are inserted into tree. 
*/ +static void grabbed2fake_allocated_formatted(void) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = grabbed2fake_allocated_head(1); + sbinfo->blocks_fake_allocated++; + + assert("vs-922", reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/** + * grabbed2fake_allocated_unformatted + * @count: + * + */ +static void grabbed2fake_allocated_unformatted(int count) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = grabbed2fake_allocated_head(count); + sbinfo->blocks_fake_allocated_unformatted += count; + + assert("vs-9221", reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2cluster_reserved(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sub_from_ctx_grabbed(ctx, count); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_clustered += count; + + assert("edward-504", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void cluster_reserved2grabbed(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_cluster_reserved(sbinfo, count); + sbinfo->blocks_grabbed += count; + + assert("edward-505", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); + add_to_ctx_grabbed(ctx, count); +} + +void cluster_reserved2free(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + cluster_reserved2grabbed(count); + grabbed2free(ctx, sbinfo, count); +} + +static DEFINE_SPINLOCK(fake_lock); +static reiser4_block_nr fake_gen = 0; + +/** + * assign_fake_blocknr + * @blocknr: + * @count: + * + * Obtain a fake block number for new 
node which will be used to refer to + * this newly allocated node until real allocation is done. + */ +static void assign_fake_blocknr(reiser4_block_nr *blocknr, int count) +{ + spin_lock(&fake_lock); + *blocknr = fake_gen; + fake_gen += count; + spin_unlock(&fake_lock); + + BUG_ON(*blocknr & REISER4_BLOCKNR_STATUS_BIT_MASK); + /**blocknr &= ~REISER4_BLOCKNR_STATUS_BIT_MASK;*/ + *blocknr |= REISER4_UNALLOCATED_STATUS_VALUE; + assert("zam-394", zlook(current_tree, blocknr) == NULL); +} + +int assign_fake_blocknr_formatted(reiser4_block_nr * blocknr) +{ + assign_fake_blocknr(blocknr, 1); + grabbed2fake_allocated_formatted(); + return 0; +} + +/** + * fake_blocknrs_unformatted + * @count: number of fake numbers to get + * + * Allocates @count fake block numbers which will be assigned to jnodes + */ +reiser4_block_nr fake_blocknr_unformatted(int count) +{ + reiser4_block_nr blocknr; + + assign_fake_blocknr(&blocknr, count); + grabbed2fake_allocated_unformatted(count); + + return blocknr; +} + +/* adjust sb block counters, if real (on-disk) block allocation immediately + follows grabbing of free disk space. 
*/ +static void grabbed2used(reiser4_context *ctx, reiser4_super_info_data *sbinfo, + __u64 count) +{ + sub_from_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_used += count; + + assert("nikita-2679", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/* adjust sb block counters when @count unallocated blocks get mapped to disk */ +static void fake_allocated2used(reiser4_super_info_data *sbinfo, __u64 count, + reiser4_ba_flags_t flags) +{ + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_fake_allocated(sbinfo, count, flags); + sbinfo->blocks_used += count; + + assert("nikita-2680", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +static void flush_reserved2used(txn_atom * atom, __u64 count) +{ + reiser4_super_info_data *sbinfo; + + assert("zam-787", atom != NULL); + assert_spin_locked(&(atom->alock)); + + sub_from_atom_flush_reserved_nolock(atom, (__u32) count); + + sbinfo = get_current_super_private(); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_flush_reserved(sbinfo, count); + sbinfo->blocks_used += count; + + assert("zam-789", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* update the per fs blocknr hint default value. */ +void +update_blocknr_hint_default(const struct super_block *s, + const reiser4_block_nr * block) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("nikita-3342", !reiser4_blocknr_is_fake(block)); + + spin_lock_reiser4_super(sbinfo); + if (*block < sbinfo->block_count) { + sbinfo->blocknr_hint_default = *block; + } else { + warning("zam-676", + "block number %llu is too large to be used in a blocknr hint\n", + (unsigned long long)*block); + dump_stack(); + DEBUGON(1); + } + spin_unlock_reiser4_super(sbinfo); +} + +/* get current value of the default blocknr hint. 
*/ +void get_blocknr_hint_default(reiser4_block_nr * result) +{ + reiser4_super_info_data *sbinfo = get_current_super_private(); + + spin_lock_reiser4_super(sbinfo); + *result = sbinfo->blocknr_hint_default; + assert("zam-677", *result < sbinfo->block_count); + spin_unlock_reiser4_super(sbinfo); +} + +/* Allocate "real" disk blocks by calling a proper space allocation plugin + * method. Blocks are allocated in one contiguous disk region. The plugin + * independent part accounts blocks by subtracting allocated amount from grabbed + * or fake block counter and add the same amount to the counter of allocated + * blocks. + * + * @hint -- a reiser4 blocknr hint object which contains further block + * allocation hints and parameters (search start, a stage of block + * which will be mapped to disk, etc.), + * @blk -- an out parameter for the beginning of the allocated region, + * @len -- in/out parameter, it should contain the maximum number of allocated + * blocks, after block allocation completes, it contains the length of + * allocated disk region. + * @flags -- see reiser4_ba_flags_t description. + * + * @return -- 0 if success, error code otherwise. + */ +int +reiser4_alloc_blocks(reiser4_blocknr_hint * hint, reiser4_block_nr * blk, + reiser4_block_nr * len, reiser4_ba_flags_t flags) +{ + __u64 needed = *len; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + int ret; + + assert("zam-986", hint != NULL); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + /* For write-optimized data we use default search start value, which is + * close to last write location. */ + if (flags & BA_USE_DEFAULT_SEARCH_START) + get_blocknr_hint_default(&hint->blk); + + /* VITALY: allocator should grab this for internal/tx-lists/similar + only. 
*/ +/* VS-FIXME-HANS: why is this comment above addressed to vitaly (from vitaly)?*/ + if (hint->block_stage == BLOCK_NOT_COUNTED) { + ret = reiser4_grab_space_force(*len, flags); + if (ret != 0) + return ret; + } + + ret = + sa_alloc_blocks(reiser4_get_space_allocator(ctx->super), + hint, (int)needed, blk, len); + + if (!ret) { + assert("zam-680", *blk < reiser4_block_count(ctx->super)); + assert("zam-681", + *blk + *len <= reiser4_block_count(ctx->super)); + + if (flags & BA_PERMANENT) { + /* we assume that current atom exists at this moment */ + txn_atom *atom = get_current_atom_locked(); + atom->nr_blocks_allocated += *len; + spin_unlock_atom(atom); + } + + switch (hint->block_stage) { + case BLOCK_NOT_COUNTED: + case BLOCK_GRABBED: + grabbed2used(ctx, sbinfo, *len); + break; + case BLOCK_UNALLOCATED: + fake_allocated2used(sbinfo, *len, flags); + break; + case BLOCK_FLUSH_RESERVED: + { + txn_atom *atom = get_current_atom_locked(); + flush_reserved2used(atom, *len); + spin_unlock_atom(atom); + } + break; + default: + impossible("zam-531", "wrong block stage"); + } + } else { + assert("zam-821", + ergo(hint->max_dist == 0 + && !hint->backward, ret != -ENOSPC)); + if (hint->block_stage == BLOCK_NOT_COUNTED) + grabbed2free(ctx, sbinfo, needed); + } + + return ret; +} + +/** + * ask block allocator for some unformatted blocks + */ +void allocate_blocks_unformatted(reiser4_blocknr_hint *preceder, + reiser4_block_nr wanted_count, + reiser4_block_nr *first_allocated, + reiser4_block_nr *allocated, + block_stage_t block_stage) +{ + *allocated = wanted_count; + preceder->max_dist = 0; /* scan whole disk, if needed */ + + /* that number of blocks (wanted_count) is either in UNALLOCATED or in GRABBED */ + preceder->block_stage = block_stage; + + /* FIXME: we do not handle errors here now */ + check_me("vs-420", + reiser4_alloc_blocks(preceder, first_allocated, allocated, + BA_PERMANENT) == 0); + /* update flush_pos's preceder to last allocated block number */ + 
preceder->blk = *first_allocated + *allocated - 1; +} + +/* used -> fake_allocated -> grabbed -> free */ + +/* adjust sb block counters when @count unallocated blocks get unmapped from + disk */ +static void +used2fake_allocated(reiser4_super_info_data * sbinfo, __u64 count, + int formatted) +{ + spin_lock_reiser4_super(sbinfo); + + if (formatted) + sbinfo->blocks_fake_allocated += count; + else + sbinfo->blocks_fake_allocated_unformatted += count; + + sub_from_sb_used(sbinfo, count); + + assert("nikita-2681", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +static void +used2flush_reserved(reiser4_super_info_data * sbinfo, txn_atom * atom, + __u64 count, reiser4_ba_flags_t flags UNUSED_ARG) +{ + assert("nikita-2791", atom != NULL); + assert_spin_locked(&(atom->alock)); + + add_to_atom_flush_reserved_nolock(atom, (__u32) count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_flush_reserved += count; + /*add_to_sb_flush_reserved(sbinfo, count); */ + sub_from_sb_used(sbinfo, count); + + assert("nikita-2681", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* disk space, virtually used by fake block numbers is counted as "grabbed" + again. 
*/ +static void +fake_allocated2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo, + __u64 count, reiser4_ba_flags_t flags) +{ + add_to_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + assert("nikita-2682", reiser4_check_block_counters(ctx->super)); + + sbinfo->blocks_grabbed += count; + sub_from_sb_fake_allocated(sbinfo, count, flags & BA_FORMATTED); + + assert("nikita-2683", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + fake_allocated2grabbed(ctx, sbinfo, count, flags); + grabbed2free(ctx, sbinfo, count); +} + +void grabbed2free_mark(__u64 mark) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + assert("nikita-3007", (__s64) mark >= 0); + assert("nikita-3006", ctx->grabbed_blocks >= mark); + grabbed2free(ctx, sbinfo, ctx->grabbed_blocks - mark); +} + +/** + * grabbed2free - adjust grabbed and free block counters + * @ctx: context to update grabbed block counter of + * @sbinfo: super block to update grabbed and free block counters of + * @count: number of blocks to adjust counters by + * + * Decreases context's and per filesystem's counters of grabbed + * blocks. Increases per filesystem's counter of free blocks. 
+ */ +void grabbed2free(reiser4_context *ctx, reiser4_super_info_data *sbinfo, + __u64 count) +{ + sub_from_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_free += count; + assert("nikita-2684", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("vs-1095", atom); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + sub_from_ctx_grabbed(ctx, count); + + add_to_atom_flush_reserved_nolock(atom, count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_flush_reserved += count; + sub_from_sb_grabbed(sbinfo, count); + + assert("vpf-292", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2flush_reserved(__u64 count) +{ + txn_atom *atom = get_current_atom_locked(); + + grabbed2flush_reserved_nolock(atom, count); + + spin_unlock_atom(atom); +} + +void flush_reserved2grabbed(txn_atom * atom, __u64 count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("nikita-2788", atom != NULL); + assert_spin_locked(&(atom->alock)); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + add_to_ctx_grabbed(ctx, count); + + sub_from_atom_flush_reserved_nolock(atom, (__u32) count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_grabbed += count; + sub_from_sb_flush_reserved(sbinfo, count); + + assert("vpf-292", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/** + * all_grabbed2free - releases all blocks grabbed in context + * + * Decreases context's and super block's grabbed block counters by number of + * blocks grabbed by current context and increases super block's free block + * counter correspondingly. 
+ */ +void all_grabbed2free(void) +{ + reiser4_context *ctx = get_current_context(); + + grabbed2free(ctx, get_super_private(ctx->super), ctx->grabbed_blocks); +} + +/* adjust sb block counters if real (on-disk) blocks do not become unallocated + after freeing, @count blocks become "grabbed". */ +static void +used2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo, + __u64 count) +{ + add_to_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_grabbed += count; + sub_from_sb_used(sbinfo, count); + + assert("nikita-2685", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/* this used to be done through used2grabbed and grabbed2free*/ +static void used2free(reiser4_super_info_data * sbinfo, __u64 count) +{ + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_free += count; + sub_from_sb_used(sbinfo, count); + + assert("nikita-2685", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* check "allocated" state of given block range */ +int +reiser4_check_blocks(const reiser4_block_nr * start, + const reiser4_block_nr * len, int desired) +{ + return sa_check_blocks(start, len, desired); +} + +/* Blocks deallocation function may do an actual deallocation through space + plugin allocation or store deleted block numbers in atom's delete_set data + structure depend on @defer parameter. */ + +/* if BA_DEFER bit is not turned on, @target_stage means the stage of blocks + which will be deleted from WORKING bitmap. 
They might be just unmapped from + disk, or freed but disk space is still grabbed by current thread, or these + blocks must not be counted in any reiser4 sb block counters, + see block_stage_t comment */ + +/* BA_FORMATTED bit is only used when BA_DEFER in not present: it is used to + distinguish blocks allocated for unformatted and formatted nodes */ + +int +reiser4_dealloc_blocks(const reiser4_block_nr * start, + const reiser4_block_nr * len, + block_stage_t target_stage, reiser4_ba_flags_t flags) +{ + txn_atom *atom = NULL; + int ret; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + void *new_entry = NULL; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + if (REISER4_DEBUG) { + assert("zam-431", *len != 0); + assert("zam-432", *start != 0); + assert("zam-558", !reiser4_blocknr_is_fake(start)); + + spin_lock_reiser4_super(sbinfo); + assert("zam-562", *start < sbinfo->block_count); + spin_unlock_reiser4_super(sbinfo); + } + + if (flags & BA_DEFER) { + /* + * These blocks will be later deallocated by apply_dset(). + * It is equivalent to a non-deferred deallocation with target + * stage BLOCK_NOT_COUNTED. + */ + + /* store deleted block numbers in the atom's deferred delete set + for further actual deletion */ + do { + atom = get_current_atom_locked(); + assert("zam-430", atom != NULL); + + ret = atom_dset_deferred_add_extent(atom, &new_entry, start, len); + + if (ret == -ENOMEM) + return ret; + + /* This loop might spin at most two times */ + } while (ret == -E_REPEAT); + + assert("zam-477", ret == 0); + assert("zam-433", atom != NULL); + + spin_unlock_atom(atom); + + } else { + assert("zam-425", get_current_super_private() != NULL); + sa_dealloc_blocks(reiser4_get_space_allocator(ctx->super), + *start, *len); + + if (flags & BA_PERMANENT) { + /* These blocks were counted as allocated, we have to + * revert it back if allocation is discarded. 
*/ + txn_atom *atom = get_current_atom_locked(); + atom->nr_blocks_allocated -= *len; + spin_unlock_atom(atom); + } + + switch (target_stage) { + case BLOCK_NOT_COUNTED: + assert("vs-960", flags & BA_FORMATTED); + /* VITALY: This is what was grabbed for + internal/tx-lists/similar only */ + used2free(sbinfo, *len); + break; + + case BLOCK_GRABBED: + used2grabbed(ctx, sbinfo, *len); + break; + + case BLOCK_UNALLOCATED: + used2fake_allocated(sbinfo, *len, flags & BA_FORMATTED); + break; + + case BLOCK_FLUSH_RESERVED:{ + txn_atom *atom; + + atom = get_current_atom_locked(); + used2flush_reserved(sbinfo, atom, *len, + flags & BA_FORMATTED); + spin_unlock_atom(atom); + break; + } + default: + impossible("zam-532", "wrong block stage"); + } + } + + return 0; +} + +/* wrappers for block allocator plugin methods */ +int reiser4_pre_commit_hook(void) +{ + assert("zam-502", get_current_super_private() != NULL); + sa_pre_commit_hook(); + return 0; +} + +/* an actor which applies delete set to block allocator data */ +static int +apply_dset(txn_atom * atom UNUSED_ARG, const reiser4_block_nr * a, + const reiser4_block_nr * b, void *data UNUSED_ARG) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + __u64 len = 1; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + assert("zam-877", atom->stage >= ASTAGE_PRE_COMMIT); + assert("zam-552", sbinfo != NULL); + + if (b != NULL) + len = *b; + + if (REISER4_DEBUG) { + spin_lock_reiser4_super(sbinfo); + + assert("zam-554", *a < reiser4_block_count(ctx->super)); + assert("zam-555", *a + len <= reiser4_block_count(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); + } + + sa_dealloc_blocks(&sbinfo->space_allocator, *a, len); + /* adjust sb block counters */ + used2free(sbinfo, len); + return 0; +} + +void reiser4_post_commit_hook(void) +{ +#ifdef REISER4_DEBUG + txn_atom *atom; + + atom = get_current_atom_locked(); + assert("zam-452", atom->stage == ASTAGE_POST_COMMIT); + spin_unlock_atom(atom); 
+#endif + + assert("zam-504", get_current_super_private() != NULL); + sa_post_commit_hook(); +} + +void reiser4_post_write_back_hook(void) +{ + struct list_head discarded_set; + txn_atom *atom; + int ret; + + /* process and issue discard requests */ + blocknr_list_init (&discarded_set); + do { + atom = get_current_atom_locked(); + ret = discard_atom(atom, &discarded_set); + } while (ret == -E_REPEAT); + + if (ret) { + warning("intelfx-8", "discard atom failed (%d)", ret); + } + + atom = get_current_atom_locked(); + discard_atom_post(atom, &discarded_set); + + /* do the block deallocation which was deferred + until commit is done */ + atom_dset_deferred_apply(atom, apply_dset, NULL, 1); + + assert("zam-504", get_current_super_private() != NULL); + sa_post_write_back_hook(); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/block_alloc.h b/fs/reiser4/block_alloc.h new file mode 100644 index 000000000000..a4e98af51903 --- /dev/null +++ b/fs/reiser4/block_alloc.h @@ -0,0 +1,177 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined(__FS_REISER4_BLOCK_ALLOC_H__) +#define __FS_REISER4_BLOCK_ALLOC_H__ + +#include "dformat.h" +#include "forward.h" + +#include /* for __u?? 
*/ +#include + +/* Mask when is applied to given block number shows is that block number is a + fake one */ +#define REISER4_FAKE_BLOCKNR_BIT_MASK 0x8000000000000000ULL +/* Mask which isolates a type of object this fake block number was assigned + to */ +#define REISER4_BLOCKNR_STATUS_BIT_MASK 0xC000000000000000ULL + +/*result after applying the REISER4_BLOCKNR_STATUS_BIT_MASK should be compared + against these two values to understand is the object unallocated or bitmap + shadow object (WORKING BITMAP block, look at the plugin/space/bitmap.c) */ +#define REISER4_UNALLOCATED_STATUS_VALUE 0xC000000000000000ULL +#define REISER4_BITMAP_BLOCKS_STATUS_VALUE 0x8000000000000000ULL + +/* specification how block allocation was counted in sb block counters */ +typedef enum { + BLOCK_NOT_COUNTED = 0, /* reiser4 has no info about this block yet */ + BLOCK_GRABBED = 1, /* free space grabbed for further allocation + of this block */ + BLOCK_FLUSH_RESERVED = 2, /* block is reserved for flush needs. */ + BLOCK_UNALLOCATED = 3, /* block is used for existing in-memory object + ( unallocated formatted or unformatted + node) */ + BLOCK_ALLOCATED = 4 /* block is mapped to disk, real on-disk block + number assigned */ +} block_stage_t; + +/* a hint for block allocator */ +struct reiser4_blocknr_hint { + /* FIXME: I think we want to add a longterm lock on the bitmap block + here. This is to prevent jnode_flush() calls from interleaving + allocations on the same bitmap, once a hint is established. */ + + /* search start hint */ + reiser4_block_nr blk; + /* if not zero, it is a region size we search for free blocks in */ + reiser4_block_nr max_dist; + /* level for allocation, may be useful have branch-level and higher + write-optimized. 
*/ + tree_level level; + /* block allocator assumes that blocks, which will be mapped to disk, + are in this specified block_stage */ + block_stage_t block_stage; + /* If direction = 1 allocate blocks in backward direction from the end + * of disk to the beginning of disk. */ + unsigned int backward:1; + +}; + +/* These flags control block allocation/deallocation behavior */ +enum reiser4_ba_flags { + /* do allocatations from reserved (5%) area */ + BA_RESERVED = (1 << 0), + + /* block allocator can do commit trying to recover free space */ + BA_CAN_COMMIT = (1 << 1), + + /* if operation will be applied to formatted block */ + BA_FORMATTED = (1 << 2), + + /* defer actual block freeing until transaction commit */ + BA_DEFER = (1 << 3), + + /* allocate blocks for permanent fs objects (formatted or unformatted), + not wandered of log blocks */ + BA_PERMANENT = (1 << 4), + + /* grab space even it was disabled */ + BA_FORCE = (1 << 5), + + /* use default start value for free blocks search. */ + BA_USE_DEFAULT_SEARCH_START = (1 << 6) +}; + +typedef enum reiser4_ba_flags reiser4_ba_flags_t; + +extern void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint); +extern void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint); +extern void update_blocknr_hint_default(const struct super_block *, + const reiser4_block_nr *); +extern void get_blocknr_hint_default(reiser4_block_nr *); + +extern reiser4_block_nr reiser4_fs_reserved_space(struct super_block *super); + +int assign_fake_blocknr_formatted(reiser4_block_nr *); +reiser4_block_nr fake_blocknr_unformatted(int); + +/* free -> grabbed -> fake_allocated -> used */ + +int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags); +void all_grabbed2free(void); +void grabbed2free(reiser4_context * , reiser4_super_info_data * , __u64 count); +void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags); +void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count); +void grabbed2flush_reserved(__u64 count); +int 
reiser4_alloc_blocks(reiser4_blocknr_hint * hint, + reiser4_block_nr * start, + reiser4_block_nr * len, reiser4_ba_flags_t flags); +int reiser4_dealloc_blocks(const reiser4_block_nr *, + const reiser4_block_nr *, + block_stage_t, reiser4_ba_flags_t flags); + +static inline int reiser4_alloc_block(reiser4_blocknr_hint * hint, + reiser4_block_nr * start, + reiser4_ba_flags_t flags) +{ + reiser4_block_nr one = 1; + return reiser4_alloc_blocks(hint, start, &one, flags); +} + +static inline int reiser4_dealloc_block(const reiser4_block_nr * block, + block_stage_t stage, + reiser4_ba_flags_t flags) +{ + const reiser4_block_nr one = 1; + return reiser4_dealloc_blocks(block, &one, stage, flags); +} + +#define reiser4_grab_space_force(count, flags) \ + reiser4_grab_space(count, flags | BA_FORCE) + +extern void grabbed2free_mark(__u64 mark); +extern int reiser4_grab_reserved(struct super_block *, + __u64, reiser4_ba_flags_t); +extern void reiser4_release_reserved(struct super_block *super); + +/* grabbed -> fake_allocated */ + +/* fake_allocated -> used */ + +/* used -> fake_allocated -> grabbed -> free */ + +extern void flush_reserved2grabbed(txn_atom * atom, __u64 count); + +extern int reiser4_blocknr_is_fake(const reiser4_block_nr * da); + +extern void grabbed2cluster_reserved(int count); +extern void cluster_reserved2grabbed(int count); +extern void cluster_reserved2free(int count); + +extern int reiser4_check_block_counters(const struct super_block *); + + +extern int reiser4_check_blocks(const reiser4_block_nr *start, + const reiser4_block_nr *len, int desired); + +static inline int reiser4_check_block(const reiser4_block_nr *start, + int desired) +{ + return reiser4_check_blocks(start, NULL, desired); +} + +extern int reiser4_pre_commit_hook(void); +extern void reiser4_post_commit_hook(void); +extern void reiser4_post_write_back_hook(void); + +#endif /* __FS_REISER4_BLOCK_ALLOC_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/blocknrlist.c b/fs/reiser4/blocknrlist.c new file mode 100644 index 000000000000..39a4a9bd78ec --- /dev/null +++ b/fs/reiser4/blocknrlist.c @@ -0,0 +1,336 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* This is a block list implementation, used to create ordered block sets + (at the cost of being less memory efficient than blocknr_set). + It is used by discard code. */ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "context.h" +#include "super.h" + +#include +#include + +static struct kmem_cache *blocknr_list_slab = NULL; + +/** + * Represents an extent range [@start; @end). + */ +struct blocknr_list_entry { + reiser4_block_nr start, len; + struct list_head link; +}; + +#define blocknr_list_entry(ptr) list_entry(ptr, blocknr_list_entry, link) + +static void blocknr_list_entry_init(blocknr_list_entry *entry) +{ + assert("intelfx-11", entry != NULL); + + entry->start = 0; + entry->len = 0; + INIT_LIST_HEAD(&entry->link); +} + +static blocknr_list_entry *blocknr_list_entry_alloc(void) +{ + blocknr_list_entry *entry; + + entry = (blocknr_list_entry *)kmem_cache_alloc(blocknr_list_slab, + reiser4_ctx_gfp_mask_get()); + if (entry == NULL) { + return NULL; + } + + blocknr_list_entry_init(entry); + + return entry; +} + +static void blocknr_list_entry_free(blocknr_list_entry *entry) +{ + assert("intelfx-12", entry != NULL); + + kmem_cache_free(blocknr_list_slab, entry); +} + +/** + * Given ranges @to and [@start; @end), if they overlap, their union + * is calculated and saved in @to. 
+ */ +static int blocknr_list_entry_merge(blocknr_list_entry *to, + reiser4_block_nr start, + reiser4_block_nr len) +{ + reiser4_block_nr end, to_end; + + assert("intelfx-13", to != NULL); + + assert("intelfx-16", to->len > 0); + assert("intelfx-17", len > 0); + + end = start + len; + to_end = to->start + to->len; + + if ((to->start <= end) && (start <= to_end)) { + if (start < to->start) { + to->start = start; + } + + if (end > to_end) { + to_end = end; + } + + to->len = to_end - to->start; + + return 0; + } + + return -1; +} + +static int blocknr_list_entry_merge_entry(blocknr_list_entry *to, + blocknr_list_entry *from) +{ + assert("intelfx-18", from != NULL); + + return blocknr_list_entry_merge(to, from->start, from->len); +} + +/** + * A comparison function for list_sort(). + * + * "The comparison function @cmp must return a negative value if @a + * should sort before @b, and a positive value if @a should sort after + * @b. If @a and @b are equivalent, and their original relative + * ordering is to be preserved, @cmp must return 0." + */ +static int blocknr_list_entry_compare(void* priv UNUSED_ARG, + struct list_head *a, struct list_head *b) +{ + blocknr_list_entry *entry_a, *entry_b; + reiser4_block_nr entry_a_end, entry_b_end; + + assert("intelfx-19", a != NULL); + assert("intelfx-20", b != NULL); + + entry_a = blocknr_list_entry(a); + entry_b = blocknr_list_entry(b); + + entry_a_end = entry_a->start + entry_a->len; + entry_b_end = entry_b->start + entry_b->len; + + /* First sort by starting block numbers... */ + if (entry_a->start < entry_b->start) { + return -1; + } + + if (entry_a->start > entry_b->start) { + return 1; + } + + /** Then by ending block numbers. + * If @a contains @b, it will be sorted before. 
*/ + if (entry_a_end > entry_b_end) { + return -1; + } + + if (entry_a_end < entry_b_end) { + return 1; + } + + return 0; +} + +int blocknr_list_init_static(void) +{ + assert("intelfx-54", blocknr_list_slab == NULL); + + blocknr_list_slab = kmem_cache_create("blocknr_list_entry", + sizeof(blocknr_list_entry), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + if (blocknr_list_slab == NULL) { + return RETERR(-ENOMEM); + } + + return 0; +} + +void blocknr_list_done_static(void) +{ + destroy_reiser4_cache(&blocknr_list_slab); +} + +void blocknr_list_init(struct list_head* blist) +{ + assert("intelfx-24", blist != NULL); + + INIT_LIST_HEAD(blist); +} + +void blocknr_list_destroy(struct list_head* blist) +{ + struct list_head *pos, *tmp; + blocknr_list_entry *entry; + + assert("intelfx-25", blist != NULL); + + list_for_each_safe(pos, tmp, blist) { + entry = blocknr_list_entry(pos); + list_del_init(pos); + blocknr_list_entry_free(entry); + } + + assert("intelfx-48", list_empty(blist)); +} + +void blocknr_list_merge(struct list_head *from, struct list_head *to) +{ + assert("intelfx-26", from != NULL); + assert("intelfx-27", to != NULL); + + list_splice_tail_init(from, to); + + assert("intelfx-49", list_empty(from)); +} + +void blocknr_list_sort_and_join(struct list_head *blist) +{ + struct list_head *pos, *next; + struct blocknr_list_entry *entry, *next_entry; + + assert("intelfx-50", blist != NULL); + + /* Step 1. Sort the extent list. */ + list_sort(NULL, blist, blocknr_list_entry_compare); + + /* Step 2. Join adjacent extents in the list. */ + pos = blist->next; + next = pos->next; + entry = blocknr_list_entry(pos); + + for (; next != blist; next = pos->next) { + /** @next is a valid node at this point */ + next_entry = blocknr_list_entry(next); + + /** try to merge @next into @pos */ + if (!blocknr_list_entry_merge_entry(entry, next_entry)) { + /** successful; delete the @next node. + * next merge will be attempted into the same node. 
*/ + list_del_init(next); + blocknr_list_entry_free(next_entry); + } else { + /** otherwise advance @pos. */ + pos = next; + entry = next_entry; + } + } +} + +int blocknr_list_add_extent(txn_atom *atom, + struct list_head *blist, + blocknr_list_entry **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len) +{ + assert("intelfx-29", atom != NULL); + assert("intelfx-42", atom_is_protected(atom)); + assert("intelfx-43", blist != NULL); + assert("intelfx-30", new_entry != NULL); + assert("intelfx-31", start != NULL); + assert("intelfx-32", len != NULL && *len > 0); + + if (*new_entry == NULL) { + /* + * Optimization: try to merge new extent into the last one. + */ + if (!list_empty(blist)) { + blocknr_list_entry *last_entry; + last_entry = blocknr_list_entry(blist->prev); + if (!blocknr_list_entry_merge(last_entry, *start, *len)) { + return 0; + } + } + + /* + * Otherwise, allocate a new entry and tell -E_REPEAT. + * Next time we'll take the branch below. + */ + spin_unlock_atom(atom); + *new_entry = blocknr_list_entry_alloc(); + return (*new_entry != NULL) ? -E_REPEAT : RETERR(-ENOMEM); + } + + /* + * The entry has been allocated beforehand, fill it and link to the list. + */ + (*new_entry)->start = *start; + (*new_entry)->len = *len; + list_add_tail(&(*new_entry)->link, blist); + + return 0; +} + +int blocknr_list_iterator(txn_atom *atom, + struct list_head *blist, + blocknr_set_actor_f actor, + void *data, + int delete) +{ + struct list_head *pos; + blocknr_list_entry *entry; + int ret = 0; + + assert("intelfx-46", blist != NULL); + assert("intelfx-47", actor != NULL); + + if (delete) { + struct list_head *tmp; + + list_for_each_safe(pos, tmp, blist) { + entry = blocknr_list_entry(pos); + + /* + * Do not exit, delete flag is set. Instead, on the first error we + * downgrade from iterating to just deleting. 
+ */ + if (ret == 0) { + ret = actor(atom, &entry->start, &entry->len, data); + } + + list_del_init(pos); + blocknr_list_entry_free(entry); + } + + assert("intelfx-44", list_empty(blist)); + } else { + list_for_each(pos, blist) { + entry = blocknr_list_entry(pos); + + ret = actor(atom, &entry->start, &entry->len, data); + + if (ret != 0) { + return ret; + } + } + } + + return ret; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/blocknrset.c b/fs/reiser4/blocknrset.c new file mode 100644 index 000000000000..2f18cbc10da3 --- /dev/null +++ b/fs/reiser4/blocknrset.c @@ -0,0 +1,399 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by +reiser4/README */ + +/* This file contains code for various block number sets used by the atom to + track the deleted set and wandered block mappings. */ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "context.h" +#include "super.h" + +#include + +/* The proposed data structure for storing unordered block number sets is a + list of elements, each of which contains an array of block number or/and + array of block number pairs. That element called blocknr_set_entry is used + to store block numbers from the beginning and for extents from the end of + the data field (char data[...]). The ->nr_blocks and ->nr_pairs fields + count numbers of blocks and extents. + + +------------------- blocknr_set_entry->data ------------------+ + |block1|block2| ... ... |pair3|pair2|pair1| + +------------------------------------------------------------+ + + When current blocknr_set_entry is full, allocate a new one. 
*/ + +/* Usage examples: blocknr sets are used in reiser4 for storing atom's delete + * set (single blocks and block extents), in that case blocknr pair represent an + * extent; atom's wandered map is also stored as a blocknr set, blocknr pairs + * there represent a (real block) -> (wandered block) mapping. */ + +/* Protection: blocknr sets belong to reiser4 atom, and + * their modifications are performed with the atom lock held */ + +/* The total size of a blocknr_set_entry. */ +#define BLOCKNR_SET_ENTRY_SIZE 128 + +/* The number of blocks that can fit the blocknr data area. */ +#define BLOCKNR_SET_ENTRIES_NUMBER \ + ((BLOCKNR_SET_ENTRY_SIZE - \ + 2 * sizeof(unsigned) - \ + sizeof(struct list_head)) / \ + sizeof(reiser4_block_nr)) + +static struct kmem_cache *blocknr_set_slab = NULL; + +/* An entry of the blocknr_set */ +struct blocknr_set_entry { + unsigned nr_singles; + unsigned nr_pairs; + struct list_head link; + reiser4_block_nr entries[BLOCKNR_SET_ENTRIES_NUMBER]; +}; + +/* A pair of blocks as recorded in the blocknr_set_entry data. */ +struct blocknr_pair { + reiser4_block_nr a; + reiser4_block_nr b; +}; + +/* Return the number of blocknr slots available in a blocknr_set_entry. */ +/* Audited by: green(2002.06.11) */ +static unsigned bse_avail(blocknr_set_entry * bse) +{ + unsigned used = bse->nr_singles + 2 * bse->nr_pairs; + + assert("jmacd-5088", BLOCKNR_SET_ENTRIES_NUMBER >= used); + cassert(sizeof(blocknr_set_entry) == BLOCKNR_SET_ENTRY_SIZE); + + return BLOCKNR_SET_ENTRIES_NUMBER - used; +} + +/* Initialize a blocknr_set_entry. */ +static void bse_init(blocknr_set_entry *bse) +{ + bse->nr_singles = 0; + bse->nr_pairs = 0; + INIT_LIST_HEAD(&bse->link); +} + +/* Allocate and initialize a blocknr_set_entry. 
*/ +/* Audited by: green(2002.06.11) */ +static blocknr_set_entry *bse_alloc(void) +{ + blocknr_set_entry *e; + + if ((e = (blocknr_set_entry *) kmem_cache_alloc(blocknr_set_slab, + reiser4_ctx_gfp_mask_get())) == NULL) + return NULL; + + bse_init(e); + + return e; +} + +/* Free a blocknr_set_entry. */ +/* Audited by: green(2002.06.11) */ +static void bse_free(blocknr_set_entry * bse) +{ + kmem_cache_free(blocknr_set_slab, bse); +} + +/* Add a block number to a blocknr_set_entry */ +/* Audited by: green(2002.06.11) */ +static void +bse_put_single(blocknr_set_entry * bse, const reiser4_block_nr * block) +{ + assert("jmacd-5099", bse_avail(bse) >= 1); + + bse->entries[bse->nr_singles++] = *block; +} + +/* Get a pair of block numbers */ +/* Audited by: green(2002.06.11) */ +static inline struct blocknr_pair *bse_get_pair(blocknr_set_entry * bse, + unsigned pno) +{ + assert("green-1", BLOCKNR_SET_ENTRIES_NUMBER >= 2 * (pno + 1)); + + return (struct blocknr_pair *) (bse->entries + + BLOCKNR_SET_ENTRIES_NUMBER - + 2 * (pno + 1)); +} + +/* Add a pair of block numbers to a blocknr_set_entry */ +/* Audited by: green(2002.06.11) */ +static void +bse_put_pair(blocknr_set_entry * bse, const reiser4_block_nr * a, + const reiser4_block_nr * b) +{ + struct blocknr_pair *pair; + + assert("jmacd-5100", bse_avail(bse) >= 2 && a != NULL && b != NULL); + + pair = bse_get_pair(bse, bse->nr_pairs++); + + pair->a = *a; + pair->b = *b; +} + +/* Add either a block or pair of blocks to the block number set. The first + blocknr (@a) must be non-NULL. If @b is NULL a single blocknr is added, if + @b is non-NULL a pair is added. The block number set belongs to atom, and + the call is made with the atom lock held. There may not be enough space in + the current blocknr_set_entry. If new_bsep points to a non-NULL + blocknr_set_entry then it will be added to the blocknr_set and new_bsep + will be set to NULL. 
If new_bsep contains NULL then the atom lock will be
   released and a new bse will be allocated in new_bsep. E_REPEAT will be
   returned with the atom unlocked for the operation to be tried again. If
   the operation succeeds, 0 is returned. If new_bsep is non-NULL and not
   used during the call, it will be freed automatically. */
static int blocknr_set_add(txn_atom *atom, struct list_head *bset,
			   blocknr_set_entry **new_bsep,
			   const reiser4_block_nr *a,
			   const reiser4_block_nr *b)
{
	blocknr_set_entry *bse;
	unsigned needed;

	assert("jmacd-5101", a != NULL);

	needed = (b == NULL) ? 1 : 2;
	if (list_empty(bset) ||
	    bse_avail(list_entry(bset->next, blocknr_set_entry, link)) < needed) {
		/* The front entry (if any) is full: a fresh one is needed. */
		if (*new_bsep == NULL) {
			/* None pre-allocated: drop the atom lock, allocate,
			   and ask the caller to retry (or report -ENOMEM). */
			spin_unlock_atom(atom);
			*new_bsep = bse_alloc();
			return (*new_bsep != NULL) ? -E_REPEAT :
				RETERR(-ENOMEM);
		}

		/* Consume the pre-allocated entry: link it at the head. */
		list_add(&((*new_bsep)->link), bset);

		*new_bsep = NULL;
	}

	/* Record the single block or the pair in the front entry. */
	bse = list_entry(bset->next, blocknr_set_entry, link);
	if (b == NULL)
		bse_put_single(bse, a);
	else
		bse_put_pair(bse, a, b);

	/* A pre-allocated entry survived unused (another caller made room
	   first while the lock was dropped): release this copy. */
	if (*new_bsep != NULL) {
		bse_free(*new_bsep);
		*new_bsep = NULL;
	}

	return 0;
}

/* Add an extent to the block set. If the length is 1, it is treated as a
   single block (e.g., reiser4_set_add_block). */
/* Audited by: green(2002.06.11) */
/* Auditor note: Entire call chain cannot hold any spinlocks, because
   kmalloc might schedule. The only exception is atom spinlock, which is
   properly freed.
*/ +int +blocknr_set_add_extent(txn_atom * atom, + struct list_head *bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * start, + const reiser4_block_nr * len) +{ + assert("jmacd-5102", start != NULL && len != NULL && *len > 0); + return blocknr_set_add(atom, bset, new_bsep, start, + *len == 1 ? NULL : len); +} + +/* Add a block pair to the block set. It adds exactly a pair, which is checked + * by an assertion that both arguments are not null.*/ +/* Audited by: green(2002.06.11) */ +/* Auditor note: Entire call chain cannot hold any spinlocks, because + kmalloc might schedule. The only exception is atom spinlock, which is + properly freed. */ +int +blocknr_set_add_pair(txn_atom * atom, + struct list_head *bset, + blocknr_set_entry ** new_bsep, const reiser4_block_nr * a, + const reiser4_block_nr * b) +{ + assert("jmacd-5103", a != NULL && b != NULL); + return blocknr_set_add(atom, bset, new_bsep, a, b); +} + +/* Initialize slab cache of blocknr_set_entry objects. */ +int blocknr_set_init_static(void) +{ + assert("intelfx-55", blocknr_set_slab == NULL); + + blocknr_set_slab = kmem_cache_create("blocknr_set_entry", + sizeof(blocknr_set_entry), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + + if (blocknr_set_slab == NULL) { + return RETERR(-ENOMEM); + } + + return 0; +} + +/* Destroy slab cache of blocknr_set_entry objects. */ +void blocknr_set_done_static(void) +{ + destroy_reiser4_cache(&blocknr_set_slab); +} + +/* Initialize a blocknr_set. */ +void blocknr_set_init(struct list_head *bset) +{ + INIT_LIST_HEAD(bset); +} + +/* Release the entries of a blocknr_set. */ +void blocknr_set_destroy(struct list_head *bset) +{ + blocknr_set_entry *bse; + + while (!list_empty(bset)) { + bse = list_entry(bset->next, blocknr_set_entry, link); + list_del_init(&bse->link); + bse_free(bse); + } +} + +/* Merge blocknr_set entries out of @from into @into. 
*/ +/* Audited by: green(2002.06.11) */ +/* Auditor comments: This merge does not know if merged sets contain + blocks pairs (As for wandered sets) or extents, so it cannot really merge + overlapping ranges if there is some. So I believe it may lead to + some blocks being presented several times in one blocknr_set. To help + debugging such problems it might help to check for duplicate entries on + actual processing of this set. Testing this kind of stuff right here is + also complicated by the fact that these sets are not sorted and going + through whole set on each element addition is going to be CPU-heavy task */ +void blocknr_set_merge(struct list_head *from, struct list_head *into) +{ + blocknr_set_entry *bse_into = NULL; + + /* If @from is empty, no work to perform. */ + if (list_empty(from)) + return; + /* If @into is not empty, try merging partial-entries. */ + if (!list_empty(into)) { + + /* Neither set is empty, pop the front to members and try to + combine them. */ + blocknr_set_entry *bse_from; + unsigned into_avail; + + bse_into = list_entry(into->next, blocknr_set_entry, link); + list_del_init(&bse_into->link); + bse_from = list_entry(from->next, blocknr_set_entry, link); + list_del_init(&bse_from->link); + + /* Combine singles. */ + for (into_avail = bse_avail(bse_into); + into_avail != 0 && bse_from->nr_singles != 0; + into_avail -= 1) { + bse_put_single(bse_into, + &bse_from->entries[--bse_from-> + nr_singles]); + } + + /* Combine pairs. */ + for (; into_avail > 1 && bse_from->nr_pairs != 0; + into_avail -= 2) { + struct blocknr_pair *pair = + bse_get_pair(bse_from, --bse_from->nr_pairs); + bse_put_pair(bse_into, &pair->a, &pair->b); + } + + /* If bse_from is empty, delete it now. */ + if (bse_avail(bse_from) == BLOCKNR_SET_ENTRIES_NUMBER) { + bse_free(bse_from); + } else { + /* Otherwise, bse_into is full or nearly full (e.g., + it could have one slot avail and bse_from has one + pair left). Push it back onto the list. 
bse_from + becomes bse_into, which will be the new partial. */ + list_add(&bse_into->link, into); + bse_into = bse_from; + } + } + + /* Splice lists together. */ + list_splice_init(from, into->prev); + + /* Add the partial entry back to the head of the list. */ + if (bse_into != NULL) + list_add(&bse_into->link, into); +} + +/* Iterate over all blocknr set elements. */ +int blocknr_set_iterator(txn_atom *atom, struct list_head *bset, + blocknr_set_actor_f actor, void *data, int delete) +{ + + blocknr_set_entry *entry; + + assert("zam-429", atom != NULL); + assert("zam-430", atom_is_protected(atom)); + assert("zam-431", bset != 0); + assert("zam-432", actor != NULL); + + entry = list_entry(bset->next, blocknr_set_entry, link); + while (bset != &entry->link) { + blocknr_set_entry *tmp = list_entry(entry->link.next, blocknr_set_entry, link); + unsigned int i; + int ret; + + for (i = 0; i < entry->nr_singles; i++) { + ret = actor(atom, &entry->entries[i], NULL, data); + + /* We can't break a loop if delete flag is set. */ + if (ret != 0 && !delete) + return ret; + } + + for (i = 0; i < entry->nr_pairs; i++) { + struct blocknr_pair *ab; + + ab = bse_get_pair(entry, i); + + ret = actor(atom, &ab->a, &ab->b, data); + + if (ret != 0 && !delete) + return ret; + } + + if (delete) { + list_del(&entry->link); + bse_free(entry); + } + + entry = tmp; + } + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/carry.c b/fs/reiser4/carry.c new file mode 100644 index 000000000000..536ab6213642 --- /dev/null +++ b/fs/reiser4/carry.c @@ -0,0 +1,1408 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ +/* Functions to "carry" tree modification(s) upward. */ +/* Tree is modified one level at a time. 
As we modify a level we accumulate a + set of changes that need to be propagated to the next level. We manage + node locking such that any searches that collide with carrying are + restarted, from the root if necessary. + + Insertion of a new item may result in items being moved among nodes and + this requires the delimiting key to be updated at the least common parent + of the nodes modified to preserve search tree invariants. Also, insertion + may require allocation of a new node. A pointer to the new node has to be + inserted into some node on the parent level, etc. + + Tree carrying is meant to be analogous to arithmetic carrying. + + A carry operation is always associated with some node (&carry_node). + + Carry process starts with some initial set of operations to be performed + and an initial set of already locked nodes. Operations are performed one + by one. Performing each single operation has following possible effects: + + - content of carry node associated with operation is modified + - new carry nodes are locked and involved into carry process on this level + - new carry operations are posted to the next level + + After all carry operations on this level are done, process is repeated for + the accumulated sequence on carry operations for the next level. This + starts by trying to lock (in left to right order) all carry nodes + associated with carry operations on the parent level. After this, we decide + whether more nodes are required on the left of already locked set. If so, + all locks taken on the parent level are released, new carry nodes are + added, and locking process repeats. + + It may happen that balancing process fails owing to unrecoverable error on + some of upper levels of a tree (possible causes are io error, failure to + allocate new node, etc.). In this case we should unmount the filesystem, + rebooting if it is the root, and possibly advise the use of fsck. + + USAGE: + + int some_tree_operation( znode *node, ... 
) + { + // Allocate on a stack pool of carry objects: operations and nodes. + // Most carry processes will only take objects from here, without + // dynamic allocation. + +I feel uneasy about this pool. It adds to code complexity, I understand why it +exists, but.... -Hans + + carry_pool pool; + carry_level lowest_level; + carry_op *op; + + init_carry_pool( &pool ); + init_carry_level( &lowest_level, &pool ); + + // operation may be one of: + // COP_INSERT --- insert new item into node + // COP_CUT --- remove part of or whole node + // COP_PASTE --- increase size of item + // COP_DELETE --- delete pointer from parent node + // COP_UPDATE --- update delimiting key in least + // common ancestor of two + + op = reiser4_post_carry( &lowest_level, operation, node, 0 ); + if( IS_ERR( op ) || ( op == NULL ) ) { + handle error + } else { + // fill in remaining fields in @op, according to carry.h:carry_op + result = carry(&lowest_level, NULL); + } + done_carry_pool(&pool); + } + + When you are implementing node plugin method that participates in carry + (shifting, insertion, deletion, etc.), do the following: + + int foo_node_method(znode * node, ..., carry_level * todo) + { + carry_op *op; + + .... + + // note, that last argument to reiser4_post_carry() is non-null + // here, because @op is to be applied to the parent of @node, rather + // than to the @node itself as in the previous case. + + op = node_post_carry(todo, operation, node, 1); + // fill in remaining fields in @op, according to carry.h:carry_op + + .... + + } + + BATCHING: + + One of the main advantages of level-by-level balancing implemented here is + ability to batch updates on a parent level and to peform them more + efficiently as a result. + + Description To Be Done (TBD). + + DIFFICULTIES AND SUBTLE POINTS: + + 1. complex plumbing is required, because: + + a. effective allocation through pools is needed + + b. target of operation is not exactly known when operation is + posted. 
This is worked around through bitfields in &carry_node and + logic in lock_carry_node() + + c. of interaction with locking code: node should be added into sibling + list when pointer to it is inserted into its parent, which is some time + after node was created. Between these moments, node is somewhat in + suspended state and is only registered in the carry lists + + 2. whole balancing logic is implemented here, in particular, insertion + logic is coded in make_space(). + + 3. special cases like insertion (reiser4_add_tree_root()) or deletion + (reiser4_kill_tree_root()) of tree root and morphing of paste into insert + (insert_paste()) have to be handled. + + 4. there is non-trivial interdependency between allocation of new nodes + and almost everything else. This is mainly due to the (1.c) above. I shall + write about this later. + +*/ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/item/extent.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "tree_mod.h" +#include "tree_walk.h" +#include "block_alloc.h" +#include "pool.h" +#include "tree.h" +#include "carry.h" +#include "carry_ops.h" +#include "super.h" +#include "reiser4.h" + +#include + +/* level locking/unlocking */ +static int lock_carry_level(carry_level * level); +static void unlock_carry_level(carry_level * level, int failure); +static void done_carry_level(carry_level * level); +static void unlock_carry_node(carry_level * level, carry_node * node, int fail); + +int lock_carry_node(carry_level * level, carry_node * node); +int lock_carry_node_tail(carry_node * node); + +/* carry processing proper */ +static int carry_on_level(carry_level * doing, carry_level * todo); + +static carry_op *add_op(carry_level * level, pool_ordering order, + carry_op * reference); + +/* handlers for carry operations. 
*/ + +static void fatal_carry_error(carry_level * doing, int ecode); +static int add_new_root(carry_level * level, carry_node * node, znode * fake); + +static void print_level(const char *prefix, carry_level * level); + +#if REISER4_DEBUG +typedef enum { + CARRY_TODO, + CARRY_DOING +} carry_queue_state; +static int carry_level_invariant(carry_level * level, carry_queue_state state); +#endif + +/* main entry point for tree balancing. + + Tree carry performs operations from @doing and while doing so accumulates + information about operations to be performed on the next level ("carried" + to the parent level). Carried operations are performed, causing possibly + more operations to be carried upward etc. carry() takes care about + locking and pinning znodes while operating on them. + + For usage, see comment at the top of fs/reiser4/carry.c + +*/ +int reiser4_carry(carry_level * doing /* set of carry operations to be + * performed */ , + carry_level * done /* set of nodes, already performed + * at the previous level. + * NULL in most cases */) +{ + int result = 0; + gfp_t old_mask; + /* queue of new requests */ + carry_level *todo; + ON_DEBUG(STORE_COUNTERS); + + assert("nikita-888", doing != NULL); + BUG_ON(done != NULL); + + todo = doing + 1; + init_carry_level(todo, doing->pool); + + /* queue of requests preformed on the previous level */ + done = todo + 1; + init_carry_level(done, doing->pool); + /* + * NOTE: We are not allowed to fail in the loop below. + * Incomplete carry (even if carry_on_level is complete) + * can leave the tree in an inconsistent state (broken + * order of keys in a node, etc). + */ + old_mask = get_current_context()->gfp_mask; + get_current_context()->gfp_mask |= __GFP_NOFAIL; + + /* iterate until there is nothing more to do */ + while (result == 0 && doing->ops_num > 0) { + carry_level *tmp; + + /* at this point @done is locked. 
*/ + /* repeat lock/do/unlock while + + (1) lock_carry_level() fails due to deadlock avoidance, or + + (2) carry_on_level() decides that more nodes have to + be involved. + + (3) some unexpected error occurred while balancing on the + upper levels. In this case all changes are rolled back. + + */ + while (1) { + result = lock_carry_level(doing); + if (result == 0) { + /* perform operations from @doing and + accumulate new requests in @todo */ + result = carry_on_level(doing, todo); + if (result == 0) + break; + else if (result != -E_REPEAT || + !doing->restartable) { + warning("nikita-1043", + "Fatal error during carry: %i", + result); + print_level("done", done); + print_level("doing", doing); + print_level("todo", todo); + /* do some rough stuff like aborting + all pending transcrashes and thus + pushing tree back to the consistent + state. Alternatvely, just panic. + */ + fatal_carry_error(doing, result); + return result; + } + } else if (result != -E_REPEAT) { + fatal_carry_error(doing, result); + return result; + } + unlock_carry_level(doing, 1); + } + /* at this point @done can be safely unlocked */ + done_carry_level(done); + + /* cyclically shift queues */ + tmp = done; + done = doing; + doing = todo; + todo = tmp; + init_carry_level(todo, doing->pool); + + /* give other threads chance to run */ + reiser4_preempt_point(); + } + get_current_context()->gfp_mask = old_mask; + done_carry_level(done); + + /* all counters, but x_refs should remain the same. x_refs can change + owing to transaction manager */ + ON_DEBUG(CHECK_COUNTERS); + return result; +} + +/* perform carry operations on given level. + + Optimizations proposed by pooh: + + (1) don't lock all nodes from queue at the same time. Lock nodes lazily as + required; + + (2) unlock node if there are no more operations to be performed upon it and + node didn't add any operation to @todo. 
This can be implemented by + attaching to each node two counters: counter of operaions working on this + node and counter and operations carried upward from this node. + +*/ +static int carry_on_level(carry_level * doing /* queue of carry operations to + * do on this level */ , + carry_level * todo /* queue where new carry + * operations to be performed on + * the * parent level are + * accumulated during @doing + * processing. */ ) +{ + int result; + int (*f) (carry_op *, carry_level *, carry_level *); + carry_op *op; + carry_op *tmp_op; + + assert("nikita-1034", doing != NULL); + assert("nikita-1035", todo != NULL); + + /* @doing->nodes are locked. */ + + /* This function can be split into two phases: analysis and modification + + Analysis calculates precisely what items should be moved between + nodes. This information is gathered in some structures attached to + each carry_node in a @doing queue. Analysis also determines whether + new nodes are to be allocated etc. + + After analysis is completed, actual modification is performed. Here + we can take advantage of "batch modification": if there are several + operations acting on the same node, modifications can be performed + more efficiently when batched together. + + Above is an optimization left for the future. + */ + /* Important, but delayed optimization: it's possible to batch + operations together and perform them more efficiently as a + result. For example, deletion of several neighboring items from a + node can be converted to a single ->cut() operation. + + Before processing queue, it should be scanned and "mergeable" + operations merged. + */ + result = 0; + for_all_ops(doing, op, tmp_op) { + carry_opcode opcode; + + assert("nikita-1041", op != NULL); + opcode = op->op; + assert("nikita-1042", op->op < COP_LAST_OP); + f = op_dispatch_table[op->op].handler; + result = f(op, doing, todo); + /* locking can fail with -E_REPEAT. 
Any different error is fatal + and will be handled by fatal_carry_error() sledgehammer. + */ + if (result != 0) + break; + } + if (result == 0) { + carry_plugin_info info; + carry_node *scan; + carry_node *tmp_scan; + + info.doing = doing; + info.todo = todo; + + assert("nikita-3002", + carry_level_invariant(doing, CARRY_DOING)); + for_all_nodes(doing, scan, tmp_scan) { + znode *node; + + node = reiser4_carry_real(scan); + assert("nikita-2547", node != NULL); + if (node_is_empty(node)) { + result = + node_plugin_by_node(node)-> + prepare_removal(node, &info); + if (result != 0) + break; + } + } + } + return result; +} + +/* post carry operation + + This is main function used by external carry clients: node layout plugins + and tree operations to create new carry operation to be performed on some + level. + + New operation will be included in the @level queue. To actually perform it, + call carry( level, ... ). This function takes write lock on @node. Carry + manages all its locks by itself, don't worry about this. + + This function adds operation and node at the end of the queue. It is up to + caller to guarantee proper ordering of node queue. + +*/ +carry_op * reiser4_post_carry(carry_level * level /* queue where new operation + * is to be posted at */ , + carry_opcode op /* opcode of operation */ , + znode * node /* node on which this operation + * will operate */ , + int apply_to_parent_p /* whether operation will + * operate directly on @node + * or on it parent. 
*/) +{ + carry_op *result; + carry_node *child; + + assert("nikita-1046", level != NULL); + assert("nikita-1788", znode_is_write_locked(node)); + + result = add_op(level, POOLO_LAST, NULL); + if (IS_ERR(result)) + return result; + child = reiser4_add_carry(level, POOLO_LAST, NULL); + if (IS_ERR(child)) { + reiser4_pool_free(&level->pool->op_pool, &result->header); + return (carry_op *) child; + } + result->node = child; + result->op = op; + child->parent = apply_to_parent_p; + if (ZF_ISSET(node, JNODE_ORPHAN)) + child->left_before = 1; + child->node = node; + return result; +} + +/* initialize carry queue */ +void init_carry_level(carry_level * level /* level to initialize */ , + carry_pool * pool /* pool @level will allocate objects + * from */ ) +{ + assert("nikita-1045", level != NULL); + assert("nikita-967", pool != NULL); + + memset(level, 0, sizeof *level); + level->pool = pool; + + INIT_LIST_HEAD(&level->nodes); + INIT_LIST_HEAD(&level->ops); +} + +/* allocate carry pool and initialize pools within queue */ +carry_pool *init_carry_pool(int size) +{ + carry_pool *pool; + + assert("", size >= sizeof(carry_pool) + 3 * sizeof(carry_level)); + pool = kmalloc(size, reiser4_ctx_gfp_mask_get()); + if (pool == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + reiser4_init_pool(&pool->op_pool, sizeof(carry_op), CARRIES_POOL_SIZE, + (char *)pool->op); + reiser4_init_pool(&pool->node_pool, sizeof(carry_node), + NODES_LOCKED_POOL_SIZE, (char *)pool->node); + return pool; +} + +/* finish with queue pools */ +void done_carry_pool(carry_pool * pool/* pool to destroy */) +{ + reiser4_done_pool(&pool->op_pool); + reiser4_done_pool(&pool->node_pool); + kfree(pool); +} + +/* add new carry node to the @level. + + Returns pointer to the new carry node allocated from pool. It's up to + callers to maintain proper order in the @level. 
Assumption is that if carry + nodes on one level are already sorted and modifications are peroformed from + left to right, carry nodes added on the parent level will be ordered + automatically. To control ordering use @order and @reference parameters. + +*/ +carry_node *reiser4_add_carry_skip(carry_level * level /* &carry_level to add + * node to */ , + pool_ordering order /* where to insert: + * at the beginning of + * @level, + * before @reference, + * after @reference, + * at the end of @level + */ , + carry_node * reference/* reference node for + * insertion */) +{ + ON_DEBUG(carry_node * orig_ref = reference); + + if (order == POOLO_BEFORE) { + reference = find_left_carry(reference, level); + if (reference == NULL) + reference = list_entry(level->nodes.next, carry_node, + header.level_linkage); + else + reference = list_entry(reference->header.level_linkage.next, + carry_node, header.level_linkage); + } else if (order == POOLO_AFTER) { + reference = find_right_carry(reference, level); + if (reference == NULL) + reference = list_entry(level->nodes.prev, carry_node, + header.level_linkage); + else + reference = list_entry(reference->header.level_linkage.prev, + carry_node, header.level_linkage); + } + assert("nikita-2209", + ergo(orig_ref != NULL, + reiser4_carry_real(reference) == + reiser4_carry_real(orig_ref))); + return reiser4_add_carry(level, order, reference); +} + +carry_node *reiser4_add_carry(carry_level * level, /* carry_level to add + node to */ + pool_ordering order, /* where to insert: + * at the beginning of + * @level; + * before @reference; + * after @reference; + * at the end of @level + */ + carry_node * reference /* reference node for + * insertion */) +{ + carry_node *result; + + result = + (carry_node *) reiser4_add_obj(&level->pool->node_pool, + &level->nodes, + order, &reference->header); + if (!IS_ERR(result) && (result != NULL)) + ++level->nodes_num; + return result; +} + +/** + * add new carry operation to the @level. 
+ * + * Returns pointer to the new carry operations allocated from pool. It's up to + * callers to maintain proper order in the @level. To control ordering use + * @order and @reference parameters. + */ +static carry_op *add_op(carry_level * level, /* &carry_level to add node to */ + pool_ordering order, /* where to insert: + * at the beginning of @level; + * before @reference; + * after @reference; + * at the end of @level */ + carry_op * reference /* reference node for insertion */) +{ + carry_op *result; + + result = + (carry_op *) reiser4_add_obj(&level->pool->op_pool, &level->ops, + order, &reference->header); + if (!IS_ERR(result) && (result != NULL)) + ++level->ops_num; + return result; +} + +/** + * Return node on the right of which @node was created. + * + * Each node is created on the right of some existing node (or it is new root, + * which is special case not handled here). + * + * @node is new node created on some level, but not yet inserted into its + * parent, it has corresponding bit (JNODE_ORPHAN) set in zstate. 
+ */ +static carry_node *find_begetting_brother(carry_node * node,/* node to start + search from */ + carry_level * kin UNUSED_ARG + /* level to scan */) +{ + carry_node *scan; + + assert("nikita-1614", node != NULL); + assert("nikita-1615", kin != NULL); + assert("nikita-1616", LOCK_CNT_GTZ(rw_locked_tree)); + assert("nikita-1619", ergo(reiser4_carry_real(node) != NULL, + ZF_ISSET(reiser4_carry_real(node), + JNODE_ORPHAN))); + for (scan = node;; + scan = list_entry(scan->header.level_linkage.prev, carry_node, + header.level_linkage)) { + assert("nikita-1617", &kin->nodes != &scan->header.level_linkage); + if ((scan->node != node->node) && + !ZF_ISSET(scan->node, JNODE_ORPHAN)) { + assert("nikita-1618", reiser4_carry_real(scan) != NULL); + break; + } + } + return scan; +} + +static cmp_t +carry_node_cmp(carry_level * level, carry_node * n1, carry_node * n2) +{ + assert("nikita-2199", n1 != NULL); + assert("nikita-2200", n2 != NULL); + + if (n1 == n2) + return EQUAL_TO; + while (1) { + n1 = carry_node_next(n1); + if (carry_node_end(level, n1)) + return GREATER_THAN; + if (n1 == n2) + return LESS_THAN; + } + impossible("nikita-2201", "End of level reached"); +} + +carry_node *find_carry_node(carry_level * level, const znode * node) +{ + carry_node *scan; + carry_node *tmp_scan; + + assert("nikita-2202", level != NULL); + assert("nikita-2203", node != NULL); + + for_all_nodes(level, scan, tmp_scan) { + if (reiser4_carry_real(scan) == node) + return scan; + } + return NULL; +} + +znode *reiser4_carry_real(const carry_node * node) +{ + assert("nikita-3061", node != NULL); + + return node->lock_handle.node; +} + +carry_node *insert_carry_node(carry_level * doing, carry_level * todo, + const znode * node) +{ + carry_node *base; + carry_node *scan; + carry_node *tmp_scan; + carry_node *proj; + + base = find_carry_node(doing, node); + assert("nikita-2204", base != NULL); + + for_all_nodes(todo, scan, tmp_scan) { + proj = find_carry_node(doing, scan->node); + 
assert("nikita-2205", proj != NULL); + if (carry_node_cmp(doing, proj, base) != LESS_THAN) + break; + } + return scan; +} + +static carry_node *add_carry_atplace(carry_level * doing, carry_level * todo, + znode * node) +{ + carry_node *reference; + + assert("nikita-2994", doing != NULL); + assert("nikita-2995", todo != NULL); + assert("nikita-2996", node != NULL); + + reference = insert_carry_node(doing, todo, node); + assert("nikita-2997", reference != NULL); + + return reiser4_add_carry(todo, POOLO_BEFORE, reference); +} + +/* like reiser4_post_carry(), but designed to be called from node plugin + methods. This function is different from reiser4_post_carry() in that it + finds proper place to insert node in the queue. */ +carry_op *node_post_carry(carry_plugin_info * info /* carry parameters + * passed down to node + * plugin */ , + carry_opcode op /* opcode of operation */ , + znode * node /* node on which this + * operation will operate */ , + int apply_to_parent_p /* whether operation will + * operate directly on @node + * or on its parent. 
*/ ) +{ + carry_op *result; + carry_node *child; + + assert("nikita-2207", info != NULL); + assert("nikita-2208", info->todo != NULL); + + if (info->doing == NULL) + return reiser4_post_carry(info->todo, op, node, + apply_to_parent_p); + + result = add_op(info->todo, POOLO_LAST, NULL); + if (IS_ERR(result)) + return result; + child = add_carry_atplace(info->doing, info->todo, node); + if (IS_ERR(child)) { + reiser4_pool_free(&info->todo->pool->op_pool, &result->header); + return (carry_op *) child; + } + result->node = child; + result->op = op; + child->parent = apply_to_parent_p; + if (ZF_ISSET(node, JNODE_ORPHAN)) + child->left_before = 1; + child->node = node; + return result; +} + +/* lock all carry nodes in @level */ +static int lock_carry_level(carry_level * level/* level to lock */) +{ + int result; + carry_node *node; + carry_node *tmp_node; + + assert("nikita-881", level != NULL); + assert("nikita-2229", carry_level_invariant(level, CARRY_TODO)); + + /* lock nodes from left to right */ + result = 0; + for_all_nodes(level, node, tmp_node) { + result = lock_carry_node(level, node); + if (result != 0) + break; + } + return result; +} + +/* Synchronize delimiting keys between @node and its left neighbor. + + To reduce contention on dk key and simplify carry code, we synchronize + delimiting keys only when carry ultimately leaves tree level (carrying + changes upward) and unlocks nodes at this level. + + This function first finds left neighbor of @node and then updates left + neighbor's right delimiting key to conincide with least key in @node. 
+ +*/ + +ON_DEBUG(extern atomic_t delim_key_version; + ) + +static void sync_dkeys(znode * spot/* node to update */) +{ + reiser4_key pivot; + reiser4_tree *tree; + + assert("nikita-1610", spot != NULL); + assert("nikita-1612", LOCK_CNT_NIL(rw_locked_dk)); + + tree = znode_get_tree(spot); + read_lock_tree(tree); + write_lock_dk(tree); + + assert("nikita-2192", znode_is_loaded(spot)); + + /* sync left delimiting key of @spot with key in its leftmost item */ + if (node_is_empty(spot)) + pivot = *znode_get_rd_key(spot); + else + leftmost_key_in_node(spot, &pivot); + + znode_set_ld_key(spot, &pivot); + + /* there can be sequence of empty nodes pending removal on the left of + @spot. Scan them and update their left and right delimiting keys to + match left delimiting key of @spot. Also, update right delimiting + key of first non-empty left neighbor. + */ + while (1) { + if (!ZF_ISSET(spot, JNODE_LEFT_CONNECTED)) + break; + + spot = spot->left; + if (spot == NULL) + break; + + znode_set_rd_key(spot, &pivot); + /* don't sink into the domain of another balancing */ + if (!znode_is_write_locked(spot)) + break; + if (ZF_ISSET(spot, JNODE_HEARD_BANSHEE)) + znode_set_ld_key(spot, &pivot); + else + break; + } + + write_unlock_dk(tree); + read_unlock_tree(tree); +} + +/* unlock all carry nodes in @level */ +static void unlock_carry_level(carry_level * level /* level to unlock */ , + int failure /* true if unlocking owing to + * failure */ ) +{ + carry_node *node; + carry_node *tmp_node; + + assert("nikita-889", level != NULL); + + if (!failure) { + znode *spot; + + spot = NULL; + /* update delimiting keys */ + for_all_nodes(level, node, tmp_node) { + if (reiser4_carry_real(node) != spot) { + spot = reiser4_carry_real(node); + sync_dkeys(spot); + } + } + } + + /* nodes can be unlocked in arbitrary order. In preemptible + environment it's better to unlock in reverse order of locking, + though. 
+ */ + for_all_nodes_back(level, node, tmp_node) { + /* all allocated nodes should be already linked to their + parents at this moment. */ + assert("nikita-1631", + ergo(!failure, !ZF_ISSET(reiser4_carry_real(node), + JNODE_ORPHAN))); + ON_DEBUG(check_dkeys(reiser4_carry_real(node))); + unlock_carry_node(level, node, failure); + } + level->new_root = NULL; +} + +/* finish with @level + + Unlock nodes and release all allocated resources */ +static void done_carry_level(carry_level * level/* level to finish */) +{ + carry_node *node; + carry_node *tmp_node; + carry_op *op; + carry_op *tmp_op; + + assert("nikita-1076", level != NULL); + + unlock_carry_level(level, 0); + for_all_nodes(level, node, tmp_node) { + assert("nikita-2113", list_empty_careful(&node->lock_handle.locks_link)); + assert("nikita-2114", list_empty_careful(&node->lock_handle.owners_link)); + reiser4_pool_free(&level->pool->node_pool, &node->header); + } + for_all_ops(level, op, tmp_op) + reiser4_pool_free(&level->pool->op_pool, &op->header); +} + +/* helper function to complete locking of carry node + + Finish locking of carry node. There are several ways in which new carry + node can be added into carry level and locked. Normal is through + lock_carry_node(), but also from find_{left|right}_neighbor(). This + function factors out common final part of all locking scenarios. It + supposes that @node -> lock_handle is lock handle for lock just taken and + fills ->real_node from this lock handle. + +*/ +int lock_carry_node_tail(carry_node * node/* node to complete locking of */) +{ + assert("nikita-1052", node != NULL); + assert("nikita-1187", reiser4_carry_real(node) != NULL); + assert("nikita-1188", !node->unlock); + + node->unlock = 1; + /* Load node content into memory and install node plugin by + looking at the node header. + + Most of the time this call is cheap because the node is + already in memory. 
+ + Corresponding zrelse() is in unlock_carry_node() + */ + return zload(reiser4_carry_real(node)); +} + +/* lock carry node + + "Resolve" node to real znode, lock it and mark as locked. + This requires recursive locking of znodes. + + When operation is posted to the parent level, node it will be applied to is + not yet known. For example, when shifting data between two nodes, + delimiting has to be updated in parent or parents of nodes involved. But + their parents is not yet locked and, moreover said nodes can be reparented + by concurrent balancing. + + To work around this, carry operation is applied to special "carry node" + rather than to the znode itself. Carry node consists of some "base" or + "reference" znode and flags indicating how to get to the target of carry + operation (->real_node field of carry_node) from base. + +*/ +int lock_carry_node(carry_level * level /* level @node is in */ , + carry_node * node/* node to lock */) +{ + int result; + znode *reference_point; + lock_handle lh; + lock_handle tmp_lh; + reiser4_tree *tree; + + assert("nikita-887", level != NULL); + assert("nikita-882", node != NULL); + + result = 0; + reference_point = node->node; + init_lh(&lh); + init_lh(&tmp_lh); + if (node->left_before) { + /* handling of new nodes, allocated on the previous level: + + some carry ops were propably posted from the new node, but + this node neither has parent pointer set, nor is + connected. This will be done in ->create_hook() for + internal item. + + No then less, parent of new node has to be locked. To do + this, first go to the "left" in the carry order. This + depends on the decision to always allocate new node on the + right of existing one. + + Loop handles case when multiple nodes, all orphans, were + inserted. + + Strictly speaking, taking tree lock is not necessary here, + because all nodes scanned by loop in + find_begetting_brother() are write-locked by this thread, + and thus, their sibling linkage cannot change. 
+ + */ + tree = znode_get_tree(reference_point); + read_lock_tree(tree); + reference_point = find_begetting_brother(node, level)->node; + read_unlock_tree(tree); + assert("nikita-1186", reference_point != NULL); + } + if (node->parent && (result == 0)) { + result = + reiser4_get_parent(&tmp_lh, reference_point, + ZNODE_WRITE_LOCK); + if (result != 0) { + ; /* nothing */ + } else if (znode_get_level(tmp_lh.node) == 0) { + assert("nikita-1347", znode_above_root(tmp_lh.node)); + result = add_new_root(level, node, tmp_lh.node); + if (result == 0) { + reference_point = level->new_root; + move_lh(&lh, &node->lock_handle); + } + } else if ((level->new_root != NULL) + && (level->new_root != + znode_parent_nolock(reference_point))) { + /* parent of node exists, but this level aready + created different new root, so */ + warning("nikita-1109", + /* it should be "radicis", but tradition is + tradition. do banshees read latin? */ + "hodie natus est radici frater"); + result = -EIO; + } else { + move_lh(&lh, &tmp_lh); + reference_point = lh.node; + } + } + if (node->left && (result == 0)) { + assert("nikita-1183", node->parent); + assert("nikita-883", reference_point != NULL); + result = + reiser4_get_left_neighbor(&tmp_lh, reference_point, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result == 0) { + done_lh(&lh); + move_lh(&lh, &tmp_lh); + reference_point = lh.node; + } + } + if (!node->parent && !node->left && !node->left_before) { + result = + longterm_lock_znode(&lh, reference_point, ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI); + } + if (result == 0) { + move_lh(&node->lock_handle, &lh); + result = lock_carry_node_tail(node); + } + done_lh(&tmp_lh); + done_lh(&lh); + return result; +} + +/* release a lock on &carry_node. + + Release if necessary lock on @node. This opearion is pair of + lock_carry_node() and is idempotent: you can call it more than once on the + same node. 
+ +*/ +static void +unlock_carry_node(carry_level * level, + carry_node * node /* node to be released */ , + int failure /* 0 if node is unlocked due + * to some error */ ) +{ + znode *real_node; + + assert("nikita-884", node != NULL); + + real_node = reiser4_carry_real(node); + /* pair to zload() in lock_carry_node_tail() */ + zrelse(real_node); + if (node->unlock && (real_node != NULL)) { + assert("nikita-899", real_node == node->lock_handle.node); + longterm_unlock_znode(&node->lock_handle); + } + if (failure) { + if (node->deallocate && (real_node != NULL)) { + /* free node in bitmap + + Prepare node for removal. Last zput() will finish + with it. + */ + ZF_SET(real_node, JNODE_HEARD_BANSHEE); + } + if (node->free) { + assert("nikita-2177", + list_empty_careful(&node->lock_handle.locks_link)); + assert("nikita-2112", + list_empty_careful(&node->lock_handle.owners_link)); + reiser4_pool_free(&level->pool->node_pool, + &node->header); + } + } +} + +/* fatal_carry_error() - all-catching error handling function + + It is possible that carry faces unrecoverable error, like unability to + insert pointer at the internal level. Our simple solution is just panic in + this situation. More sophisticated things like attempt to remount + file-system as read-only can be implemented without much difficlties. + + It is believed, that: + + 1. in stead of panicking, all current transactions can be aborted rolling + system back to the consistent state. + +Umm, if you simply panic without doing anything more at all, then all current +transactions are aborted and the system is rolled back to a consistent state, +by virtue of the design of the transactional mechanism. Well, wait, let's be +precise. If an internal node is corrupted on disk due to hardware failure, +then there may be no consistent state that can be rolled back to, so instead +we should say that it will rollback the transactions, which barring other +factors means rolling back to a consistent state. 
+ +# Nikita: there is a subtle difference between panic and aborting +# transactions: machine doesn't reboot. Processes aren't killed. Processes +# don't using reiser4 (not that we care about such processes), or using other +# reiser4 mounts (about them we do care) will simply continue to run. With +# some luck, even application using aborted file system can survive: it will +# get some error, like EBADF, from each file descriptor on failed file system, +# but applications that do care about tolerance will cope with this (squid +# will). + +It would be a nice feature though to support rollback without rebooting +followed by remount, but this can wait for later versions. + + 2. once isolated transactions will be implemented it will be possible to + roll back offending transaction. + +2. is additional code complexity of inconsistent value (it implies that a +broken tree should be kept in operation), so we must think about it more +before deciding if it should be done. -Hans + +*/ +static void fatal_carry_error(carry_level * doing UNUSED_ARG /* carry level + * where + * unrecoverable + * error + * occurred */ , + int ecode/* error code */) +{ + assert("nikita-1230", doing != NULL); + assert("nikita-1231", ecode < 0); + + reiser4_panic("nikita-1232", "Carry failed: %i", ecode); +} + +/** + * Add new root to the tree + * + * This function itself only manages changes in carry structures and delegates + * all hard work (allocation of znode for new root, changes of parent and + * sibling pointers) to the reiser4_add_tree_root(). + * + * Locking: old tree root is locked by carry at this point. Fake znode is also + * locked. 
+ */ +static int add_new_root(carry_level * level,/* carry level in context of which + * operation is performed */ + carry_node * node, /* carry node for existing root */ + znode * fake /* "fake" znode already locked by + * us */) +{ + int result; + + assert("nikita-1104", level != NULL); + assert("nikita-1105", node != NULL); + + assert("nikita-1403", znode_is_write_locked(node->node)); + assert("nikita-1404", znode_is_write_locked(fake)); + + /* trying to create new root. */ + /* @node is root and it's already locked by us. This + means that nobody else can be trying to add/remove + tree root right now. + */ + if (level->new_root == NULL) + level->new_root = reiser4_add_tree_root(node->node, fake); + if (!IS_ERR(level->new_root)) { + assert("nikita-1210", znode_is_root(level->new_root)); + node->deallocate = 1; + result = + longterm_lock_znode(&node->lock_handle, level->new_root, + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI); + if (result == 0) + zput(level->new_root); + } else { + result = PTR_ERR(level->new_root); + level->new_root = NULL; + } + return result; +} + +/* allocate new znode and add the operation that inserts the + pointer to it into the parent node into the todo level + + Allocate new znode, add it into carry queue and post into @todo queue + request to add pointer to new node into its parent. + + This is carry related routing that calls reiser4_new_node() to allocate new + node. +*/ +carry_node *add_new_znode(znode * brother /* existing left neighbor of new + * node */ , + carry_node * ref /* carry node after which new + * carry node is to be inserted + * into queue. This affects + * locking. 
*/ , + carry_level * doing /* carry queue where new node is + * to be added */ , + carry_level * todo /* carry queue where COP_INSERT + * operation to add pointer to + * new node will ne added */ ) +{ + carry_node *fresh; + znode *new_znode; + carry_op *add_pointer; + carry_plugin_info info; + + assert("nikita-1048", brother != NULL); + assert("nikita-1049", todo != NULL); + + /* There is a lot of possible variations here: to what parent + new node will be attached and where. For simplicity, always + do the following: + + (1) new node and @brother will have the same parent. + + (2) new node is added on the right of @brother + + */ + + fresh = reiser4_add_carry_skip(doing, + ref ? POOLO_AFTER : POOLO_LAST, ref); + if (IS_ERR(fresh)) + return fresh; + + fresh->deallocate = 1; + fresh->free = 1; + + new_znode = reiser4_new_node(brother, znode_get_level(brother)); + if (IS_ERR(new_znode)) + /* @fresh will be deallocated automatically by error + handling code in the caller. */ + return (carry_node *) new_znode; + + /* new_znode returned znode with x_count 1. Caller has to decrease + it. make_space() does. */ + + ZF_SET(new_znode, JNODE_ORPHAN); + fresh->node = new_znode; + + while (ZF_ISSET(reiser4_carry_real(ref), JNODE_ORPHAN)) { + ref = carry_node_prev(ref); + assert("nikita-1606", !carry_node_end(doing, ref)); + } + + info.todo = todo; + info.doing = doing; + add_pointer = node_post_carry(&info, COP_INSERT, + reiser4_carry_real(ref), 1); + if (IS_ERR(add_pointer)) { + /* no need to deallocate @new_znode here: it will be + deallocated during carry error handling. 
*/ + return (carry_node *) add_pointer; + } + + add_pointer->u.insert.type = COPT_CHILD; + add_pointer->u.insert.child = fresh; + add_pointer->u.insert.brother = brother; + /* initially new node spawns empty key range */ + write_lock_dk(znode_get_tree(brother)); + znode_set_ld_key(new_znode, + znode_set_rd_key(new_znode, + znode_get_rd_key(brother))); + write_unlock_dk(znode_get_tree(brother)); + return fresh; +} + +/* DEBUGGING FUNCTIONS. + + Probably we also should leave them on even when + debugging is turned off to print dumps at errors. +*/ +#if REISER4_DEBUG +static int carry_level_invariant(carry_level * level, carry_queue_state state) +{ + carry_node *node; + carry_node *tmp_node; + + if (level == NULL) + return 0; + + if (level->track_type != 0 && + level->track_type != CARRY_TRACK_NODE && + level->track_type != CARRY_TRACK_CHANGE) + return 0; + + /* check that nodes are in ascending order */ + for_all_nodes(level, node, tmp_node) { + znode *left; + znode *right; + + reiser4_key lkey; + reiser4_key rkey; + + if (node != carry_node_front(level)) { + if (state == CARRY_TODO) { + right = node->node; + left = carry_node_prev(node)->node; + } else { + right = reiser4_carry_real(node); + left = reiser4_carry_real(carry_node_prev(node)); + } + if (right == NULL || left == NULL) + continue; + if (node_is_empty(right) || node_is_empty(left)) + continue; + if (!keyle(leftmost_key_in_node(left, &lkey), + leftmost_key_in_node(right, &rkey))) { + warning("", "wrong key order"); + return 0; + } + } + } + return 1; +} +#endif + +/* get symbolic name for boolean */ +static const char *tf(int boolean/* truth value */) +{ + return boolean ? 
"t" : "f"; +} + +/* symbolic name for carry operation */ +static const char *carry_op_name(carry_opcode op/* carry opcode */) +{ + switch (op) { + case COP_INSERT: + return "COP_INSERT"; + case COP_DELETE: + return "COP_DELETE"; + case COP_CUT: + return "COP_CUT"; + case COP_PASTE: + return "COP_PASTE"; + case COP_UPDATE: + return "COP_UPDATE"; + case COP_EXTENT: + return "COP_EXTENT"; + case COP_INSERT_FLOW: + return "COP_INSERT_FLOW"; + default:{ + /* not mt safe, but who cares? */ + static char buf[20]; + + sprintf(buf, "unknown op: %x", op); + return buf; + } + } +} + +/* dump information about carry node */ +static void print_carry(const char *prefix /* prefix to print */ , + carry_node * node/* node to print */) +{ + if (node == NULL) { + printk("%s: null\n", prefix); + return; + } + printk + ("%s: %p parent: %s, left: %s, unlock: %s, free: %s, dealloc: %s\n", + prefix, node, tf(node->parent), tf(node->left), tf(node->unlock), + tf(node->free), tf(node->deallocate)); +} + +/* dump information about carry operation */ +static void print_op(const char *prefix /* prefix to print */ , + carry_op * op/* operation to print */) +{ + if (op == NULL) { + printk("%s: null\n", prefix); + return; + } + printk("%s: %p carry_opcode: %s\n", prefix, op, carry_op_name(op->op)); + print_carry("\tnode", op->node); + switch (op->op) { + case COP_INSERT: + case COP_PASTE: + print_coord("\tcoord", + op->u.insert.d ? op->u.insert.d->coord : NULL, 0); + reiser4_print_key("\tkey", + op->u.insert.d ? 
op->u.insert.d->key : NULL); + print_carry("\tchild", op->u.insert.child); + break; + case COP_DELETE: + print_carry("\tchild", op->u.delete.child); + break; + case COP_CUT: + if (op->u.cut_or_kill.is_cut) { + print_coord("\tfrom", + op->u.cut_or_kill.u.kill->params.from, 0); + print_coord("\tto", op->u.cut_or_kill.u.kill->params.to, + 0); + } else { + print_coord("\tfrom", + op->u.cut_or_kill.u.cut->params.from, 0); + print_coord("\tto", op->u.cut_or_kill.u.cut->params.to, + 0); + } + break; + case COP_UPDATE: + print_carry("\tleft", op->u.update.left); + break; + default: + /* do nothing */ + break; + } +} + +/* dump information about all nodes and operations in a @level */ +static void print_level(const char *prefix /* prefix to print */ , + carry_level * level/* level to print */) +{ + carry_node *node; + carry_node *tmp_node; + carry_op *op; + carry_op *tmp_op; + + if (level == NULL) { + printk("%s: null\n", prefix); + return; + } + printk("%s: %p, restartable: %s\n", + prefix, level, tf(level->restartable)); + + for_all_nodes(level, node, tmp_node) + print_carry("\tcarry node", node); + for_all_ops(level, op, tmp_op) + print_op("\tcarry op", op); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry.h b/fs/reiser4/carry.h new file mode 100644 index 000000000000..d1f5b608442b --- /dev/null +++ b/fs/reiser4/carry.h @@ -0,0 +1,445 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Functions and data types to "carry" tree modification(s) upward. + See fs/reiser4/carry.c for details. */ + +#if !defined(__FS_REISER4_CARRY_H__) +#define __FS_REISER4_CARRY_H__ + +#include "forward.h" +#include "debug.h" +#include "pool.h" +#include "znode.h" + +#include + +/* &carry_node - "location" of carry node. 
+ + "location" of node that is involved or going to be involved into + carry process. Node where operation will be carried to on the + parent level cannot be recorded explicitly. Operation will be carried + usually to the parent of some node (where changes are performed at + the current level) or, to the left neighbor of its parent. But while + modifications are performed at the current level, parent may + change. So, we have to allow some indirection (or, positevly, + flexibility) in locating carry nodes. + +*/ +typedef struct carry_node { + /* pool linkage */ + struct reiser4_pool_header header; + + /* base node from which real_node is calculated. See + fs/reiser4/carry.c:lock_carry_node(). */ + znode *node; + + /* how to get ->real_node */ + /* to get ->real_node obtain parent of ->node */ + __u32 parent:1; + /* to get ->real_node obtain left neighbor of parent of + ->node */ + __u32 left:1; + __u32 left_before:1; + + /* locking */ + + /* this node was locked by carry process and should be + unlocked when carry leaves a level */ + __u32 unlock:1; + + /* disk block for this node was allocated by carry process and + should be deallocated when carry leaves a level */ + __u32 deallocate:1; + /* this carry node was allocated by carry process and should be + freed when carry leaves a level */ + __u32 free:1; + + /* type of lock we want to take on this node */ + lock_handle lock_handle; +} carry_node; + +/* &carry_opcode - elementary operations that can be carried upward + + Operations that carry() can handle. This list is supposed to be + expanded. + + Each carry operation (cop) is handled by appropriate function defined + in fs/reiser4/carry.c. For example COP_INSERT is handled by + fs/reiser4/carry.c:carry_insert() etc. These functions in turn + call plugins of nodes affected by operation to modify nodes' content + and to gather operations to be performed on the next level. + +*/ +typedef enum { + /* insert new item into node. 
*/ + COP_INSERT, + /* delete pointer from parent node */ + COP_DELETE, + /* remove part of or whole node. */ + COP_CUT, + /* increase size of item. */ + COP_PASTE, + /* insert extent (that is sequence of unformatted nodes). */ + COP_EXTENT, + /* update delimiting key in least common ancestor of two + nodes. This is performed when items are moved between two + nodes. + */ + COP_UPDATE, + /* insert flow */ + COP_INSERT_FLOW, + COP_LAST_OP, +} carry_opcode; + +#define CARRY_FLOW_NEW_NODES_LIMIT 20 + +/* mode (or subtype) of COP_{INSERT|PASTE} operation. Specifies how target + item is determined. */ +typedef enum { + /* target item is one containing pointer to the ->child node */ + COPT_CHILD, + /* target item is given explicitly by @coord */ + COPT_ITEM_DATA, + /* target item is given by key */ + COPT_KEY, + /* see insert_paste_common() for more comments on this. */ + COPT_PASTE_RESTARTED, +} cop_insert_pos_type; + +/* flags to cut and delete */ +typedef enum { + /* don't kill node even if it became completely empty as results of + * cut. This is needed for eottl handling. See carry_extent() for + * details. */ + DELETE_RETAIN_EMPTY = (1 << 0) +} cop_delete_flag; + +/* + * carry() implements "lock handle tracking" feature. + * + * Callers supply carry with node where to perform initial operation and lock + * handle on this node. Trying to optimize node utilization carry may actually + * move insertion point to different node. Callers expect that lock handle + * will rebe transferred to the new node also. + * + */ +typedef enum { + /* transfer lock handle along with insertion point */ + CARRY_TRACK_CHANGE = 1, + /* acquire new lock handle to the node where insertion point is. This + * is used when carry() client doesn't initially possess lock handle + * on the insertion point node, for example, by extent insertion + * code. See carry_extent(). 
*/ + CARRY_TRACK_NODE = 2 +} carry_track_type; + +/* data supplied to COP_{INSERT|PASTE} by callers */ +typedef struct carry_insert_data { + /* position where new item is to be inserted */ + coord_t *coord; + /* new item description */ + reiser4_item_data * data; + /* key of new item */ + const reiser4_key * key; +} carry_insert_data; + +/* cut and kill are similar, so carry_cut_data and carry_kill_data share the + below structure of parameters */ +struct cut_kill_params { + /* coord where cut starts (inclusive) */ + coord_t *from; + /* coord where cut stops (inclusive, this item/unit will also be + * cut) */ + coord_t *to; + /* starting key. This is necessary when item and unit pos don't + * uniquely identify what portion or tree to remove. For example, this + * indicates what portion of extent unit will be affected. */ + const reiser4_key * from_key; + /* exclusive stop key */ + const reiser4_key * to_key; + /* if this is not NULL, smallest actually removed key is stored + * here. */ + reiser4_key *smallest_removed; + /* kill_node_content() is called for file truncate */ + int truncate; +}; + +struct carry_cut_data { + struct cut_kill_params params; +}; + +struct carry_kill_data { + struct cut_kill_params params; + /* parameter to be passed to the ->kill_hook() method of item + * plugin */ + /*void *iplug_params; *//* FIXME: unused currently */ + /* if not NULL---inode whose items are being removed. This is needed + * for ->kill_hook() of extent item to update VM structures when + * removing pages. */ + struct inode *inode; + /* sibling list maintenance is complicated by existence of eottl. When + * eottl whose left and right neighbors are formatted leaves is + * removed, one has to connect said leaves in the sibling list. This + * cannot be done when extent removal is just started as locking rules + * require sibling list update to happen atomically with removal of + * extent item. Therefore: 1. 
pointers to left and right neighbors + * have to be passed down to the ->kill_hook() of extent item, and + * 2. said neighbors have to be locked. */ + lock_handle *left; + lock_handle *right; + /* flags modifying behavior of kill. Currently, it may have + DELETE_RETAIN_EMPTY set. */ + unsigned flags; + char *buf; +}; + +/* &carry_tree_op - operation to "carry" upward. + + Description of an operation we want to "carry" to the upper level of + a tree: e.g, when we insert something and there is not enough space + we allocate a new node and "carry" the operation of inserting a + pointer to the new node to the upper level, on removal of empty node, + we carry up operation of removing appropriate entry from parent. + + There are two types of carry ops: when adding or deleting node we + node at the parent level where appropriate modification has to be + performed is known in advance. When shifting items between nodes + (split, merge), delimiting key should be changed in the least common + parent of the nodes involved that is not known in advance. + + For the operations of the first type we store in &carry_op pointer to + the &carry_node at the parent level. For the operation of the second + type we store &carry_node or parents of the left and right nodes + modified and keep track of them upward until they coincide. + +*/ +typedef struct carry_op { + /* pool linkage */ + struct reiser4_pool_header header; + carry_opcode op; + /* node on which operation is to be performed: + + for insert, paste: node where new item is to be inserted + + for delete: node where pointer is to be deleted + + for cut: node to cut from + + for update: node where delimiting key is to be modified + + for modify: parent of modified node + + */ + carry_node *node; + union { + struct { + /* (sub-)type of insertion/paste. Taken from + cop_insert_pos_type. */ + __u8 type; + /* various operation flags. Taken from + cop_insert_flag. 
*/ + __u8 flags; + carry_insert_data *d; + carry_node *child; + znode *brother; + } insert, paste, extent; + + struct { + int is_cut; + union { + carry_kill_data *kill; + carry_cut_data *cut; + } u; + } cut_or_kill; + + struct { + carry_node *left; + } update; + struct { + /* changed child */ + carry_node *child; + /* bitmask of changes. See &cop_modify_flag */ + __u32 flag; + } modify; + struct { + /* flags to deletion operation. Are taken from + cop_delete_flag */ + __u32 flags; + /* child to delete from parent. If this is + NULL, delete op->node. */ + carry_node *child; + } delete; + struct { + /* various operation flags. Taken from + cop_insert_flag. */ + __u32 flags; + flow_t *flow; + coord_t *insert_point; + reiser4_item_data *data; + /* flow insertion is limited by number of new blocks + added in that operation which do not get any data + but part of flow. This limit is set by macro + CARRY_FLOW_NEW_NODES_LIMIT. This field stores number + of nodes added already during one carry_flow */ + int new_nodes; + } insert_flow; + } u; +} carry_op; + +/* &carry_op_pool - preallocated pool of carry operations, and nodes */ +typedef struct carry_pool { + carry_op op[CARRIES_POOL_SIZE]; + struct reiser4_pool op_pool; + carry_node node[NODES_LOCKED_POOL_SIZE]; + struct reiser4_pool node_pool; +} carry_pool; + +/* &carry_tree_level - carry process on given level + + Description of balancing process on the given level. + + No need for locking here, as carry_tree_level is essentially per + thread thing (for now). + +*/ +struct carry_level { + /* this level may be restarted */ + __u32 restartable:1; + /* list of carry nodes on this level, ordered by key order */ + struct list_head nodes; + struct list_head ops; + /* pool where new objects are allocated from */ + carry_pool *pool; + int ops_num; + int nodes_num; + /* new root created on this level, if any */ + znode *new_root; + /* This is set by caller (insert_by_key(), rreiser4_esize_item(), etc.) 
+ when they want ->tracked to automagically wander to the node where + insertion point moved after insert or paste. + */ + carry_track_type track_type; + /* lock handle supplied by user that we are tracking. See + above. */ + lock_handle *tracked; +}; + +/* information carry passes to plugin methods that may add new operations to + the @todo queue */ +struct carry_plugin_info { + carry_level *doing; + carry_level *todo; +}; + +int reiser4_carry(carry_level * doing, carry_level * done); + +carry_node *reiser4_add_carry(carry_level * level, pool_ordering order, + carry_node * reference); +carry_node *reiser4_add_carry_skip(carry_level * level, pool_ordering order, + carry_node * reference); + +extern carry_node *insert_carry_node(carry_level * doing, + carry_level * todo, const znode * node); + +extern carry_pool *init_carry_pool(int); +extern void done_carry_pool(carry_pool * pool); + +extern void init_carry_level(carry_level * level, carry_pool * pool); + +extern carry_op *reiser4_post_carry(carry_level * level, carry_opcode op, + znode * node, int apply_to_parent); +extern carry_op *node_post_carry(carry_plugin_info * info, carry_opcode op, + znode * node, int apply_to_parent_p); + +carry_node *add_new_znode(znode * brother, carry_node * reference, + carry_level * doing, carry_level * todo); + +carry_node *find_carry_node(carry_level * level, const znode * node); + +extern znode *reiser4_carry_real(const carry_node * node); + +/* helper macros to iterate over carry queues */ + +#define carry_node_next(node) \ + list_entry((node)->header.level_linkage.next, carry_node, \ + header.level_linkage) + +#define carry_node_prev(node) \ + list_entry((node)->header.level_linkage.prev, carry_node, \ + header.level_linkage) + +#define carry_node_front(level) \ + list_entry((level)->nodes.next, carry_node, header.level_linkage) + +#define carry_node_back(level) \ + list_entry((level)->nodes.prev, carry_node, header.level_linkage) + +#define carry_node_end(level, node) \ + 
(&(level)->nodes == &(node)->header.level_linkage) + +/* macro to iterate over all operations in a @level */ +#define for_all_ops(level /* carry level (of type carry_level *) */, \ + op /* pointer to carry operation, modified by loop (of \ + * type carry_op *) */, \ + tmp /* pointer to carry operation (of type carry_op *), \ + * used to make iterator stable in the face of \ + * deletions from the level */ ) \ +for (op = list_entry(level->ops.next, carry_op, header.level_linkage), \ + tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage); \ + &op->header.level_linkage != &level->ops; \ + op = tmp, \ + tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage)) + +#if 0 +for (op = (carry_op *) pool_level_list_front(&level->ops), \ + tmp = (carry_op *) pool_level_list_next(&op->header) ; \ + !pool_level_list_end(&level->ops, &op->header) ; \ + op = tmp, tmp = (carry_op *) pool_level_list_next(&op->header)) +#endif + +/* macro to iterate over all nodes in a @level */ \ +#define for_all_nodes(level /* carry level (of type carry_level *) */, \ + node /* pointer to carry node, modified by loop (of \ + * type carry_node *) */, \ + tmp /* pointer to carry node (of type carry_node *), \ + * used to make iterator stable in the face of * \ + * deletions from the level */ ) \ +for (node = list_entry(level->nodes.next, carry_node, header.level_linkage), \ + tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage); \ + &node->header.level_linkage != &level->nodes; \ + node = tmp, \ + tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage)) + +#if 0 +for (node = carry_node_front(level), \ + tmp = carry_node_next(node) ; !carry_node_end(level, node) ; \ + node = tmp, tmp = carry_node_next(node)) +#endif + +/* macro to iterate over all nodes in a @level in reverse order + + This is used, because nodes are unlocked in reversed order of locking */ +#define for_all_nodes_back(level /* 
carry level (of type carry_level *) */, \ + node /* pointer to carry node, modified by loop \ + * (of type carry_node *) */, \ + tmp /* pointer to carry node (of type carry_node \ + * *), used to make iterator stable in the \ + * face of deletions from the level */ ) \ +for (node = carry_node_back(level), \ + tmp = carry_node_prev(node) ; !carry_node_end(level, node) ; \ + node = tmp, tmp = carry_node_prev(node)) + +/* __FS_REISER4_CARRY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry_ops.c b/fs/reiser4/carry_ops.c new file mode 100644 index 000000000000..9871da4464f1 --- /dev/null +++ b/fs/reiser4/carry_ops.c @@ -0,0 +1,2136 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* implementation of carry operations */ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "pool.h" +#include "tree_mod.h" +#include "carry.h" +#include "carry_ops.h" +#include "tree.h" +#include "super.h" +#include "reiser4.h" + +#include +#include + +static int carry_shift_data(sideof side, coord_t *insert_coord, znode * node, + carry_level * doing, carry_level * todo, + unsigned int including_insert_coord_p); + +extern int lock_carry_node(carry_level * level, carry_node * node); +extern int lock_carry_node_tail(carry_node * node); + +/* find left neighbor of a carry node + + Look for left neighbor of @node and add it to the @doing queue. See + comments in the body. 
+ +*/ +static carry_node *find_left_neighbor(carry_op * op /* node to find left + * neighbor of */ , + carry_level * doing/* level to scan */) +{ + int result; + carry_node *node; + carry_node *left; + int flags; + reiser4_tree *tree; + + node = op->node; + + tree = current_tree; + read_lock_tree(tree); + /* first, check whether left neighbor is already in a @doing queue */ + if (reiser4_carry_real(node)->left != NULL) { + /* NOTE: there is locking subtlety here. Look into + * find_right_neighbor() for more info */ + if (find_carry_node(doing, + reiser4_carry_real(node)->left) != NULL) { + read_unlock_tree(tree); + left = node; + do { + left = list_entry(left->header.level_linkage.prev, + carry_node, header.level_linkage); + assert("nikita-3408", !carry_node_end(doing, + left)); + } while (reiser4_carry_real(left) == + reiser4_carry_real(node)); + return left; + } + } + read_unlock_tree(tree); + + left = reiser4_add_carry_skip(doing, POOLO_BEFORE, node); + if (IS_ERR(left)) + return left; + + left->node = node->node; + left->free = 1; + + flags = GN_TRY_LOCK; + if (!(op->u.insert.flags & COPI_LOAD_LEFT)) + flags |= GN_NO_ALLOC; + + /* then, feeling lucky, peek left neighbor in the cache. */ + result = reiser4_get_left_neighbor(&left->lock_handle, + reiser4_carry_real(node), + ZNODE_WRITE_LOCK, flags); + if (result == 0) { + /* ok, node found and locked. */ + result = lock_carry_node_tail(left); + if (result != 0) + left = ERR_PTR(result); + } else if (result == -E_NO_NEIGHBOR || result == -ENOENT) { + /* node is leftmost node in a tree, or neighbor wasn't in + cache, or there is an extent on the left. */ + reiser4_pool_free(&doing->pool->node_pool, &left->header); + left = NULL; + } else if (doing->restartable) { + /* if left neighbor is locked, and level is restartable, add + new node to @doing and restart. 
*/ + assert("nikita-913", node->parent != 0); + assert("nikita-914", node->node != NULL); + left->left = 1; + left->free = 0; + left = ERR_PTR(-E_REPEAT); + } else { + /* left neighbor is locked, level cannot be restarted. Just + ignore left neighbor. */ + reiser4_pool_free(&doing->pool->node_pool, &left->header); + left = NULL; + } + return left; +} + +/* find right neighbor of a carry node + + Look for right neighbor of @node and add it to the @doing queue. See + comments in the body. + +*/ +static carry_node *find_right_neighbor(carry_op * op /* node to find right + * neighbor of */ , + carry_level * doing/* level to scan */) +{ + int result; + carry_node *node; + carry_node *right; + lock_handle lh; + int flags; + reiser4_tree *tree; + + init_lh(&lh); + + node = op->node; + + tree = current_tree; + read_lock_tree(tree); + /* first, check whether right neighbor is already in a @doing queue */ + if (reiser4_carry_real(node)->right != NULL) { + /* + * Tree lock is taken here anyway, because, even if _outcome_ + * of (find_carry_node() != NULL) doesn't depends on + * concurrent updates to ->right, find_carry_node() cannot + * work with second argument NULL. Hence, following comment is + * of historic importance only. + * + * Subtle: + * + * Q: why don't we need tree lock here, looking for the right + * neighbor? + * + * A: even if value of node->real_node->right were changed + * during find_carry_node() execution, outcome of execution + * wouldn't change, because (in short) other thread cannot add + * elements to the @doing, and if node->real_node->right + * already was in @doing, value of node->real_node->right + * couldn't change, because node cannot be inserted between + * locked neighbors. + */ + if (find_carry_node(doing, + reiser4_carry_real(node)->right) != NULL) { + read_unlock_tree(tree); + /* + * What we are doing here (this is also applicable to + * the find_left_neighbor()). 
+ * + * tree_walk.c code requires that insertion of a + * pointer to a child, modification of parent pointer + * in the child, and insertion of the child into + * sibling list are atomic (see + * plugin/item/internal.c:create_hook_internal()). + * + * carry allocates new node long before pointer to it + * is inserted into parent and, actually, long before + * parent is even known. Such allocated-but-orphaned + * nodes are only trackable through carry level lists. + * + * Situation that is handled here is following: @node + * has valid ->right pointer, but there is + * allocated-but-orphaned node in the carry queue that + * is logically between @node and @node->right. Here + * we are searching for it. Critical point is that + * this is only possible if @node->right is also in + * the carry queue (this is checked above), because + * this is the only way new orphaned node could be + * inserted between them (before inserting new node, + * make_space() first tries to shift to the right, so, + * right neighbor will be locked and queued). + * + */ + right = node; + do { + right = list_entry(right->header.level_linkage.next, + carry_node, header.level_linkage); + assert("nikita-3408", !carry_node_end(doing, + right)); + } while (reiser4_carry_real(right) == + reiser4_carry_real(node)); + return right; + } + } + read_unlock_tree(tree); + + flags = GN_CAN_USE_UPPER_LEVELS; + if (!(op->u.insert.flags & COPI_LOAD_RIGHT)) + flags = GN_NO_ALLOC; + + /* then, try to lock right neighbor */ + init_lh(&lh); + result = reiser4_get_right_neighbor(&lh, + reiser4_carry_real(node), + ZNODE_WRITE_LOCK, flags); + if (result == 0) { + /* ok, node found and locked. 
*/ + right = reiser4_add_carry_skip(doing, POOLO_AFTER, node); + if (!IS_ERR(right)) { + right->node = lh.node; + move_lh(&right->lock_handle, &lh); + right->free = 1; + result = lock_carry_node_tail(right); + if (result != 0) + right = ERR_PTR(result); + } + } else if ((result == -E_NO_NEIGHBOR) || (result == -ENOENT)) { + /* node is rightmost node in a tree, or neighbor wasn't in + cache, or there is an extent on the right. */ + right = NULL; + } else + right = ERR_PTR(result); + done_lh(&lh); + return right; +} + +/* how much free space in a @node is needed for @op + + How much space in @node is required for completion of @op, where @op is + insert or paste operation. +*/ +static unsigned int space_needed_for_op(znode * node /* znode data are + * inserted or + * pasted in */ , + carry_op * op /* carry + operation */ ) +{ + assert("nikita-919", op != NULL); + + switch (op->op) { + default: + impossible("nikita-1701", "Wrong opcode"); + case COP_INSERT: + return space_needed(node, NULL, op->u.insert.d->data, 1); + case COP_PASTE: + return space_needed(node, op->u.insert.d->coord, + op->u.insert.d->data, 0); + } +} + +/* how much space in @node is required to insert or paste @data at + @coord. */ +unsigned int space_needed(const znode * node /* node data are inserted or + * pasted in */ , + const coord_t *coord /* coord where data are + * inserted or pasted + * at */ , + const reiser4_item_data * data /* data to insert or + * paste */ , + int insertion/* non-0 is inserting, 0---paste */) +{ + int result; + item_plugin *iplug; + + assert("nikita-917", node != NULL); + assert("nikita-918", node_plugin_by_node(node) != NULL); + assert("vs-230", !insertion || (coord == NULL)); + + result = 0; + iplug = data->iplug; + if (iplug->b.estimate != NULL) { + /* ask item plugin how much space is needed to insert this + item */ + result += iplug->b.estimate(insertion ? 
NULL : coord, data); + } else { + /* reasonable default */ + result += data->length; + } + if (insertion) { + node_plugin *nplug; + + nplug = node->nplug; + /* and add node overhead */ + if (nplug->item_overhead != NULL) + result += nplug->item_overhead(node, NULL); + } + return result; +} + +/* find &coord in parent where pointer to new child is to be stored. */ +static int find_new_child_coord(carry_op * op /* COP_INSERT carry operation to + * insert pointer to new + * child */ ) +{ + int result; + znode *node; + znode *child; + + assert("nikita-941", op != NULL); + assert("nikita-942", op->op == COP_INSERT); + + node = reiser4_carry_real(op->node); + assert("nikita-943", node != NULL); + assert("nikita-944", node_plugin_by_node(node) != NULL); + + child = reiser4_carry_real(op->u.insert.child); + result = + find_new_child_ptr(node, child, op->u.insert.brother, + op->u.insert.d->coord); + + build_child_ptr_data(child, op->u.insert.d->data); + return result; +} + +/* additional amount of free space in @node required to complete @op */ +static int free_space_shortage(znode * node /* node to check */ , + carry_op * op/* operation being performed */) +{ + assert("nikita-1061", node != NULL); + assert("nikita-1062", op != NULL); + + switch (op->op) { + default: + impossible("nikita-1702", "Wrong opcode"); + case COP_INSERT: + case COP_PASTE: + return space_needed_for_op(node, op) - znode_free_space(node); + case COP_EXTENT: + /* when inserting extent shift data around until insertion + point is utmost in the node. */ + if (coord_wrt(op->u.insert.d->coord) == COORD_INSIDE) + return +1; + else + return -1; + } +} + +/* helper function: update node pointer in operation after insertion + point was probably shifted into @target. 
*/ +static znode *sync_op(carry_op * op, carry_node * target) +{ + znode *insertion_node; + + /* reget node from coord: shift might move insertion coord to + the neighbor */ + insertion_node = op->u.insert.d->coord->node; + /* if insertion point was actually moved into new node, + update carry node pointer in operation. */ + if (insertion_node != reiser4_carry_real(op->node)) { + op->node = target; + assert("nikita-2540", + reiser4_carry_real(target) == insertion_node); + } + assert("nikita-2541", + reiser4_carry_real(op->node) == op->u.insert.d->coord->node); + return insertion_node; +} + +/* + * complete make_space() call: update tracked lock handle if necessary. See + * comments for fs/reiser4/carry.h:carry_track_type + */ +static int +make_space_tail(carry_op * op, carry_level * doing, znode * orig_node) +{ + int result; + carry_track_type tracking; + znode *node; + + tracking = doing->track_type; + node = op->u.insert.d->coord->node; + + if (tracking == CARRY_TRACK_NODE || + (tracking == CARRY_TRACK_CHANGE && node != orig_node)) { + /* inserting or pasting into node different from + original. Update lock handle supplied by caller. */ + assert("nikita-1417", doing->tracked != NULL); + done_lh(doing->tracked); + init_lh(doing->tracked); + result = longterm_lock_znode(doing->tracked, node, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI); + } else + result = 0; + return result; +} + +/* This is insertion policy function. It shifts data to the left and right + neighbors of insertion coord and allocates new nodes until there is enough + free space to complete @op. + + See comments in the body. + + Assumes that the node format favors insertions at the right end of the node + as node40 does. 
+ + See carry_flow() on detail about flow insertion +*/ +static int make_space(carry_op * op /* carry operation, insert or paste */ , + carry_level * doing /* current carry queue */ , + carry_level * todo/* carry queue on the parent level */) +{ + znode *node; + int result; + int not_enough_space; + int blk_alloc; + znode *orig_node; + __u32 flags; + + coord_t *coord; + + assert("nikita-890", op != NULL); + assert("nikita-891", todo != NULL); + assert("nikita-892", + op->op == COP_INSERT || + op->op == COP_PASTE || op->op == COP_EXTENT); + assert("nikita-1607", + reiser4_carry_real(op->node) == op->u.insert.d->coord->node); + + flags = op->u.insert.flags; + + /* NOTE check that new node can only be allocated after checking left + * and right neighbors. This is necessary for proper work of + * find_{left,right}_neighbor(). */ + assert("nikita-3410", ergo(flags & COPI_DONT_ALLOCATE, + flags & COPI_DONT_SHIFT_LEFT)); + assert("nikita-3411", ergo(flags & COPI_DONT_ALLOCATE, + flags & COPI_DONT_SHIFT_RIGHT)); + + coord = op->u.insert.d->coord; + orig_node = node = coord->node; + + assert("nikita-908", node != NULL); + assert("nikita-909", node_plugin_by_node(node) != NULL); + + result = 0; + /* If there is not enough space in a node, try to shift something to + the left neighbor. This is a bit tricky, as locking to the left is + low priority. This is handled by restart logic in carry(). + */ + not_enough_space = free_space_shortage(node, op); + if (not_enough_space <= 0) + /* it is possible that carry was called when there actually + was enough space in the node. For example, when inserting + leftmost item so that delimiting keys have to be updated. 
+ */ + return make_space_tail(op, doing, orig_node); + if (!(flags & COPI_DONT_SHIFT_LEFT)) { + carry_node *left; + /* make note in statistics of an attempt to move + something into the left neighbor */ + left = find_left_neighbor(op, doing); + if (unlikely(IS_ERR(left))) { + if (PTR_ERR(left) == -E_REPEAT) + return -E_REPEAT; + else { + /* some error other than restart request + occurred. This shouldn't happen. Issue a + warning and continue as if left neighbor + weren't existing. + */ + warning("nikita-924", + "Error accessing left neighbor: %li", + PTR_ERR(left)); + } + } else if (left != NULL) { + + /* shift everything possible on the left of and + including insertion coord into the left neighbor */ + result = carry_shift_data(LEFT_SIDE, coord, + reiser4_carry_real(left), + doing, todo, + flags & COPI_GO_LEFT); + + /* reget node from coord: shift_left() might move + insertion coord to the left neighbor */ + node = sync_op(op, left); + + not_enough_space = free_space_shortage(node, op); + /* There is not enough free space in @node, but + may be, there is enough free space in + @left. Various balancing decisions are valid here. + The same for the shifiting to the right. + */ + } + } + /* If there still is not enough space, shift to the right */ + if (not_enough_space > 0 && !(flags & COPI_DONT_SHIFT_RIGHT)) { + carry_node *right; + + right = find_right_neighbor(op, doing); + if (IS_ERR(right)) { + warning("nikita-1065", + "Error accessing right neighbor: %li", + PTR_ERR(right)); + } else if (right != NULL) { + /* node containing insertion point, and its right + neighbor node are write locked by now. 
+ + shift everything possible on the right of but + excluding insertion coord into the right neighbor + */ + result = carry_shift_data(RIGHT_SIDE, coord, + reiser4_carry_real(right), + doing, todo, + flags & COPI_GO_RIGHT); + /* reget node from coord: shift_right() might move + insertion coord to the right neighbor */ + node = sync_op(op, right); + not_enough_space = free_space_shortage(node, op); + } + } + /* If there is still not enough space, allocate new node(s). + + We try to allocate new blocks if COPI_DONT_ALLOCATE is not set in + the carry operation flags (currently this is needed during flush + only). + */ + for (blk_alloc = 0; + not_enough_space > 0 && result == 0 && blk_alloc < 2 && + !(flags & COPI_DONT_ALLOCATE); ++blk_alloc) { + carry_node *fresh; /* new node we are allocating */ + coord_t coord_shadow; /* remembered insertion point before + * shifting data into new node */ + carry_node *node_shadow; /* remembered insertion node + * before shifting */ + unsigned int gointo; /* whether insertion point should move + * into newly allocated node */ + + /* allocate new node on the right of @node. Znode and disk + fake block number for new node are allocated. + + add_new_znode() posts carry operation COP_INSERT with + COPT_CHILD option to the parent level to add + pointer to newly created node to its parent. + + Subtle point: if several new nodes are required to complete + insertion operation at this level, they will be inserted + into their parents in the order of creation, which means + that @node will be valid "cookie" at the time of insertion. + + */ + fresh = add_new_znode(node, op->node, doing, todo); + if (IS_ERR(fresh)) + return PTR_ERR(fresh); + + /* Try to shift into new node. */ + result = lock_carry_node(doing, fresh); + zput(reiser4_carry_real(fresh)); + if (result != 0) { + warning("nikita-947", + "Cannot lock new node: %i", result); + return result; + } + + /* both nodes are write locked by now. 
+ + shift everything possible on the right of and + including insertion coord into the right neighbor. + */ + coord_dup(&coord_shadow, op->u.insert.d->coord); + node_shadow = op->node; + /* move insertion point into newly created node if: + + . insertion point is rightmost in the source node, or + . this is not the first node we are allocating in a row. + */ + gointo = + (blk_alloc > 0) || + coord_is_after_rightmost(op->u.insert.d->coord); + + if (gointo && + op->op == COP_PASTE && + coord_is_existing_item(op->u.insert.d->coord) && + is_solid_item((item_plugin_by_coord(op->u.insert.d->coord)))) { + /* paste into solid (atomic) item, which can contain + only one unit, so we need to shift it right, where + insertion point supposed to be */ + + assert("edward-1444", op->u.insert.d->data->iplug == + item_plugin_by_id(STATIC_STAT_DATA_ID)); + assert("edward-1445", + op->u.insert.d->data->length > + node_plugin_by_node(coord->node)->free_space + (coord->node)); + + op->u.insert.d->coord->between = BEFORE_UNIT; + } + + result = carry_shift_data(RIGHT_SIDE, coord, + reiser4_carry_real(fresh), + doing, todo, gointo); + /* if insertion point was actually moved into new node, + update carry node pointer in operation. */ + node = sync_op(op, fresh); + not_enough_space = free_space_shortage(node, op); + if ((not_enough_space > 0) && (node != coord_shadow.node)) { + /* there is not enough free in new node. Shift + insertion point back to the @shadow_node so that + next new node would be inserted between + @shadow_node and @fresh. + */ + coord_normalize(&coord_shadow); + coord_dup(coord, &coord_shadow); + node = coord->node; + op->node = node_shadow; + if (1 || (flags & COPI_STEP_BACK)) { + /* still not enough space?! Maybe there is + enough space in the source node (i.e., node + data are moved from) now. 
+ */ + not_enough_space = + free_space_shortage(node, op); + } + } + } + if (not_enough_space > 0) { + if (!(flags & COPI_DONT_ALLOCATE)) + warning("nikita-948", "Cannot insert new item"); + result = -E_NODE_FULL; + } + assert("nikita-1622", ergo(result == 0, + reiser4_carry_real(op->node) == coord->node)); + assert("nikita-2616", coord == op->u.insert.d->coord); + if (result == 0) + result = make_space_tail(op, doing, orig_node); + return result; +} + +/* insert_paste_common() - common part of insert and paste operations + + This function performs common part of COP_INSERT and COP_PASTE. + + There are two ways in which insertion/paste can be requested: + + . by directly supplying reiser4_item_data. In this case, op -> + u.insert.type is set to COPT_ITEM_DATA. + + . by supplying child pointer to which is to inserted into parent. In this + case op -> u.insert.type == COPT_CHILD. + + . by supplying key of new item/unit. This is currently only used during + extent insertion + + This is required, because when new node is allocated we don't know at what + position pointer to it is to be stored in the parent. Actually, we don't + even know what its parent will be, because parent can be re-balanced + concurrently and new node re-parented, and because parent can be full and + pointer to the new node will go into some other node. + + insert_paste_common() resolves pointer to child node into position in the + parent by calling find_new_child_coord(), that fills + reiser4_item_data. After this, insertion/paste proceeds uniformly. + + Another complication is with finding free space during pasting. It may + happen that while shifting items to the neighbors and newly allocated + nodes, insertion coord can no longer be in the item we wanted to paste + into. At this point, paste becomes (morphs) into insert. Moreover free + space analysis has to be repeated, because amount of space required for + insertion is different from that of paste (item header overhead, etc). 
+ + This function "unifies" different insertion modes (by resolving child + pointer or key into insertion coord), and then calls make_space() to free + enough space in the node by shifting data to the left and right and by + allocating new nodes if necessary. Carry operation knows amount of space + required for its completion. After enough free space is obtained, caller of + this function (carry_{insert,paste,etc.}) performs actual insertion/paste + by calling item plugin method. + +*/ +static int insert_paste_common(carry_op * op /* carry operation being + * performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo /* next carry level */ , + carry_insert_data * cdata /* pointer to + * cdata */ , + coord_t *coord /* insertion/paste coord */ , + reiser4_item_data * data /* data to be + * inserted/pasted */ ) +{ + assert("nikita-981", op != NULL); + assert("nikita-980", todo != NULL); + assert("nikita-979", (op->op == COP_INSERT) || (op->op == COP_PASTE) + || (op->op == COP_EXTENT)); + + if (op->u.insert.type == COPT_PASTE_RESTARTED) { + /* nothing to do. Fall through to make_space(). */ + ; + } else if (op->u.insert.type == COPT_KEY) { + node_search_result intra_node; + znode *node; + /* Problem with doing batching at the lowest level, is that + operations here are given by coords where modification is + to be performed, and one modification can invalidate coords + of all following operations. + + So, we are implementing yet another type for operation that + will use (the only) "locator" stable across shifting of + data between nodes, etc.: key (COPT_KEY). + + This clause resolves key to the coord in the node. + + But node can change also. Probably some pieces have to be + added to the lock_carry_node(), to lock node by its key. + + */ + /* NOTE-NIKITA Lookup bias is fixed to FIND_EXACT. Complain + if you need something else. 
*/ + op->u.insert.d->coord = coord; + node = reiser4_carry_real(op->node); + intra_node = node_plugin_by_node(node)->lookup + (node, op->u.insert.d->key, FIND_EXACT, + op->u.insert.d->coord); + if ((intra_node != NS_FOUND) && (intra_node != NS_NOT_FOUND)) { + warning("nikita-1715", "Intra node lookup failure: %i", + intra_node); + return intra_node; + } + } else if (op->u.insert.type == COPT_CHILD) { + /* if we are asked to insert pointer to the child into + internal node, first convert pointer to the child into + coord within parent node. + */ + znode *child; + int result; + + op->u.insert.d = cdata; + op->u.insert.d->coord = coord; + op->u.insert.d->data = data; + op->u.insert.d->coord->node = reiser4_carry_real(op->node); + result = find_new_child_coord(op); + child = reiser4_carry_real(op->u.insert.child); + if (result != NS_NOT_FOUND) { + warning("nikita-993", + "Cannot find a place for child pointer: %i", + result); + return result; + } + /* This only happens when we did multiple insertions at + the previous level, trying to insert single item and + it so happened, that insertion of pointers to all new + nodes before this one already caused parent node to + split (may be several times). + + I am going to come up with better solution. + + You are not expected to understand this. + -- v6root/usr/sys/ken/slp.c + + Basically, what happens here is the following: carry came + to the parent level and is about to insert internal item + pointing to the child node that it just inserted in the + level below. Position where internal item is to be inserted + was found by find_new_child_coord() above, but node of the + current carry operation (that is, parent node of child + inserted on the previous level), was determined earlier in + the lock_carry_level/lock_carry_node. It could so happen + that other carry operations already performed on the parent + level already split parent node, so that insertion point + moved into another node. 
Handle this by creating new carry + node for insertion point if necessary. + */ + if (reiser4_carry_real(op->node) != + op->u.insert.d->coord->node) { + pool_ordering direction; + znode *z1; + znode *z2; + reiser4_key k1; + reiser4_key k2; + + /* + * determine in what direction insertion point + * moved. Do this by comparing delimiting keys. + */ + z1 = op->u.insert.d->coord->node; + z2 = reiser4_carry_real(op->node); + if (keyle(leftmost_key_in_node(z1, &k1), + leftmost_key_in_node(z2, &k2))) + /* insertion point moved to the left */ + direction = POOLO_BEFORE; + else + /* insertion point moved to the right */ + direction = POOLO_AFTER; + + op->node = reiser4_add_carry_skip(doing, + direction, op->node); + if (IS_ERR(op->node)) + return PTR_ERR(op->node); + op->node->node = op->u.insert.d->coord->node; + op->node->free = 1; + result = lock_carry_node(doing, op->node); + if (result != 0) + return result; + } + + /* + * set up key of an item being inserted: we are inserting + * internal item and its key is (by the very definition of + * search tree) is leftmost key in the child node. + */ + write_lock_dk(znode_get_tree(child)); + op->u.insert.d->key = leftmost_key_in_node(child, + znode_get_ld_key(child)); + write_unlock_dk(znode_get_tree(child)); + op->u.insert.d->data->arg = op->u.insert.brother; + } else { + assert("vs-243", op->u.insert.d->coord != NULL); + op->u.insert.d->coord->node = reiser4_carry_real(op->node); + } + + /* find free space. */ + return make_space(op, doing, todo); +} + +/* handle carry COP_INSERT operation. + + Insert new item into node. New item can be given in one of two ways: + + - by passing &tree_coord and &reiser4_item_data as part of @op. This is + only applicable at the leaf/twig level. + + - by passing a child node pointer to which is to be inserted by this + operation. 
+ +*/ +static int carry_insert(carry_op * op /* operation to perform */ , + carry_level * doing /* queue of operations @op + * is part of */ , + carry_level * todo /* queue where new operations + * are accumulated */ ) +{ + znode *node; + carry_insert_data cdata; + coord_t coord; + reiser4_item_data data; + carry_plugin_info info; + int result; + + assert("nikita-1036", op != NULL); + assert("nikita-1037", todo != NULL); + assert("nikita-1038", op->op == COP_INSERT); + + coord_init_zero(&coord); + + /* perform common functionality of insert and paste. */ + result = insert_paste_common(op, doing, todo, &cdata, &coord, &data); + if (result != 0) + return result; + + node = op->u.insert.d->coord->node; + assert("nikita-1039", node != NULL); + assert("nikita-1040", node_plugin_by_node(node) != NULL); + + assert("nikita-949", + space_needed_for_op(node, op) <= znode_free_space(node)); + + /* ask node layout to create new item. */ + info.doing = doing; + info.todo = todo; + result = node_plugin_by_node(node)->create_item + (op->u.insert.d->coord, op->u.insert.d->key, op->u.insert.d->data, + &info); + doing->restartable = 0; + znode_make_dirty(node); + + return result; +} + +/* + * Flow insertion code. COP_INSERT_FLOW is special tree operation that is + * supplied with a "flow" (that is, a stream of data) and inserts it into tree + * by slicing into multiple items. + */ + +#define flow_insert_point(op) ((op)->u.insert_flow.insert_point) +#define flow_insert_flow(op) ((op)->u.insert_flow.flow) +#define flow_insert_data(op) ((op)->u.insert_flow.data) + +static size_t item_data_overhead(carry_op * op) +{ + if (flow_insert_data(op)->iplug->b.estimate == NULL) + return 0; + return (flow_insert_data(op)->iplug->b. + estimate(NULL /* estimate insertion */ , flow_insert_data(op)) - + flow_insert_data(op)->length); +} + +/* FIXME-VS: this is called several times during one make_flow_for_insertion + and it will always return the same result. 
Some optimization could be made + by calculating this value once at the beginning and passing it around. That + would reduce some flexibility in future changes +*/ +static int can_paste(coord_t *, const reiser4_key *, const reiser4_item_data *); +static size_t flow_insertion_overhead(carry_op * op) +{ + znode *node; + size_t insertion_overhead; + + node = flow_insert_point(op)->node; + insertion_overhead = 0; + if (node->nplug->item_overhead && + !can_paste(flow_insert_point(op), &flow_insert_flow(op)->key, + flow_insert_data(op))) + insertion_overhead = + node->nplug->item_overhead(node, NULL) + + item_data_overhead(op); + return insertion_overhead; +} + +/* how many bytes of flow does fit to the node */ +static int what_can_fit_into_node(carry_op * op) +{ + size_t free, overhead; + + overhead = flow_insertion_overhead(op); + free = znode_free_space(flow_insert_point(op)->node); + if (free <= overhead) + return 0; + free -= overhead; + /* FIXME: flow->length is loff_t only to not get overflowed in case of + expandign truncate */ + if (free < op->u.insert_flow.flow->length) + return free; + return (int)op->u.insert_flow.flow->length; +} + +/* in make_space_for_flow_insertion we need to check either whether whole flow + fits into a node or whether minimal fraction of flow fits into a node */ +static int enough_space_for_whole_flow(carry_op * op) +{ + return (unsigned)what_can_fit_into_node(op) == + op->u.insert_flow.flow->length; +} + +#define MIN_FLOW_FRACTION 1 +static int enough_space_for_min_flow_fraction(carry_op * op) +{ + //assert("vs-902", coord_is_after_rightmost(flow_insert_point(op))); + + return what_can_fit_into_node(op) >= MIN_FLOW_FRACTION; +} + +/* this returns 0 if left neighbor was obtained successfully and everything + upto insertion point including it were shifted and left neighbor still has + some free space to put minimal fraction of flow into it */ +static int +make_space_by_shift_left(carry_op * op, carry_level * doing, carry_level * todo) +{ 
+ carry_node *left; + znode *orig; + + left = find_left_neighbor(op, doing); + if (unlikely(IS_ERR(left))) { + warning("vs-899", + "make_space_by_shift_left: " + "error accessing left neighbor: %li", PTR_ERR(left)); + return 1; + } + if (left == NULL) + /* left neighbor either does not exist or is unformatted + node */ + return 1; + + orig = flow_insert_point(op)->node; + /* try to shift content of node @orig from its head upto insert point + including insertion point into the left neighbor */ + carry_shift_data(LEFT_SIDE, flow_insert_point(op), + reiser4_carry_real(left), doing, todo, + 1/* including insert point */); + if (reiser4_carry_real(left) != flow_insert_point(op)->node) { + /* insertion point did not move */ + return 1; + } + + /* insertion point is set after last item in the node */ + assert("vs-900", coord_is_after_rightmost(flow_insert_point(op))); + + if (!enough_space_for_min_flow_fraction(op)) { + /* insertion point node does not have enough free space to put + even minimal portion of flow into it, therefore, move + insertion point back to orig node (before first item) */ + coord_init_before_first_item(flow_insert_point(op), orig); + return 1; + } + + /* part of flow is to be written to the end of node */ + op->node = left; + return 0; +} + +/* this returns 0 if right neighbor was obtained successfully and everything to + the right of insertion point was shifted to it and node got enough free + space to put minimal fraction of flow into it */ +static int +make_space_by_shift_right(carry_op * op, carry_level * doing, + carry_level * todo) +{ + carry_node *right; + + right = find_right_neighbor(op, doing); + if (unlikely(IS_ERR(right))) { + warning("nikita-1065", "shift_right_excluding_insert_point: " + "error accessing right neighbor: %li", PTR_ERR(right)); + return 1; + } + if (right) { + /* shift everything possible on the right of but excluding + insertion coord into the right neighbor */ + carry_shift_data(RIGHT_SIDE, flow_insert_point(op), + 
reiser4_carry_real(right), doing, todo, + 0/* not including insert point */); + } else { + /* right neighbor either does not exist or is unformatted + node */ + ; + } + if (coord_is_after_rightmost(flow_insert_point(op))) { + if (enough_space_for_min_flow_fraction(op)) { + /* part of flow is to be written to the end of node */ + return 0; + } + } + + /* new node is to be added if insert point node did not get enough + space for whole flow */ + return 1; +} + +/* this returns 0 when insert coord is set at the node end and fraction of flow + fits into that node */ +static int +make_space_by_new_nodes(carry_op * op, carry_level * doing, carry_level * todo) +{ + int result; + znode *node; + carry_node *new; + + node = flow_insert_point(op)->node; + + if (op->u.insert_flow.new_nodes == CARRY_FLOW_NEW_NODES_LIMIT) + return RETERR(-E_NODE_FULL); + /* add new node after insert point node */ + new = add_new_znode(node, op->node, doing, todo); + if (unlikely(IS_ERR(new))) + return PTR_ERR(new); + result = lock_carry_node(doing, new); + zput(reiser4_carry_real(new)); + if (unlikely(result)) + return result; + op->u.insert_flow.new_nodes++; + if (!coord_is_after_rightmost(flow_insert_point(op))) { + carry_shift_data(RIGHT_SIDE, flow_insert_point(op), + reiser4_carry_real(new), doing, todo, + 0/* not including insert point */); + assert("vs-901", + coord_is_after_rightmost(flow_insert_point(op))); + + if (enough_space_for_min_flow_fraction(op)) + return 0; + if (op->u.insert_flow.new_nodes == CARRY_FLOW_NEW_NODES_LIMIT) + return RETERR(-E_NODE_FULL); + + /* add one more new node */ + new = add_new_znode(node, op->node, doing, todo); + if (unlikely(IS_ERR(new))) + return PTR_ERR(new); + result = lock_carry_node(doing, new); + zput(reiser4_carry_real(new)); + if (unlikely(result)) + return result; + op->u.insert_flow.new_nodes++; + } + + /* move insertion point to new node */ + coord_init_before_first_item(flow_insert_point(op), + reiser4_carry_real(new)); + op->node = new; + 
return 0; +} + +static int +make_space_for_flow_insertion(carry_op * op, carry_level * doing, + carry_level * todo) +{ + __u32 flags = op->u.insert_flow.flags; + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + if ((flags & COPI_SWEEP) && + enough_space_for_min_flow_fraction(op)) + /* use the rest of space in the current node */ + return 0; + + if (!(flags & COPI_DONT_SHIFT_LEFT) + && (make_space_by_shift_left(op, doing, todo) == 0)) { + /* insert point is shifted to left neighbor of original insert + point node and is set after last unit in that node. It has + enough space to fit at least minimal fraction of flow. */ + return 0; + } + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + + if (!(flags & COPI_DONT_SHIFT_RIGHT) + && (make_space_by_shift_right(op, doing, todo) == 0)) { + /* insert point is still set to the same node, but there is + nothing to the right of insert point. 
*/ + return 0; + } + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + + return make_space_by_new_nodes(op, doing, todo); +} + +/* implements COP_INSERT_FLOW operation */ +static int +carry_insert_flow(carry_op * op, carry_level * doing, carry_level * todo) +{ + int result; + flow_t *f; + coord_t *insert_point; + node_plugin *nplug; + carry_plugin_info info; + znode *orig_node; + lock_handle *orig_lh; + + f = op->u.insert_flow.flow; + result = 0; + + /* carry system needs this to work */ + info.doing = doing; + info.todo = todo; + + orig_node = flow_insert_point(op)->node; + orig_lh = doing->tracked; + + while (f->length) { + result = make_space_for_flow_insertion(op, doing, todo); + if (result) + break; + + insert_point = flow_insert_point(op); + nplug = node_plugin_by_node(insert_point->node); + + /* compose item data for insertion/pasting */ + flow_insert_data(op)->data = f->data; + flow_insert_data(op)->length = what_can_fit_into_node(op); + + if (can_paste(insert_point, &f->key, flow_insert_data(op))) { + /* insert point is set to item of file we are writing to + and we have to append to it */ + assert("vs-903", insert_point->between == AFTER_UNIT); + nplug->change_item_size(insert_point, + flow_insert_data(op)->length); + flow_insert_data(op)->iplug->b.paste(insert_point, + flow_insert_data + (op), &info); + } else { + /* new item must be inserted */ + pos_in_node_t new_pos; + flow_insert_data(op)->length += item_data_overhead(op); + + /* FIXME-VS: this is because node40_create_item changes + insert_point for obscure reasons */ + switch (insert_point->between) { + case AFTER_ITEM: + new_pos = insert_point->item_pos + 1; + break; + case EMPTY_NODE: + new_pos = 0; + break; + case BEFORE_ITEM: + assert("vs-905", insert_point->item_pos == 0); + new_pos = 0; + break; + default: + impossible("vs-906", + "carry_insert_flow: invalid coord"); + new_pos = 0; + break; + } + + nplug->create_item(insert_point, 
&f->key, + flow_insert_data(op), &info); + coord_set_item_pos(insert_point, new_pos); + } + coord_init_after_item_end(insert_point); + doing->restartable = 0; + znode_make_dirty(insert_point->node); + + move_flow_forward(f, (unsigned)flow_insert_data(op)->length); + } + + if (orig_node != flow_insert_point(op)->node) { + /* move lock to new insert point */ + done_lh(orig_lh); + init_lh(orig_lh); + result = + longterm_lock_znode(orig_lh, flow_insert_point(op)->node, + ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI); + } + + return result; +} + +/* implements COP_DELETE operation + + Remove pointer to @op -> u.delete.child from it's parent. + + This function also handles killing of a tree root is last pointer from it + was removed. This is complicated by our handling of "twig" level: root on + twig level is never killed. + +*/ +static int carry_delete(carry_op * op /* operation to be performed */ , + carry_level * doing UNUSED_ARG /* current carry + * level */ , + carry_level * todo/* next carry level */) +{ + int result; + coord_t coord; + coord_t coord2; + znode *parent; + znode *child; + carry_plugin_info info; + reiser4_tree *tree; + + /* + * This operation is called to delete internal item pointing to the + * child node that was removed by carry from the tree on the previous + * tree level. + */ + + assert("nikita-893", op != NULL); + assert("nikita-894", todo != NULL); + assert("nikita-895", op->op == COP_DELETE); + + coord_init_zero(&coord); + coord_init_zero(&coord2); + + parent = reiser4_carry_real(op->node); + child = op->u.delete.child ? + reiser4_carry_real(op->u.delete.child) : op->node->node; + tree = znode_get_tree(child); + read_lock_tree(tree); + + /* + * @parent was determined when carry entered parent level + * (lock_carry_level/lock_carry_node). Since then, actual parent of + * @child node could change due to other carry operations performed on + * the parent level. Check for this. 
+ */ + + if (znode_parent(child) != parent) { + /* NOTE-NIKITA add stat counter for this. */ + parent = znode_parent(child); + assert("nikita-2581", find_carry_node(doing, parent)); + } + read_unlock_tree(tree); + + assert("nikita-1213", znode_get_level(parent) > LEAF_LEVEL); + + /* Twig level horrors: tree should be of height at least 2. So, last + pointer from the root at twig level is preserved even if child is + empty. This is ugly, but so it was architectured. + */ + + if (znode_is_root(parent) && + znode_get_level(parent) <= REISER4_MIN_TREE_HEIGHT && + node_num_items(parent) == 1) { + /* Delimiting key manipulations. */ + write_lock_dk(tree); + znode_set_ld_key(child, znode_set_ld_key(parent, reiser4_min_key())); + znode_set_rd_key(child, znode_set_rd_key(parent, reiser4_max_key())); + ZF_SET(child, JNODE_DKSET); + write_unlock_dk(tree); + + /* @child escaped imminent death! */ + ZF_CLR(child, JNODE_HEARD_BANSHEE); + return 0; + } + + /* convert child pointer to the coord_t */ + result = find_child_ptr(parent, child, &coord); + if (result != NS_FOUND) { + warning("nikita-994", "Cannot find child pointer: %i", result); + print_coord_content("coord", &coord); + return result; + } + + coord_dup(&coord2, &coord); + info.doing = doing; + info.todo = todo; + { + /* + * Actually kill internal item: prepare structure with + * arguments for ->cut_and_kill() method... + */ + + struct carry_kill_data kdata; + kdata.params.from = &coord; + kdata.params.to = &coord2; + kdata.params.from_key = NULL; + kdata.params.to_key = NULL; + kdata.params.smallest_removed = NULL; + kdata.params.truncate = 1; + kdata.flags = op->u.delete.flags; + kdata.inode = NULL; + kdata.left = NULL; + kdata.right = NULL; + kdata.buf = NULL; + /* ... and call it. 
*/ + result = node_plugin_by_node(parent)->cut_and_kill(&kdata, + &info); + } + doing->restartable = 0; + + /* check whether root should be killed violently */ + if (znode_is_root(parent) && + /* don't kill roots at and lower than twig level */ + znode_get_level(parent) > REISER4_MIN_TREE_HEIGHT && + node_num_items(parent) == 1) + result = reiser4_kill_tree_root(coord.node); + + return result < 0 ? result : 0; +} + +/* implements COP_CUT opration + + Cuts part or whole content of node. + +*/ +static int carry_cut(carry_op * op /* operation to be performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo/* next carry level */) +{ + int result; + carry_plugin_info info; + node_plugin *nplug; + + assert("nikita-896", op != NULL); + assert("nikita-897", todo != NULL); + assert("nikita-898", op->op == COP_CUT); + + info.doing = doing; + info.todo = todo; + + nplug = node_plugin_by_node(reiser4_carry_real(op->node)); + if (op->u.cut_or_kill.is_cut) + result = nplug->cut(op->u.cut_or_kill.u.cut, &info); + else + result = nplug->cut_and_kill(op->u.cut_or_kill.u.kill, &info); + + doing->restartable = 0; + return result < 0 ? result : 0; +} + +/* helper function for carry_paste(): returns true if @op can be continued as + paste */ +static int +can_paste(coord_t *icoord, const reiser4_key * key, + const reiser4_item_data * data) +{ + coord_t circa; + item_plugin *new_iplug; + item_plugin *old_iplug; + int result = 0; /* to keep gcc shut */ + + assert("", icoord->between != AT_UNIT); + + /* obviously, one cannot paste when node is empty---there is nothing + to paste into. 
*/ + if (node_is_empty(icoord->node)) + return 0; + /* if insertion point is at the middle of the item, then paste */ + if (!coord_is_between_items(icoord)) + return 1; + coord_dup(&circa, icoord); + circa.between = AT_UNIT; + + old_iplug = item_plugin_by_coord(&circa); + new_iplug = data->iplug; + + /* check whether we can paste to the item @icoord is "at" when we + ignore ->between field */ + if (old_iplug == new_iplug && item_can_contain_key(&circa, key, data)) + result = 1; + else if (icoord->between == BEFORE_UNIT + || icoord->between == BEFORE_ITEM) { + /* otherwise, try to glue to the item at the left, if any */ + coord_dup(&circa, icoord); + if (coord_set_to_left(&circa)) { + result = 0; + coord_init_before_item(icoord); + } else { + old_iplug = item_plugin_by_coord(&circa); + result = (old_iplug == new_iplug) + && item_can_contain_key(icoord, key, data); + if (result) { + coord_dup(icoord, &circa); + icoord->between = AFTER_UNIT; + } + } + } else if (icoord->between == AFTER_UNIT + || icoord->between == AFTER_ITEM) { + coord_dup(&circa, icoord); + /* otherwise, try to glue to the item at the right, if any */ + if (coord_set_to_right(&circa)) { + result = 0; + coord_init_after_item(icoord); + } else { + int (*cck) (const coord_t *, const reiser4_key *, + const reiser4_item_data *); + + old_iplug = item_plugin_by_coord(&circa); + + cck = old_iplug->b.can_contain_key; + if (cck == NULL) + /* item doesn't define ->can_contain_key + method? So it is not expandable. 
*/ + result = 0; + else { + result = (old_iplug == new_iplug) + && cck(&circa /*icoord */ , key, data); + if (result) { + coord_dup(icoord, &circa); + icoord->between = BEFORE_UNIT; + } + } + } + } else + impossible("nikita-2513", "Nothing works"); + if (result) { + if (icoord->between == BEFORE_ITEM) { + assert("vs-912", icoord->unit_pos == 0); + icoord->between = BEFORE_UNIT; + } else if (icoord->between == AFTER_ITEM) { + coord_init_after_item_end(icoord); + } + } + return result; +} + +/* implements COP_PASTE operation + + Paste data into existing item. This is complicated by the fact that after + we shifted something to the left or right neighbors trying to free some + space, item we were supposed to paste into can be in different node than + insertion coord. If so, we are no longer doing paste, but insert. See + comments in insert_paste_common(). + +*/ +static int carry_paste(carry_op * op /* operation to be performed */ , + carry_level * doing UNUSED_ARG /* current carry + * level */ , + carry_level * todo/* next carry level */) +{ + znode *node; + carry_insert_data cdata; + coord_t dcoord; + reiser4_item_data data; + int result; + int real_size; + item_plugin *iplug; + carry_plugin_info info; + coord_t *coord; + + assert("nikita-982", op != NULL); + assert("nikita-983", todo != NULL); + assert("nikita-984", op->op == COP_PASTE); + + coord_init_zero(&dcoord); + + result = insert_paste_common(op, doing, todo, &cdata, &dcoord, &data); + if (result != 0) + return result; + + coord = op->u.insert.d->coord; + + /* handle case when op -> u.insert.coord doesn't point to the item + of required type. restart as insert. 
*/ + if (!can_paste(coord, op->u.insert.d->key, op->u.insert.d->data)) { + op->op = COP_INSERT; + op->u.insert.type = COPT_PASTE_RESTARTED; + result = op_dispatch_table[COP_INSERT].handler(op, doing, todo); + + return result; + } + + node = coord->node; + iplug = item_plugin_by_coord(coord); + assert("nikita-992", iplug != NULL); + + assert("nikita-985", node != NULL); + assert("nikita-986", node_plugin_by_node(node) != NULL); + + assert("nikita-987", + space_needed_for_op(node, op) <= znode_free_space(node)); + + assert("nikita-1286", coord_is_existing_item(coord)); + + /* + * if item is expanded as a result of this operation, we should first + * change item size, than call ->b.paste item method. If item is + * shrunk, it should be done other way around: first call ->b.paste + * method, then reduce item size. + */ + + real_size = space_needed_for_op(node, op); + if (real_size > 0) + node->nplug->change_item_size(coord, real_size); + + doing->restartable = 0; + info.doing = doing; + info.todo = todo; + + result = iplug->b.paste(coord, op->u.insert.d->data, &info); + + if (real_size < 0) + node->nplug->change_item_size(coord, real_size); + + /* if we pasted at the beginning of the item, update item's key. */ + if (coord->unit_pos == 0 && coord->between != AFTER_UNIT) + node->nplug->update_item_key(coord, op->u.insert.d->key, &info); + + znode_make_dirty(node); + return result; +} + +/* handle carry COP_EXTENT operation. 
*/ +static int carry_extent(carry_op * op /* operation to perform */ , + carry_level * doing /* queue of operations @op + * is part of */ , + carry_level * todo /* queue where new operations + * are accumulated */ ) +{ + znode *node; + carry_insert_data cdata; + coord_t coord; + reiser4_item_data data; + carry_op *delete_dummy; + carry_op *insert_extent; + int result; + carry_plugin_info info; + + assert("nikita-1751", op != NULL); + assert("nikita-1752", todo != NULL); + assert("nikita-1753", op->op == COP_EXTENT); + + /* extent insertion overview: + + extents live on the TWIG LEVEL, which is level one above the leaf + one. This complicates extent insertion logic somewhat: it may + happen (and going to happen all the time) that in logical key + ordering extent has to be placed between items I1 and I2, located + at the leaf level, but I1 and I2 are in the same formatted leaf + node N1. To insert extent one has to + + (1) reach node N1 and shift data between N1, its neighbors and + possibly newly allocated nodes until I1 and I2 fall into different + nodes. Since I1 and I2 are still neighboring items in logical key + order, they will be necessary utmost items in their respective + nodes. + + (2) After this new extent item is inserted into node on the twig + level. + + Fortunately this process can reuse almost all code from standard + insertion procedure (viz. make_space() and insert_paste_common()), + due to the following observation: make_space() only shifts data up + to and excluding or including insertion point. It never + "over-moves" through insertion point. Thus, one can use + make_space() to perform step (1). All required for this is just to + instruct free_space_shortage() to keep make_space() shifting data + until insertion point is at the node border. + + */ + + /* perform common functionality of insert and paste. 
*/ + result = insert_paste_common(op, doing, todo, &cdata, &coord, &data); + if (result != 0) + return result; + + node = op->u.extent.d->coord->node; + assert("nikita-1754", node != NULL); + assert("nikita-1755", node_plugin_by_node(node) != NULL); + assert("nikita-1700", coord_wrt(op->u.extent.d->coord) != COORD_INSIDE); + + /* NOTE-NIKITA add some checks here. Not assertions, -EIO. Check that + extent fits between items. */ + + info.doing = doing; + info.todo = todo; + + /* there is another complication due to placement of extents on the + twig level: extents are "rigid" in the sense that key-range + occupied by extent cannot grow indefinitely to the right as it is + for the formatted leaf nodes. Because of this when search finds two + adjacent extents on the twig level, it has to "drill" to the leaf + level, creating new node. Here we are removing this node. + */ + if (node_is_empty(node)) { + delete_dummy = node_post_carry(&info, COP_DELETE, node, 1); + if (IS_ERR(delete_dummy)) + return PTR_ERR(delete_dummy); + delete_dummy->u.delete.child = NULL; + delete_dummy->u.delete.flags = DELETE_RETAIN_EMPTY; + ZF_SET(node, JNODE_HEARD_BANSHEE); + } + + /* proceed with inserting extent item into parent. We are definitely + inserting rather than pasting if we get that far. */ + insert_extent = node_post_carry(&info, COP_INSERT, node, 1); + if (IS_ERR(insert_extent)) + /* @delete_dummy will be automatically destroyed on the level + exiting */ + return PTR_ERR(insert_extent); + /* NOTE-NIKITA insertion by key is simplest option here. Another + possibility is to insert on the left or right of already existing + item. 
+ */ + insert_extent->u.insert.type = COPT_KEY; + insert_extent->u.insert.d = op->u.extent.d; + assert("nikita-1719", op->u.extent.d->key != NULL); + insert_extent->u.insert.d->data->arg = op->u.extent.d->coord; + insert_extent->u.insert.flags = + znode_get_tree(node)->carry.new_extent_flags; + + /* + * if carry was asked to track lock handle we should actually track + * lock handle on the twig node rather than on the leaf where + * operation was started from. Transfer tracked lock handle. + */ + if (doing->track_type) { + assert("nikita-3242", doing->tracked != NULL); + assert("nikita-3244", todo->tracked == NULL); + todo->tracked = doing->tracked; + todo->track_type = CARRY_TRACK_NODE; + doing->tracked = NULL; + doing->track_type = 0; + } + + return 0; +} + +/* update key in @parent between pointers to @left and @right. + + Find coords of @left and @right and update delimiting key between them. + This is helper function called by carry_update(). Finds position of + internal item involved. Updates item key. Updates delimiting keys of child + nodes involved. 
+*/ +static int update_delimiting_key(znode * parent /* node key is updated + * in */ , + znode * left /* child of @parent */ , + znode * right /* child of @parent */ , + carry_level * doing /* current carry + * level */ , + carry_level * todo /* parent carry + * level */ , + const char **error_msg /* place to + * store error + * message */ ) +{ + coord_t left_pos; + coord_t right_pos; + int result; + reiser4_key ldkey; + carry_plugin_info info; + + assert("nikita-1177", right != NULL); + /* find position of right left child in a parent */ + result = find_child_ptr(parent, right, &right_pos); + if (result != NS_FOUND) { + *error_msg = "Cannot find position of right child"; + return result; + } + + if ((left != NULL) && !coord_is_leftmost_unit(&right_pos)) { + /* find position of the left child in a parent */ + result = find_child_ptr(parent, left, &left_pos); + if (result != NS_FOUND) { + *error_msg = "Cannot find position of left child"; + return result; + } + assert("nikita-1355", left_pos.node != NULL); + } else + left_pos.node = NULL; + + /* check that they are separated by exactly one key and are basically + sane */ + if (REISER4_DEBUG) { + if ((left_pos.node != NULL) + && !coord_is_existing_unit(&left_pos)) { + *error_msg = "Left child is bastard"; + return RETERR(-EIO); + } + if (!coord_is_existing_unit(&right_pos)) { + *error_msg = "Right child is bastard"; + return RETERR(-EIO); + } + if (left_pos.node != NULL && + !coord_are_neighbors(&left_pos, &right_pos)) { + *error_msg = "Children are not direct siblings"; + return RETERR(-EIO); + } + } + *error_msg = NULL; + + info.doing = doing; + info.todo = todo; + + /* + * If child node is not empty, new key of internal item is a key of + * leftmost item in the child node. If the child is empty, take its + * right delimiting key as a new key of the internal item. 
Precise key + * in the latter case is not important per se, because the child (and + * the internal item) are going to be killed shortly anyway, but we + * have to preserve correct order of keys in the parent node. + */ + + if (!ZF_ISSET(right, JNODE_HEARD_BANSHEE)) + leftmost_key_in_node(right, &ldkey); + else { + read_lock_dk(znode_get_tree(parent)); + ldkey = *znode_get_rd_key(right); + read_unlock_dk(znode_get_tree(parent)); + } + node_plugin_by_node(parent)->update_item_key(&right_pos, &ldkey, &info); + doing->restartable = 0; + znode_make_dirty(parent); + return 0; +} + +/* implements COP_UPDATE opration + + Update delimiting keys. + +*/ +static int carry_update(carry_op * op /* operation to be performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo/* next carry level */) +{ + int result; + carry_node *missing UNUSED_ARG; + znode *left; + znode *right; + carry_node *lchild; + carry_node *rchild; + const char *error_msg; + reiser4_tree *tree; + + /* + * This operation is called to update key of internal item. This is + * necessary when carry shifted of cut data on the child + * level. Arguments of this operation are: + * + * @right --- child node. Operation should update key of internal + * item pointing to @right. + * + * @left --- left neighbor of @right. This parameter is optional. + */ + + assert("nikita-902", op != NULL); + assert("nikita-903", todo != NULL); + assert("nikita-904", op->op == COP_UPDATE); + + lchild = op->u.update.left; + rchild = op->node; + + if (lchild != NULL) { + assert("nikita-1001", lchild->parent); + assert("nikita-1003", !lchild->left); + left = reiser4_carry_real(lchild); + } else + left = NULL; + + tree = znode_get_tree(rchild->node); + read_lock_tree(tree); + right = znode_parent(rchild->node); + read_unlock_tree(tree); + + if (right != NULL) { + result = update_delimiting_key(right, + lchild ? 
lchild->node : NULL, + rchild->node, + doing, todo, &error_msg); + } else { + error_msg = "Cannot find node to update key in"; + result = RETERR(-EIO); + } + /* operation will be reposted to the next level by the + ->update_item_key() method of node plugin, if necessary. */ + + if (result != 0) { + warning("nikita-999", "Error updating delimiting key: %s (%i)", + error_msg ? : "", result); + } + return result; +} + +/* move items from @node during carry */ +static int carry_shift_data(sideof side /* in what direction to move data */ , + coord_t *insert_coord /* coord where new item + * is to be inserted */, + znode * node /* node which data are moved from */ , + carry_level * doing /* active carry queue */ , + carry_level * todo /* carry queue where new + * operations are to be put + * in */ , + unsigned int including_insert_coord_p + /* true if @insertion_coord can be moved */ ) +{ + int result; + znode *source; + carry_plugin_info info; + node_plugin *nplug; + + source = insert_coord->node; + + info.doing = doing; + info.todo = todo; + + nplug = node_plugin_by_node(node); + result = nplug->shift(insert_coord, node, + (side == LEFT_SIDE) ? SHIFT_LEFT : SHIFT_RIGHT, 0, + (int)including_insert_coord_p, &info); + /* the only error ->shift() method of node plugin can return is + -ENOMEM due to carry node/operation allocation. */ + assert("nikita-915", result >= 0 || result == -ENOMEM); + if (result > 0) { + /* + * if some number of bytes was actually shifted, mark nodes + * dirty, and carry level as non-restartable. 
+ */ + doing->restartable = 0; + znode_make_dirty(source); + znode_make_dirty(node); + } + + assert("nikita-2077", coord_check(insert_coord)); + return 0; +} + +typedef carry_node *(*carry_iterator) (carry_node * node); +static carry_node *find_dir_carry(carry_node * node, carry_level * level, + carry_iterator iterator); + +static carry_node *pool_level_list_prev(carry_node *node) +{ + return list_entry(node->header.level_linkage.prev, carry_node, header.level_linkage); +} + +/* look for the left neighbor of given carry node in a carry queue. + + This is used by find_left_neighbor(), but I am not sure that this + really gives any advantage. More statistics required. + +*/ +carry_node *find_left_carry(carry_node * node /* node to find left neighbor + * of */ , + carry_level * level/* level to scan */) +{ + return find_dir_carry(node, level, + (carry_iterator) pool_level_list_prev); +} + +static carry_node *pool_level_list_next(carry_node *node) +{ + return list_entry(node->header.level_linkage.next, carry_node, header.level_linkage); +} + +/* look for the right neighbor of given carry node in a + carry queue. + + This is used by find_right_neighbor(), but I am not sure that this + really gives any advantage. More statistics required. + +*/ +carry_node *find_right_carry(carry_node * node /* node to find right neighbor + * of */ , + carry_level * level/* level to scan */) +{ + return find_dir_carry(node, level, + (carry_iterator) pool_level_list_next); +} + +/* look for the left or right neighbor of given carry node in a carry + queue. + + Helper function used by find_{left|right}_carry(). 
+*/ +static carry_node *find_dir_carry(carry_node * node /* node to start + * scanning from */ , + carry_level * level /* level to scan */ , + carry_iterator iterator /* operation to + * move to the + * next node */) +{ + carry_node *neighbor; + + assert("nikita-1059", node != NULL); + assert("nikita-1060", level != NULL); + + /* scan list of carry nodes on this list dir-ward, skipping all + carry nodes referencing the same znode. */ + neighbor = node; + while (1) { + neighbor = iterator(neighbor); + if (carry_node_end(level, neighbor)) + /* list head is reached */ + return NULL; + if (reiser4_carry_real(neighbor) != reiser4_carry_real(node)) + return neighbor; + } +} + +/* + * Memory reservation estimation. + * + * Carry process proceeds through tree levels upwards. Carry assumes that it + * takes tree in consistent state (e.g., that search tree invariants hold), + * and leaves tree consistent after it finishes. This means that when some + * error occurs carry cannot simply return if there are pending carry + * operations. Generic solution for this problem is carry-undo either as + * transaction manager feature (requiring checkpoints and isolation), or + * through some carry specific mechanism. + * + * Our current approach is to panic if carry hits an error while tree is + * inconsistent. Unfortunately -ENOMEM can easily be triggered. To work around + * this "memory reservation" mechanism was added. + * + * Memory reservation is implemented by perthread-pages.diff patch from + * core-patches. Its API is defined in + * + * int perthread_pages_reserve(int nrpages, gfp_t gfp); + * void perthread_pages_release(int nrpages); + * int perthread_pages_count(void); + * + * carry estimates its worst case memory requirements at the entry, reserved + * enough memory, and released unused pages before returning. + * + * Code below estimates worst case memory requirements for a given carry + * queue. 
This is done by
+ */ + + /* in the worst case we need 3 new znode on each tree level */ + return bytes_to_pages(capped_height() * sizeof(znode) * 3); +} + +/* + * how many pages are required to load bitmaps. One bitmap per level. + */ +static int carry_estimate_bitmaps(void) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DONT_LOAD_BITMAP)) { + int bytes; + + bytes = capped_height() * (0 + /* bnode should be added, but + * it is private to bitmap.c, + * skip for now. */ + 2 * sizeof(jnode)); + /* working and commit jnodes */ + return bytes_to_pages(bytes) + 2; /* and their contents */ + } else + /* bitmaps were pre-loaded during mount */ + return 0; +} + +/* worst case item insertion memory requirements */ +static int carry_estimate_insert(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + capped_height() + /* new block on each level */ + 1 + /* and possibly extra new block at the leaf level */ + 3; /* loading of leaves into memory */ +} + +/* worst case item deletion memory requirements */ +static int carry_estimate_delete(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + 3; /* loading of leaves into memory */ +} + +/* worst case tree cut memory requirements */ +static int carry_estimate_cut(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + 3; /* loading of leaves into memory */ +} + +/* worst case memory requirements of pasting into item */ +static int carry_estimate_paste(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + capped_height() + /* new block on each level */ + 1 + /* and possibly extra new block at the leaf level */ + 3; /* loading of leaves into memory */ +} + +/* worst case memory requirements of extent insertion */ +static int carry_estimate_extent(carry_op * op, 
carry_level * level) +{ + return carry_estimate_insert(op, level) + /* insert extent */ + carry_estimate_delete(op, level); /* kill leaf */ +} + +/* worst case memory requirements of key update */ +static int carry_estimate_update(carry_op * op, carry_level * level) +{ + return 0; +} + +/* worst case memory requirements of flow insertion */ +static int carry_estimate_insert_flow(carry_op * op, carry_level * level) +{ + int newnodes; + + newnodes = min(bytes_to_pages(op->u.insert_flow.flow->length), + CARRY_FLOW_NEW_NODES_LIMIT); + /* + * roughly estimate insert_flow as a sequence of insertions. + */ + return newnodes * carry_estimate_insert(op, level); +} + +/* This is dispatch table for carry operations. It can be trivially + abstracted into useful plugin: tunable balancing policy is a good + thing. */ +carry_op_handler op_dispatch_table[COP_LAST_OP] = { + [COP_INSERT] = { + .handler = carry_insert, + .estimate = carry_estimate_insert} + , + [COP_DELETE] = { + .handler = carry_delete, + .estimate = carry_estimate_delete} + , + [COP_CUT] = { + .handler = carry_cut, + .estimate = carry_estimate_cut} + , + [COP_PASTE] = { + .handler = carry_paste, + .estimate = carry_estimate_paste} + , + [COP_EXTENT] = { + .handler = carry_extent, + .estimate = carry_estimate_extent} + , + [COP_UPDATE] = { + .handler = carry_update, + .estimate = carry_estimate_update} + , + [COP_INSERT_FLOW] = { + .handler = carry_insert_flow, + .estimate = carry_estimate_insert_flow} +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry_ops.h b/fs/reiser4/carry_ops.h new file mode 100644 index 000000000000..bda0e5c90eec --- /dev/null +++ b/fs/reiser4/carry_ops.h @@ -0,0 +1,43 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* implementation of carry operations. See carry_ops.c for details. 
*/ + +#if !defined(__CARRY_OPS_H__) +#define __CARRY_OPS_H__ + +#include "forward.h" +#include "znode.h" +#include "carry.h" + +/* carry operation handlers */ +typedef struct carry_op_handler { + /* perform operation */ + int (*handler) (carry_op * op, carry_level * doing, carry_level * todo); + /* estimate memory requirements for @op */ + int (*estimate) (carry_op * op, carry_level * level); +} carry_op_handler; + +/* This is dispatch table for carry operations. It can be trivially + abstracted into useful plugin: tunable balancing policy is a good + thing. */ +extern carry_op_handler op_dispatch_table[COP_LAST_OP]; + +unsigned int space_needed(const znode * node, const coord_t *coord, + const reiser4_item_data * data, int inserting); +extern carry_node *find_left_carry(carry_node * node, carry_level * level); +extern carry_node *find_right_carry(carry_node * node, carry_level * level); + +/* __CARRY_OPS_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/checksum.c b/fs/reiser4/checksum.c new file mode 100644 index 000000000000..2a35f4260db7 --- /dev/null +++ b/fs/reiser4/checksum.c @@ -0,0 +1,33 @@ +#include +#include "debug.h" +#include "checksum.h" + +int reiser4_init_csum_tfm(struct crypto_shash **tfm) +{ + struct crypto_shash *new_tfm; + + new_tfm = crypto_alloc_shash("crc32c", 0, 0); + if (IS_ERR(new_tfm)) { + warning("intelfx-81", "Could not load crc32c driver"); + return PTR_ERR(new_tfm); + } + + *tfm = new_tfm; + return 0; +} + +void reiser4_done_csum_tfm(struct crypto_shash *tfm) +{ + crypto_free_shash(tfm); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/checksum.h b/fs/reiser4/checksum.h new file mode 100644 index 000000000000..ff6812f759ad --- /dev/null +++ 
b/fs/reiser4/checksum.h @@ -0,0 +1,39 @@ +#ifndef __CHECKSUM__ +#define __CHECKSUM__ + +#include + +int reiser4_init_csum_tfm(struct crypto_shash **tfm); +void reiser4_done_csum_tfm(struct crypto_shash *tfm); +u32 static inline reiser4_crc32c(struct crypto_shash *tfm, + u32 crc, const void *address, + unsigned int length) +{ + struct { + struct shash_desc shash; + char ctx[4]; + } desc; + int err; + + desc.shash.tfm = tfm; + desc.shash.flags = 0; + *(u32 *)desc.ctx = crc; + + err = crypto_shash_update(&desc.shash, address, length); + BUG_ON(err); + return *(u32 *)desc.ctx; +} + +#endif /* __CHECKSUM__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ + diff --git a/fs/reiser4/context.c b/fs/reiser4/context.c new file mode 100644 index 000000000000..40fa203e81c9 --- /dev/null +++ b/fs/reiser4/context.c @@ -0,0 +1,288 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Manipulation of reiser4_context */ + +/* + * global context used during system call. Variable of this type is allocated + * on the stack at the beginning of the reiser4 part of the system call and + * pointer to it is stored in the current->fs_context. This allows us to avoid + * passing pointer to current transaction and current lockstack (both in + * one-to-one mapping with threads) all over the call chain. + * + * It's kind of like those global variables the prof used to tell you not to + * use in CS1, except thread specific.;-) Nikita, this was a good idea. + * + * In some situations it is desirable to have ability to enter reiser4_context + * more than once for the same thread (nested contexts). For example, there + * are some functions that can be called either directly from VFS/VM or from + * already active reiser4 context (->writepage, for example). 
+ * + * In such situations "child" context acts like dummy: all activity is + * actually performed in the top level context, and get_current_context() + * always returns top level context. + * Of course, reiser4_init_context()/reiser4_done_context() have to be properly + * nested any way. + * + * Note that there is an important difference between reiser4 uses + * ->fs_context and the way other file systems use it. Other file systems + * (ext3 and reiserfs) use ->fs_context only for the duration of _transaction_ + * (this is why ->fs_context was initially called ->journal_info). This means, + * that when ext3 or reiserfs finds that ->fs_context is not NULL on the entry + * to the file system, they assume that some transaction is already underway, + * and usually bail out, because starting nested transaction would most likely + * lead to the deadlock. This gives false positives with reiser4, because we + * set ->fs_context before starting transaction. + */ + +#include "debug.h" +#include "super.h" +#include "context.h" +#include "vfs_ops.h" /* for reiser4_throttle_write() */ + +#include /* for current_is_pdflush() */ +#include + +static void _reiser4_init_context(reiser4_context * context, + struct super_block *super) +{ + memset(context, 0, sizeof(*context)); + + context->super = super; + context->magic = context_magic; + context->outer = current->journal_info; + current->journal_info = (void *)context; + context->nr_children = 0; + context->gfp_mask = GFP_KERNEL; + + init_lock_stack(&context->stack); + + reiser4_txn_begin(context); + + /* initialize head of tap list */ + INIT_LIST_HEAD(&context->taps); +#if REISER4_DEBUG + context->task = current; +#endif + grab_space_enable(); +} + +/* initialize context and bind it to the current thread + + This function should be called at the beginning of reiser4 part of + syscall. 
+*/ +reiser4_context * reiser4_init_context(struct super_block *super) +{ + reiser4_context *context; + + assert("nikita-2662", !in_interrupt() && !in_irq()); + assert("nikita-3357", super != NULL); + assert("nikita-3358", super->s_op == NULL || is_reiser4_super(super)); + + context = get_current_context_check(); + if (context && context->super == super) { + context = (reiser4_context *) current->journal_info; + context->nr_children++; + return context; + } + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (context == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + _reiser4_init_context(context, super); + return context; +} + +/* this is used in scan_mgr which is called with spinlock held and in + reiser4_fill_super magic */ +void init_stack_context(reiser4_context *context, struct super_block *super) +{ + assert("nikita-2662", !in_interrupt() && !in_irq()); + assert("nikita-3357", super != NULL); + assert("nikita-3358", super->s_op == NULL || is_reiser4_super(super)); + assert("vs-12", !is_in_reiser4_context()); + + _reiser4_init_context(context, super); + context->on_stack = 1; + return; +} + +/* cast lock stack embedded into reiser4 context up to its container */ +reiser4_context *get_context_by_lock_stack(lock_stack * owner) +{ + return container_of(owner, reiser4_context, stack); +} + +/* true if there is already _any_ reiser4 context for the current thread */ +int is_in_reiser4_context(void) +{ + reiser4_context *ctx; + + ctx = current->journal_info; + return ctx != NULL && ((unsigned long)ctx->magic) == context_magic; +} + +/* + * call balance dirty pages for the current context. + * + * File system is expected to call balance_dirty_pages_ratelimited() whenever + * it dirties a page. reiser4 does this for unformatted nodes (that is, during + * write---this covers vast majority of all dirty traffic), but we cannot do + * this immediately when formatted node is dirtied, because long term lock is + * usually held at that time. 
To work around this, dirtying of formatted node + * simply increases ->nr_marked_dirty counter in the current reiser4 + * context. When we are about to leave this context, + * balance_dirty_pages_ratelimited() is called, if necessary. + * + * This introduces another problem: sometimes we do not want to run + * balance_dirty_pages_ratelimited() when leaving a context, for example + * because some important lock (like ->i_mutex on the parent directory) is + * held. To achieve this, ->nobalance flag can be set in the current context. + */ +static void reiser4_throttle_write_at(reiser4_context *context) +{ + reiser4_super_info_data *sbinfo = get_super_private(context->super); + + /* + * call balance_dirty_pages_ratelimited() to process formatted nodes + * dirtied during this system call. Do that only if we are not in mount + * and there were nodes dirtied in this context and we are not in + * writepage (to avoid deadlock) and not in pdflush + */ + if (sbinfo != NULL && sbinfo->fake != NULL && + context->nr_marked_dirty != 0 && + !(current->flags & PF_MEMALLOC) && + !current_is_flush_bd_task()) + reiser4_throttle_write(sbinfo->fake); +} + +/* release resources associated with context. + + This function should be called at the end of "session" with reiser4, + typically just before leaving reiser4 driver back to VFS. + + This is good place to put some degugging consistency checks, like that + thread released all locks and closed transcrash etc. + +*/ +static void reiser4_done_context(reiser4_context * context) + /* context being released */ +{ + assert("nikita-860", context != NULL); + assert("nikita-859", context->magic == context_magic); + assert("vs-646", (reiser4_context *) current->journal_info == context); + assert("zam-686", !in_interrupt() && !in_irq()); + + /* only do anything when leaving top-level reiser4 context. All nested + * contexts are just dummies. 
*/ + if (context->nr_children == 0) { + assert("jmacd-673", context->trans == NULL); + assert("jmacd-1002", lock_stack_isclean(&context->stack)); + assert("nikita-1936", reiser4_no_counters_are_held()); + assert("nikita-2626", list_empty_careful(reiser4_taps_list())); + assert("zam-1004", ergo(get_super_private(context->super), + get_super_private(context->super)->delete_mutex_owner != + current)); + + /* release all grabbed but as yet unused blocks */ + if (context->grabbed_blocks != 0) + all_grabbed2free(); + + /* + * synchronize against longterm_unlock_znode(): + * wake_up_requestor() wakes up requestors without holding + * zlock (otherwise they will immediately bump into that lock + * after wake up on another CPU). To work around (rare) + * situation where requestor has been woken up asynchronously + * and managed to run until completion (and destroy its + * context and lock stack) before wake_up_requestor() called + * wake_up() on it, wake_up_requestor() synchronize on lock + * stack spin lock. It has actually been observed that spin + * lock _was_ locked at this point, because + * wake_up_requestor() took interrupt. + */ + spin_lock_stack(&context->stack); + spin_unlock_stack(&context->stack); + + assert("zam-684", context->nr_children == 0); + /* restore original ->fs_context value */ + current->journal_info = context->outer; + if (context->on_stack == 0) + kfree(context); + } else { + context->nr_children--; +#if REISER4_DEBUG + assert("zam-685", context->nr_children >= 0); +#endif + } +} + +/* + * exit reiser4 context. Call balance_dirty_pages_at() if necessary. Close + * transaction. Call done_context() to do context related book-keeping. + */ +void reiser4_exit_context(reiser4_context * context) +{ + assert("nikita-3021", reiser4_schedulable()); + + if (context->nr_children == 0) { + if (!context->nobalance) + reiser4_throttle_write_at(context); + + /* if filesystem is mounted with -o sync or -o dirsync - commit + transaction. 
FIXME: TXNH_DONT_COMMIT is used to avoid + commiting on exit_context when inode semaphore is held and + to have ktxnmgrd to do commit instead to get better + concurrent filesystem accesses. But, when one mounts with -o + sync, he cares more about reliability than about + performance. So, for now we have this simple mount -o sync + support. */ + if (context->super->s_flags & (MS_SYNCHRONOUS | MS_DIRSYNC)) { + txn_atom *atom; + + atom = get_current_atom_locked_nocheck(); + if (atom) { + atom->flags |= ATOM_FORCE_COMMIT; + context->trans->flags &= ~TXNH_DONT_COMMIT; + spin_unlock_atom(atom); + } + } + reiser4_txn_end(context); + } + reiser4_done_context(context); +} + +void reiser4_ctx_gfp_mask_set(void) +{ + reiser4_context *ctx; + + ctx = get_current_context(); + if (ctx->entd == 0 && + list_empty(&ctx->stack.locks) && + ctx->trans->atom == NULL) + ctx->gfp_mask = GFP_KERNEL; + else + ctx->gfp_mask = GFP_NOFS; +} + +void reiser4_ctx_gfp_mask_force(gfp_t mask) +{ + reiser4_context *ctx; + ctx = get_current_context(); + + assert("edward-1454", ctx != NULL); + + ctx->gfp_mask = mask; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/context.h b/fs/reiser4/context.h new file mode 100644 index 000000000000..57b6817157a3 --- /dev/null +++ b/fs/reiser4/context.h @@ -0,0 +1,233 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 context. See context.c for details. */ + +#if !defined( __REISER4_CONTEXT_H__ ) +#define __REISER4_CONTEXT_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "tap.h" +#include "lock.h" + +#include /* for __u?? */ +#include /* for struct super_block */ +#include +#include /* for struct task_struct */ + +/* reiser4 per-thread context */ +struct reiser4_context { + /* magic constant. 
For identification of reiser4 contexts. */ + __u32 magic; + + /* current lock stack. See lock.[ch]. This is where list of all + locks taken by current thread is kept. This is also used in + deadlock detection. */ + lock_stack stack; + + /* current transcrash. */ + txn_handle *trans; + /* transaction handle embedded into reiser4_context. ->trans points + * here by default. */ + txn_handle trans_in_ctx; + + /* super block we are working with. To get the current tree + use &get_super_private (reiser4_get_current_sb ())->tree. */ + struct super_block *super; + + /* parent fs activation */ + struct fs_activation *outer; + + /* per-thread grabbed (for further allocation) blocks counter */ + reiser4_block_nr grabbed_blocks; + + /* list of taps currently monitored. See tap.c */ + struct list_head taps; + + /* grabbing space is enabled */ + unsigned int grab_enabled:1; + /* should be set when we are write dirty nodes to disk in jnode_flush or + * reiser4_write_logs() */ + unsigned int writeout_mode:1; + /* true, if current thread is an ent thread */ + unsigned int entd:1; + /* true, if balance_dirty_pages() should not be run when leaving this + * context. This is used to avoid lengthly balance_dirty_pages() + * operation when holding some important resource, like directory + * ->i_mutex */ + unsigned int nobalance:1; + + /* this bit is used on reiser4_done_context to decide whether context is + kmalloc-ed and has to be kfree-ed */ + unsigned int on_stack:1; + + /* count non-trivial jnode_set_dirty() calls */ + unsigned long nr_marked_dirty; + /* + * reiser4_writeback_inodes calls (via generic_writeback_sb_inodes) + * reiser4_writepages_dispatch for each of dirty inodes. + * Reiser4_writepages_dispatch captures pages. 
When number of pages + * captured in one reiser4_writeback_inodes reaches some threshold - + * some atoms get flushed + */ + int nr_captured; + int nr_children; /* number of child contexts */ + struct page *locked_page; /* page that should be unlocked in + * reiser4_dirty_inode() before taking + * a longterm lock (to not violate + * reiser4 lock ordering) */ +#if REISER4_DEBUG + /* debugging information about reiser4 locks held by the current + * thread */ + reiser4_lock_cnt_info locks; + struct task_struct *task; /* so we can easily find owner of the stack */ + + /* + * disk space grabbing debugging support + */ + /* how many disk blocks were grabbed by the first call to + * reiser4_grab_space() in this context */ + reiser4_block_nr grabbed_initially; + + /* list of all threads doing flush currently */ + struct list_head flushers_link; + /* information about last error encountered by reiser4 */ + err_site err; +#endif + void *vp; + gfp_t gfp_mask; +}; + +extern reiser4_context *get_context_by_lock_stack(lock_stack *); + +/* Debugging helps. */ +#if REISER4_DEBUG +extern void print_contexts(void); +#endif + +#define current_tree (&(get_super_private(reiser4_get_current_sb())->tree)) +#define current_blocksize reiser4_get_current_sb()->s_blocksize +#define current_blocksize_bits reiser4_get_current_sb()->s_blocksize_bits + +extern reiser4_context *reiser4_init_context(struct super_block *); +extern void init_stack_context(reiser4_context *, struct super_block *); +extern void reiser4_exit_context(reiser4_context *); + +/* magic constant we store in reiser4_context allocated at the stack. Used to + catch accesses to staled or uninitialized contexts. 
*/ +#define context_magic ((__u32) 0x4b1b5d0b) + +extern int is_in_reiser4_context(void); + +/* + * return reiser4_context for the thread @tsk + */ +static inline reiser4_context *get_context(const struct task_struct *tsk) +{ + assert("vs-1682", + ((reiser4_context *) tsk->journal_info)->magic == context_magic); + return (reiser4_context *) tsk->journal_info; +} + +/* + * return reiser4 context of the current thread, or NULL if there is none. + */ +static inline reiser4_context *get_current_context_check(void) +{ + if (is_in_reiser4_context()) + return get_context(current); + else + return NULL; +} + +static inline reiser4_context *get_current_context(void); /* __attribute__((const)); */ + +/* return context associated with current thread */ +static inline reiser4_context *get_current_context(void) +{ + return get_context(current); +} + +static inline gfp_t reiser4_ctx_gfp_mask_get(void) +{ + reiser4_context *ctx; + + ctx = get_current_context_check(); + return (ctx == NULL) ? GFP_KERNEL : ctx->gfp_mask; +} + +void reiser4_ctx_gfp_mask_set(void); +void reiser4_ctx_gfp_mask_force (gfp_t mask); + +/* + * true if current thread is in the write-out mode. Thread enters write-out + * mode during jnode_flush and reiser4_write_logs(). 
+ */ +static inline int is_writeout_mode(void) +{ + return get_current_context()->writeout_mode; +} + +/* + * enter write-out mode + */ +static inline void writeout_mode_enable(void) +{ + assert("zam-941", !get_current_context()->writeout_mode); + get_current_context()->writeout_mode = 1; +} + +/* + * leave write-out mode + */ +static inline void writeout_mode_disable(void) +{ + assert("zam-942", get_current_context()->writeout_mode); + get_current_context()->writeout_mode = 0; +} + +static inline void grab_space_enable(void) +{ + get_current_context()->grab_enabled = 1; +} + +static inline void grab_space_disable(void) +{ + get_current_context()->grab_enabled = 0; +} + +static inline void grab_space_set_enabled(int enabled) +{ + get_current_context()->grab_enabled = enabled; +} + +static inline int is_grab_enabled(reiser4_context * ctx) +{ + return ctx->grab_enabled; +} + +/* mark transaction handle in @ctx as TXNH_DONT_COMMIT, so that no commit or + * flush would be performed when it is closed. This is necessary when handle + * has to be closed under some coarse semaphore, like i_mutex of + * directory. Commit will be performed by ktxnmgrd. */ +static inline void context_set_commit_async(reiser4_context * context) +{ + context->nobalance = 1; + context->trans->flags |= TXNH_DONT_COMMIT; +} + +/* __REISER4_CONTEXT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/coord.c b/fs/reiser4/coord.c new file mode 100644 index 000000000000..5c34e0aba68d --- /dev/null +++ b/fs/reiser4/coord.c @@ -0,0 +1,928 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "tree.h" +#include "plugin/item/item.h" +#include "znode.h" +#include "coord.h" + +/* Internal constructor. 
*/ +static inline void +coord_init_values(coord_t *coord, const znode * node, pos_in_node_t item_pos, + pos_in_node_t unit_pos, between_enum between) +{ + coord->node = (znode *) node; + coord_set_item_pos(coord, item_pos); + coord->unit_pos = unit_pos; + coord->between = between; + ON_DEBUG(coord->plug_v = 0); + ON_DEBUG(coord->body_v = 0); + + /*ON_TRACE (TRACE_COORDS, "init coord %p node %p: %u %u %s\n", coord, + node, item_pos, unit_pos, coord_tween_tostring (between)); */ +} + +/* after shifting of node content, coord previously set properly may become + invalid, try to "normalize" it. */ +void coord_normalize(coord_t *coord) +{ + znode *node; + + node = coord->node; + assert("vs-683", node); + + coord_clear_iplug(coord); + + if (node_is_empty(node)) { + coord_init_first_unit(coord, node); + } else if ((coord->between == AFTER_ITEM) + || (coord->between == AFTER_UNIT)) { + return; + } else if (coord->item_pos == coord_num_items(coord) + && coord->between == BEFORE_ITEM) { + coord_dec_item_pos(coord); + coord->between = AFTER_ITEM; + } else if (coord->unit_pos == coord_num_units(coord) + && coord->between == BEFORE_UNIT) { + coord->unit_pos--; + coord->between = AFTER_UNIT; + } else if (coord->item_pos == coord_num_items(coord) + && coord->unit_pos == 0 && coord->between == BEFORE_UNIT) { + coord_dec_item_pos(coord); + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + } +} + +/* Copy a coordinate. */ +void coord_dup(coord_t *coord, const coord_t *old_coord) +{ + assert("jmacd-9800", coord_check(old_coord)); + coord_dup_nocheck(coord, old_coord); +} + +/* Copy a coordinate without check. Useful when old_coord->node is not + loaded. 
As in cbk_tree_lookup -> connect_znode -> connect_one_side */ +void coord_dup_nocheck(coord_t *coord, const coord_t *old_coord) +{ + coord->node = old_coord->node; + coord_set_item_pos(coord, old_coord->item_pos); + coord->unit_pos = old_coord->unit_pos; + coord->between = old_coord->between; + coord->iplugid = old_coord->iplugid; + ON_DEBUG(coord->plug_v = old_coord->plug_v); + ON_DEBUG(coord->body_v = old_coord->body_v); +} + +/* Initialize an invalid coordinate. */ +void coord_init_invalid(coord_t *coord, const znode * node) +{ + coord_init_values(coord, node, 0, 0, INVALID_COORD); +} + +void coord_init_first_unit_nocheck(coord_t *coord, const znode * node) +{ + coord_init_values(coord, node, 0, 0, AT_UNIT); +} + +/* Initialize a coordinate to point at the first unit of the first item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +void coord_init_first_unit(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, 0, 0, (is_empty ? EMPTY_NODE : AT_UNIT)); + + assert("jmacd-9801", coord_check(coord)); +} + +/* Initialize a coordinate to point at the last unit of the last item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +void coord_init_last_unit(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, + (is_empty ? 0 : node_num_items(node) - 1), 0, + (is_empty ? EMPTY_NODE : AT_UNIT)); + if (!is_empty) + coord->unit_pos = coord_last_unit_pos(coord); + assert("jmacd-9802", coord_check(coord)); +} + +/* Initialize a coordinate to before the first item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +void coord_init_before_first_item(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, 0, 0, + (is_empty ? EMPTY_NODE : BEFORE_UNIT)); + + assert("jmacd-9803", coord_check(coord)); +} + +/* Initialize a coordinate to after the last item. 
If the node is empty, it is + positioned at the EMPTY_NODE. */ +void coord_init_after_last_item(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, + (is_empty ? 0 : node_num_items(node) - 1), 0, + (is_empty ? EMPTY_NODE : AFTER_ITEM)); + + assert("jmacd-9804", coord_check(coord)); +} + +/* Initialize a coordinate to after last unit in the item. Coord must be set + already to existing item */ +void coord_init_after_item_end(coord_t *coord) +{ + coord->between = AFTER_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); +} + +/* Initialize a coordinate to before the item. Coord must be set already to + existing item */ +void coord_init_before_item(coord_t *coord) +{ + coord->unit_pos = 0; + coord->between = BEFORE_ITEM; +} + +/* Initialize a coordinate to after the item. Coord must be set already to + existing item */ +void coord_init_after_item(coord_t *coord) +{ + coord->unit_pos = 0; + coord->between = AFTER_ITEM; +} + +/* Initialize a coordinate by 0s. Used in places where init_coord was used and + it was not clear how actually */ +void coord_init_zero(coord_t *coord) +{ + memset(coord, 0, sizeof(*coord)); +} + +/* Return the number of units at the present item. + Asserts coord_is_existing_item(). */ +unsigned coord_num_units(const coord_t *coord) +{ + assert("jmacd-9806", coord_is_existing_item(coord)); + + return item_plugin_by_coord(coord)->b.nr_units(coord); +} + +/* Returns true if the coord was initializewd by coord_init_invalid (). */ +/* Audited by: green(2002.06.15) */ +int coord_is_invalid(const coord_t *coord) +{ + return coord->between == INVALID_COORD; +} + +/* Returns true if the coordinate is positioned at an existing item, not before + or after an item. It may be placed at, before, or after any unit within the + item, whether existing or not. 
*/ +int coord_is_existing_item(const coord_t *coord) +{ + switch (coord->between) { + case EMPTY_NODE: + case BEFORE_ITEM: + case AFTER_ITEM: + case INVALID_COORD: + return 0; + + case BEFORE_UNIT: + case AT_UNIT: + case AFTER_UNIT: + return coord->item_pos < coord_num_items(coord); + } + + impossible("jmacd-9900", "unreachable coord: %p", coord); + return 0; +} + +/* Returns true if the coordinate is positioned at an existing unit, not before + or after a unit. */ +/* Audited by: green(2002.06.15) */ +int coord_is_existing_unit(const coord_t *coord) +{ + switch (coord->between) { + case EMPTY_NODE: + case BEFORE_UNIT: + case AFTER_UNIT: + case BEFORE_ITEM: + case AFTER_ITEM: + case INVALID_COORD: + return 0; + + case AT_UNIT: + return (coord->item_pos < coord_num_items(coord) + && coord->unit_pos < coord_num_units(coord)); + } + + impossible("jmacd-9902", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned at the first unit of the first + item. Not true for empty nodes nor coordinates positioned before the first + item. */ +/* Audited by: green(2002.06.15) */ +int coord_is_leftmost_unit(const coord_t *coord) +{ + return (coord->between == AT_UNIT && coord->item_pos == 0 + && coord->unit_pos == 0); +} + +#if REISER4_DEBUG +/* For assertions only, checks for a valid coordinate. */ +int coord_check(const coord_t *coord) +{ + if (coord->node == NULL) + return 0; + if (znode_above_root(coord->node)) + return 1; + + switch (coord->between) { + default: + case INVALID_COORD: + return 0; + case EMPTY_NODE: + if (!node_is_empty(coord->node)) + return 0; + return coord->item_pos == 0 && coord->unit_pos == 0; + + case BEFORE_UNIT: + case AFTER_UNIT: + if (node_is_empty(coord->node) && (coord->item_pos == 0) + && (coord->unit_pos == 0)) + return 1; + case AT_UNIT: + break; + case AFTER_ITEM: + case BEFORE_ITEM: + /* before/after item should not set unit_pos. 
*/ + if (coord->unit_pos != 0) + return 0; + break; + } + + if (coord->item_pos >= node_num_items(coord->node)) + return 0; + + /* FIXME-VS: we are going to check unit_pos. This makes no sense when + between is set either AFTER_ITEM or BEFORE_ITEM */ + if (coord->between == AFTER_ITEM || coord->between == BEFORE_ITEM) + return 1; + + if (coord_is_iplug_set(coord) && + coord->unit_pos > + item_plugin_by_coord(coord)->b.nr_units(coord) - 1) + return 0; + return 1; +} +#endif + +/* Adjust coordinate boundaries based on the number of items prior to + coord_next/prev. Returns 1 if the new position is does not exist. */ +static int coord_adjust_items(coord_t *coord, unsigned items, int is_next) +{ + /* If the node is invalid, leave it. */ + if (coord->between == INVALID_COORD) + return 1; + + /* If the node is empty, set it appropriately. */ + if (items == 0) { + coord->between = EMPTY_NODE; + coord_set_item_pos(coord, 0); + coord->unit_pos = 0; + return 1; + } + + /* If it was empty and it no longer is, set to BEFORE/AFTER_ITEM. */ + if (coord->between == EMPTY_NODE) { + coord->between = (is_next ? BEFORE_ITEM : AFTER_ITEM); + coord_set_item_pos(coord, 0); + coord->unit_pos = 0; + return 0; + } + + /* If the item_pos is out-of-range, set it appropriatly. */ + if (coord->item_pos >= items) { + coord->between = AFTER_ITEM; + coord_set_item_pos(coord, items - 1); + coord->unit_pos = 0; + /* If is_next, return 1 (can't go any further). */ + return is_next; + } + + return 0; +} + +/* Advances the coordinate by one unit to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing unit. */ +int coord_next_unit(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case BEFORE_UNIT: + /* Now it is positioned at the same unit. 
*/ + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + case AT_UNIT: + /* If it was at or after a unit and there are more units in this + item, advance to the next one. */ + if (coord->unit_pos < coord_last_unit_pos(coord)) { + coord->unit_pos += 1; + coord->between = AT_UNIT; + return 0; + } + + /* Otherwise, it is crossing an item boundary and treated as if + it was after the current item. */ + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + /* FALLTHROUGH */ + + case AFTER_ITEM: + /* Check for end-of-node. */ + if (coord->item_pos == items - 1) + return 1; + + coord_inc_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case BEFORE_ITEM: + /* The adjust_items checks ensure that we are valid here. */ + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + /* Handled in coord_adjust_items(). */ + break; + } + + impossible("jmacd-9902", "unreachable"); + return 0; +} + +/* Advances the coordinate by one item to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing item. */ +int coord_next_item(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case AFTER_UNIT: + case AT_UNIT: + case BEFORE_UNIT: + case AFTER_ITEM: + /* Check for end-of-node. */ + if (coord->item_pos == items - 1) { + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + coord_clear_iplug(coord); + return 1; + } + + /* Anywhere in an item, go to the next one. */ + coord->between = AT_UNIT; + coord_inc_item_pos(coord); + coord->unit_pos = 0; + return 0; + + case BEFORE_ITEM: + /* The out-of-range check ensures that we are valid here. */ + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + case INVALID_COORD: + case EMPTY_NODE: + /* Handled in coord_adjust_items(). 
*/ + break; + } + + impossible("jmacd-9903", "unreachable"); + return 0; +} + +/* Advances the coordinate by one unit to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing unit. */ +int coord_prev_unit(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + case BEFORE_UNIT: + if (coord->unit_pos > 0) { + coord->unit_pos -= 1; + coord->between = AT_UNIT; + return 0; + } + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + return 1; + } + + coord_dec_item_pos(coord); + coord->unit_pos = coord_last_unit_pos(coord); + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + /* What if unit_pos is out-of-range? */ + assert("jmacd-5442", + coord->unit_pos <= coord_last_unit_pos(coord)); + coord->between = AT_UNIT; + return 0; + + case BEFORE_ITEM: + if (coord->item_pos == 0) + return 1; + + coord_dec_item_pos(coord); + /* FALLTHROUGH */ + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + break; + } + + impossible("jmacd-9904", "unreachable"); + return 0; +} + +/* Advances the coordinate by one item to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing item. 
*/ +int coord_prev_item(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + case AFTER_UNIT: + case BEFORE_UNIT: + case BEFORE_ITEM: + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + coord->unit_pos = 0; + return 1; + } + + coord_dec_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = 0; + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + break; + } + + impossible("jmacd-9905", "unreachable"); + return 0; +} + +/* Calls either coord_init_first_unit or coord_init_last_unit depending on + sideof argument. */ +void coord_init_sideof_unit(coord_t *coord, const znode * node, sideof dir) +{ + assert("jmacd-9821", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + coord_init_first_unit(coord, node); + } else { + coord_init_last_unit(coord, node); + } +} + +/* Calls either coord_is_before_leftmost or coord_is_after_rightmost depending + on sideof argument. */ +/* Audited by: green(2002.06.15) */ +int coord_is_after_sideof_unit(coord_t *coord, sideof dir) +{ + assert("jmacd-9822", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + return coord_is_before_leftmost(coord); + } else { + return coord_is_after_rightmost(coord); + } +} + +/* Calls either coord_next_unit or coord_prev_unit depending on sideof argument. 
+ */ +/* Audited by: green(2002.06.15) */ +int coord_sideof_unit(coord_t *coord, sideof dir) +{ + assert("jmacd-9823", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + return coord_prev_unit(coord); + } else { + return coord_next_unit(coord); + } +} + +#if REISER4_DEBUG +int coords_equal(const coord_t *c1, const coord_t *c2) +{ + assert("nikita-2840", c1 != NULL); + assert("nikita-2841", c2 != NULL); + + return + c1->node == c2->node && + c1->item_pos == c2->item_pos && + c1->unit_pos == c2->unit_pos && c1->between == c2->between; +} +#endif /* REISER4_DEBUG */ + +/* If coord_is_after_rightmost return NCOORD_ON_THE_RIGHT, if + coord_is_after_leftmost return NCOORD_ON_THE_LEFT, otherwise return + NCOORD_INSIDE. */ +/* Audited by: green(2002.06.15) */ +coord_wrt_node coord_wrt(const coord_t *coord) +{ + if (coord_is_before_leftmost(coord)) + return COORD_ON_THE_LEFT; + + if (coord_is_after_rightmost(coord)) + return COORD_ON_THE_RIGHT; + + return COORD_INSIDE; +} + +/* Returns true if the coordinate is positioned after the last item or after the + last unit of the last item or it is an empty node. */ +/* Audited by: green(2002.06.15) */ +int coord_is_after_rightmost(const coord_t *coord) +{ + assert("jmacd-7313", coord_check(coord)); + + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + case BEFORE_UNIT: + case BEFORE_ITEM: + return 0; + + case EMPTY_NODE: + return 1; + + case AFTER_ITEM: + return (coord->item_pos == node_num_items(coord->node) - 1); + + case AFTER_UNIT: + return ((coord->item_pos == node_num_items(coord->node) - 1) && + coord->unit_pos == coord_last_unit_pos(coord)); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned before the first item or it is + an empty node. 
*/ +int coord_is_before_leftmost(const coord_t *coord) +{ + /* FIXME-VS: coord_check requires node to be loaded whereas it is not + necessary to check if coord is set before leftmost + assert ("jmacd-7313", coord_check (coord)); */ + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + case AFTER_ITEM: + case AFTER_UNIT: + return 0; + + case EMPTY_NODE: + return 1; + + case BEFORE_ITEM: + case BEFORE_UNIT: + return (coord->item_pos == 0) && (coord->unit_pos == 0); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned after a item, before a item, + after the last unit of an item, before the first unit of an item, or at an + empty node. */ +/* Audited by: green(2002.06.15) */ +int coord_is_between_items(const coord_t *coord) +{ + assert("jmacd-7313", coord_check(coord)); + + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + return 0; + + case AFTER_ITEM: + case BEFORE_ITEM: + case EMPTY_NODE: + return 1; + + case BEFORE_UNIT: + return coord->unit_pos == 0; + + case AFTER_UNIT: + return coord->unit_pos == coord_last_unit_pos(coord); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +#if REISER4_DEBUG +/* Returns true if the coordinates are positioned at adjacent units, regardless + of before-after or item boundaries. 
*/ +int coord_are_neighbors(coord_t *c1, coord_t *c2) +{ + coord_t *left; + coord_t *right; + + assert("nikita-1241", c1 != NULL); + assert("nikita-1242", c2 != NULL); + assert("nikita-1243", c1->node == c2->node); + assert("nikita-1244", coord_is_existing_unit(c1)); + assert("nikita-1245", coord_is_existing_unit(c2)); + + left = right = NULL; + switch (coord_compare(c1, c2)) { + case COORD_CMP_ON_LEFT: + left = c1; + right = c2; + break; + case COORD_CMP_ON_RIGHT: + left = c2; + right = c1; + break; + case COORD_CMP_SAME: + return 0; + default: + wrong_return_value("nikita-1246", "compare_coords()"); + } + assert("vs-731", left && right); + if (left->item_pos == right->item_pos) { + return left->unit_pos + 1 == right->unit_pos; + } else if (left->item_pos + 1 == right->item_pos) { + return (left->unit_pos == coord_last_unit_pos(left)) + && (right->unit_pos == 0); + } else { + return 0; + } +} +#endif /* REISER4_DEBUG */ + +/* Assuming two coordinates are positioned in the same node, return + COORD_CMP_ON_RIGHT, COORD_CMP_ON_LEFT, or COORD_CMP_SAME depending on c1's + position relative to c2. */ +/* Audited by: green(2002.06.15) */ +coord_cmp coord_compare(coord_t *c1, coord_t *c2) +{ + assert("vs-209", c1->node == c2->node); + assert("vs-194", coord_is_existing_unit(c1) + && coord_is_existing_unit(c2)); + + if (c1->item_pos > c2->item_pos) + return COORD_CMP_ON_RIGHT; + if (c1->item_pos < c2->item_pos) + return COORD_CMP_ON_LEFT; + if (c1->unit_pos > c2->unit_pos) + return COORD_CMP_ON_RIGHT; + if (c1->unit_pos < c2->unit_pos) + return COORD_CMP_ON_LEFT; + return COORD_CMP_SAME; +} + +/* If the coordinate is between items, shifts it to the right. Returns 0 on + success and non-zero if there is no position to the right. 
*/ +int coord_set_to_right(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + return 0; + + case BEFORE_ITEM: + case BEFORE_UNIT: + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + if (coord->unit_pos < coord_last_unit_pos(coord)) { + coord->unit_pos += 1; + coord->between = AT_UNIT; + return 0; + } else { + + coord->unit_pos = 0; + + if (coord->item_pos == items - 1) { + coord->between = AFTER_ITEM; + return 1; + } + + coord_inc_item_pos(coord); + coord->between = AT_UNIT; + return 0; + } + + case AFTER_ITEM: + if (coord->item_pos == items - 1) + return 1; + + coord_inc_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case EMPTY_NODE: + return 1; + + case INVALID_COORD: + break; + } + + impossible("jmacd-9920", "unreachable"); + return 0; +} + +/* If the coordinate is between items, shifts it to the left. Returns 0 on + success and non-zero if there is no position to the left. 
*/ +int coord_set_to_left(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + return 0; + + case AFTER_UNIT: + coord->between = AT_UNIT; + return 0; + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); + return 0; + + case BEFORE_UNIT: + if (coord->unit_pos > 0) { + coord->unit_pos -= 1; + coord->between = AT_UNIT; + return 0; + } else { + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + return 1; + } + + coord->unit_pos = coord_last_unit_pos(coord); + coord_dec_item_pos(coord); + coord->between = AT_UNIT; + return 0; + } + + case BEFORE_ITEM: + if (coord->item_pos == 0) + return 1; + + coord_dec_item_pos(coord); + coord->unit_pos = coord_last_unit_pos(coord); + coord->between = AT_UNIT; + return 0; + + case EMPTY_NODE: + return 1; + + case INVALID_COORD: + break; + } + + impossible("jmacd-9920", "unreachable"); + return 0; +} + +static const char *coord_tween_tostring(between_enum n) +{ + switch (n) { + case BEFORE_UNIT: + return "before unit"; + case BEFORE_ITEM: + return "before item"; + case AT_UNIT: + return "at unit"; + case AFTER_UNIT: + return "after unit"; + case AFTER_ITEM: + return "after item"; + case EMPTY_NODE: + return "empty node"; + case INVALID_COORD: + return "invalid"; + default: + { + static char buf[30]; + + sprintf(buf, "unknown: %i", n); + return buf; + } + } +} + +void print_coord(const char *mes, const coord_t *coord, int node) +{ + if (coord == NULL) { + printk("%s: null\n", mes); + return; + } + printk("%s: item_pos = %d, unit_pos %d, tween=%s, iplug=%d\n", + mes, coord->item_pos, coord->unit_pos, + coord_tween_tostring(coord->between), coord->iplugid); +} + +int +item_utmost_child_real_block(const coord_t *coord, sideof side, + reiser4_block_nr * blk) +{ + return item_plugin_by_coord(coord)->f.utmost_child_real_block(coord, + side, + blk); +} + +int 
item_utmost_child(const coord_t *coord, sideof side, jnode ** child) +{ + return item_plugin_by_coord(coord)->f.utmost_child(coord, side, child); +} + +/* @count bytes of flow @f got written, update correspondingly f->length, + f->data and f->key */ +void move_flow_forward(flow_t *f, unsigned count) +{ + if (f->data) + f->data += count; + f->length -= count; + set_key_offset(&f->key, get_key_offset(&f->key) + count); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/coord.h b/fs/reiser4/coord.h new file mode 100644 index 000000000000..a1dd724fc464 --- /dev/null +++ b/fs/reiser4/coord.h @@ -0,0 +1,399 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Coords */ + +#if !defined(__REISER4_COORD_H__) +#define __REISER4_COORD_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" + +/* insertions happen between coords in the tree, so we need some means + of specifying the sense of betweenness. */ +typedef enum { + BEFORE_UNIT, /* Note: we/init_coord depends on this value being zero. */ + AT_UNIT, + AFTER_UNIT, + BEFORE_ITEM, + AFTER_ITEM, + INVALID_COORD, + EMPTY_NODE, +} between_enum; + +/* location of coord w.r.t. its node */ +typedef enum { + COORD_ON_THE_LEFT = -1, + COORD_ON_THE_RIGHT = +1, + COORD_INSIDE = 0 +} coord_wrt_node; + +typedef enum { + COORD_CMP_SAME = 0, COORD_CMP_ON_LEFT = -1, COORD_CMP_ON_RIGHT = +1 +} coord_cmp; + +struct coord { + /* node in a tree */ + /* 0 */ znode *node; + + /* position of item within node */ + /* 4 */ pos_in_node_t item_pos; + /* position of unit within item */ + /* 6 */ pos_in_node_t unit_pos; + /* optimization: plugin of item is stored in coord_t. Until this was + implemented, item_plugin_by_coord() was major CPU consumer. 
->iplugid + is invalidated (set to 0xff) on each modification of ->item_pos, + and all such modifications are funneled through coord_*_item_pos() + functions below. + */ + /* 8 */ char iplugid; + /* position of coord w.r.t. to neighboring items and/or units. + Values are taken from &between_enum above. + */ + /* 9 */ char between; + /* padding. It will be added by the compiler anyway to conform to the + * C language alignment requirements. We keep it here to be on the + * safe side and to have a clear picture of the memory layout of this + * structure. */ + /* 10 */ __u16 pad; + /* 12 */ int offset; +#if REISER4_DEBUG + unsigned long plug_v; + unsigned long body_v; +#endif +}; + +#define INVALID_PLUGID ((char)((1 << 8) - 1)) +#define INVALID_OFFSET -1 + +static inline void coord_clear_iplug(coord_t *coord) +{ + assert("nikita-2835", coord != NULL); + coord->iplugid = INVALID_PLUGID; + coord->offset = INVALID_OFFSET; +} + +static inline int coord_is_iplug_set(const coord_t *coord) +{ + assert("nikita-2836", coord != NULL); + return coord->iplugid != INVALID_PLUGID; +} + +static inline void coord_set_item_pos(coord_t *coord, pos_in_node_t pos) +{ + assert("nikita-2478", coord != NULL); + coord->item_pos = pos; + coord_clear_iplug(coord); +} + +static inline void coord_dec_item_pos(coord_t *coord) +{ + assert("nikita-2480", coord != NULL); + --coord->item_pos; + coord_clear_iplug(coord); +} + +static inline void coord_inc_item_pos(coord_t *coord) +{ + assert("nikita-2481", coord != NULL); + ++coord->item_pos; + coord_clear_iplug(coord); +} + +static inline void coord_add_item_pos(coord_t *coord, int delta) +{ + assert("nikita-2482", coord != NULL); + coord->item_pos += delta; + coord_clear_iplug(coord); +} + +static inline void coord_invalid_item_pos(coord_t *coord) +{ + assert("nikita-2832", coord != NULL); + coord->item_pos = (unsigned short)~0; + coord_clear_iplug(coord); +} + +/* Reverse a direction. 
*/ +static inline sideof sideof_reverse(sideof side) +{ + return side == LEFT_SIDE ? RIGHT_SIDE : LEFT_SIDE; +} + +/* NOTE: There is a somewhat odd mixture of the following opposed terms: + + "first" and "last" + "next" and "prev" + "before" and "after" + "leftmost" and "rightmost" + + But I think the chosen names are decent the way they are. +*/ + +/* COORD INITIALIZERS */ + +/* Initialize an invalid coordinate. */ +extern void coord_init_invalid(coord_t *coord, const znode * node); + +extern void coord_init_first_unit_nocheck(coord_t *coord, const znode * node); + +/* Initialize a coordinate to point at the first unit of the first item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +extern void coord_init_first_unit(coord_t *coord, const znode * node); + +/* Initialize a coordinate to point at the last unit of the last item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +extern void coord_init_last_unit(coord_t *coord, const znode * node); + +/* Initialize a coordinate to before the first item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +extern void coord_init_before_first_item(coord_t *coord, const znode * node); + +/* Initialize a coordinate to after the last item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +extern void coord_init_after_last_item(coord_t *coord, const znode * node); + +/* Initialize a coordinate to after last unit in the item. Coord must be set + already to existing item */ +void coord_init_after_item_end(coord_t *coord); + +/* Initialize a coordinate to before the item. Coord must be set already to + existing item */ +void coord_init_before_item(coord_t *); +/* Initialize a coordinate to after the item. Coord must be set already to + existing item */ +void coord_init_after_item(coord_t *); + +/* Calls either coord_init_first_unit or coord_init_last_unit depending on + sideof argument. 
*/ +extern void coord_init_sideof_unit(coord_t *coord, const znode * node, + sideof dir); + +/* Initialize a coordinate by 0s. Used in places where init_coord was used and + it was not clear how actually + FIXME-VS: added by vs (2002, june, 8) */ +extern void coord_init_zero(coord_t *coord); + +/* COORD METHODS */ + +/* after shifting of node content, coord previously set properly may become + invalid, try to "normalize" it. */ +void coord_normalize(coord_t *coord); + +/* Copy a coordinate. */ +extern void coord_dup(coord_t *coord, const coord_t *old_coord); + +/* Copy a coordinate without check. */ +void coord_dup_nocheck(coord_t *coord, const coord_t *old_coord); + +unsigned coord_num_units(const coord_t *coord); + +/* Return the last valid unit number at the present item (i.e., + coord_num_units() - 1). */ +static inline unsigned coord_last_unit_pos(const coord_t *coord) +{ + return coord_num_units(coord) - 1; +} + +#if REISER4_DEBUG +/* For assertions only, checks for a valid coordinate. */ +extern int coord_check(const coord_t *coord); + +extern unsigned long znode_times_locked(const znode * z); + +static inline void coord_update_v(coord_t *coord) +{ + coord->plug_v = coord->body_v = znode_times_locked(coord->node); +} +#endif + +extern int coords_equal(const coord_t *c1, const coord_t *c2); + +extern void print_coord(const char *mes, const coord_t *coord, int print_node); + +/* If coord_is_after_rightmost return NCOORD_ON_THE_RIGHT, if + coord_is_after_leftmost return NCOORD_ON_THE_LEFT, otherwise return + NCOORD_INSIDE. */ +extern coord_wrt_node coord_wrt(const coord_t *coord); + +/* Returns true if the coordinates are positioned at adjacent units, regardless + of before-after or item boundaries. */ +extern int coord_are_neighbors(coord_t *c1, coord_t *c2); + +/* Assuming two coordinates are positioned in the same node, return + NCOORD_CMP_ON_RIGHT, NCOORD_CMP_ON_LEFT, or NCOORD_CMP_SAME depending on c1's + position relative to c2. 
*/ +extern coord_cmp coord_compare(coord_t *c1, coord_t *c2); + +/* COORD PREDICATES */ + +/* Returns true if the coord was initializewd by coord_init_invalid (). */ +extern int coord_is_invalid(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an existing item, not before + or after an item. It may be placed at, before, or after any unit within the + item, whether existing or not. If this is true you can call methods of the + item plugin. */ +extern int coord_is_existing_item(const coord_t *coord); + +/* Returns true if the coordinate is positioned after a item, before a item, + after the last unit of an item, before the first unit of an item, or at an + empty node. */ +extern int coord_is_between_items(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an existing unit, not before + or after a unit. */ +extern int coord_is_existing_unit(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an empty node. */ +extern int coord_is_empty(const coord_t *coord); + +/* Returns true if the coordinate is positioned at the first unit of the first + item. Not true for empty nodes nor coordinates positioned before the first + item. */ +extern int coord_is_leftmost_unit(const coord_t *coord); + +/* Returns true if the coordinate is positioned after the last item or after the + last unit of the last item or it is an empty node. */ +extern int coord_is_after_rightmost(const coord_t *coord); + +/* Returns true if the coordinate is positioned before the first item or it is + an empty node. */ +extern int coord_is_before_leftmost(const coord_t *coord); + +/* Calls either coord_is_before_leftmost or coord_is_after_rightmost depending + on sideof argument. */ +extern int coord_is_after_sideof_unit(coord_t *coord, sideof dir); + +/* COORD MODIFIERS */ + +/* Advances the coordinate by one unit to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. 
Returns 0 if new + position is an existing unit. */ +extern int coord_next_unit(coord_t *coord); + +/* Advances the coordinate by one item to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing item. */ +extern int coord_next_item(coord_t *coord); + +/* Advances the coordinate by one unit to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing unit. */ +extern int coord_prev_unit(coord_t *coord); + +/* Advances the coordinate by one item to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing item. */ +extern int coord_prev_item(coord_t *coord); + +/* If the coordinate is between items, shifts it to the right. Returns 0 on + success and non-zero if there is no position to the right. */ +extern int coord_set_to_right(coord_t *coord); + +/* If the coordinate is between items, shifts it to the left. Returns 0 on + success and non-zero if there is no position to the left. */ +extern int coord_set_to_left(coord_t *coord); + +/* If the coordinate is at an existing unit, set to after that unit. Returns 0 + on success and non-zero if the unit did not exist. */ +extern int coord_set_after_unit(coord_t *coord); + +/* Calls either coord_next_unit or coord_prev_unit depending on sideof + argument. 
*/ +extern int coord_sideof_unit(coord_t *coord, sideof dir); + +/* iterate over all units in @node */ +#define for_all_units(coord, node) \ + for (coord_init_before_first_item((coord), (node)) ; \ + coord_next_unit(coord) == 0 ;) + +/* iterate over all items in @node */ +#define for_all_items(coord, node) \ + for (coord_init_before_first_item((coord), (node)) ; \ + coord_next_item(coord) == 0 ;) + +/* COORD/ITEM METHODS */ + +extern int item_utmost_child_real_block(const coord_t *coord, sideof side, + reiser4_block_nr * blk); +extern int item_utmost_child(const coord_t *coord, sideof side, + jnode ** child); + +/* a flow is a sequence of bytes being written to or read from the tree. The + tree will slice the flow into items while storing it into nodes, but all of + that is hidden from anything outside the tree. */ + +struct flow { + reiser4_key key; /* key of start of flow's sequence of bytes */ + loff_t length; /* length of flow's sequence of bytes */ + char *data; /* start of flow's sequence of bytes */ + int user; /* if 1 data is user space, 0 - kernel space */ + rw_op op; /* NIKITA-FIXME-HANS: comment is where? */ +}; + +void move_flow_forward(flow_t *f, unsigned count); + +/* &reiser4_item_data - description of data to be inserted or pasted + + Q: articulate the reasons for the difference between this and flow. + + A: Becides flow we insert into tree other things: stat data, directory + entry, etc. To insert them into tree one has to provide this structure. If + one is going to insert flow - he can use insert_flow, where this structure + does not have to be created +*/ +struct reiser4_item_data { + /* actual data to be inserted. If NULL, ->create_item() will not + do xmemcpy itself, leaving this up to the caller. This can + save some amount of unnecessary memory copying, for example, + during insertion of stat data. 
+ + */ + char *data; + /* 1 if 'char * data' contains pointer to user space and 0 if it is + kernel space */ + int user; + /* amount of data we are going to insert or paste */ + int length; + /* "Arg" is opaque data that is passed down to the + ->create_item() method of node layout, which in turn + hands it to the ->create_hook() of item being created. This + arg is currently used by: + + . ->create_hook() of internal item + (fs/reiser4/plugin/item/internal.c:internal_create_hook()), + . ->paste() method of directory item. + . ->create_hook() of extent item + + For internal item, this is left "brother" of new node being + inserted and it is used to add new node into sibling list + after parent to it was just inserted into parent. + + While ->arg does look somewhat of unnecessary compication, + it actually saves a lot of headache in many places, because + all data necessary to insert or paste new data into tree are + collected in one place, and this eliminates a lot of extra + argument passing and storing everywhere. + + */ + void *arg; + /* plugin of item we are inserting */ + item_plugin *iplug; +}; + +/* __REISER4_COORD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/debug.c b/fs/reiser4/debug.c new file mode 100644 index 000000000000..96c95085e81a --- /dev/null +++ b/fs/reiser4/debug.c @@ -0,0 +1,309 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Debugging facilities. */ + +/* + * This file contains generic debugging functions used by reiser4. Roughly + * following: + * + * panicking: reiser4_do_panic(), reiser4_print_prefix(). 
+ * + * locking: + * reiser4_schedulable(), reiser4_lock_counters(), print_lock_counters(), + * reiser4_no_counters_are_held(), reiser4_commit_check_locks() + * + * error code monitoring (see comment before RETERR macro): + * reiser4_return_err(), reiser4_report_err(). + * + * stack back-tracing: fill_backtrace() + * + * miscellaneous: reiser4_preempt_point(), call_on_each_assert(), + * reiser4_debugtrap(). + * + */ + +#include "reiser4.h" +#include "context.h" +#include "super.h" +#include "txnmgr.h" +#include "znode.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* signal_pending() */ + +#if 0 +#if REISER4_DEBUG +static void reiser4_report_err(void); +#else +#define reiser4_report_err() noop +#endif +#endif /* 0 */ + +/* + * global buffer where message given to reiser4_panic is formatted. + */ +static char panic_buf[REISER4_PANIC_MSG_BUFFER_SIZE]; + +/* + * lock protecting consistency of panic_buf under concurrent panics + */ +static DEFINE_SPINLOCK(panic_guard); + +/* Your best friend. Call it on each occasion. This is called by + fs/reiser4/debug.h:reiser4_panic(). */ +void reiser4_do_panic(const char *format/* format string */ , ... /* rest */) +{ + static int in_panic = 0; + va_list args; + + /* + * check for recursive panic. + */ + if (in_panic == 0) { + in_panic = 1; + + spin_lock(&panic_guard); + va_start(args, format); + vsnprintf(panic_buf, sizeof(panic_buf), format, args); + va_end(args); + printk(KERN_EMERG "reiser4 panicked cowardly: %s", panic_buf); + spin_unlock(&panic_guard); + + /* + * if kernel debugger is configured---drop in. Early dropping + * into kgdb is not always convenient, because panic message + * is not yet printed most of the times. 
But: + * + * (1) message can be extracted from printk_buf[] + * (declared static inside of printk()), and + * + * (2) sometimes serial/kgdb combo dies while printing + * long panic message, so it's more prudent to break into + * debugger earlier. + * + */ + DEBUGON(1); + } + /* to make gcc happy about noreturn attribute */ + panic("%s", panic_buf); +} + +#if 0 +void +reiser4_print_prefix(const char *level, int reperr, const char *mid, + const char *function, const char *file, int lineno) +{ + const char *comm; + int pid; + + if (unlikely(in_interrupt() || in_irq())) { + comm = "interrupt"; + pid = 0; + } else { + comm = current->comm; + pid = current->pid; + } + printk("%sreiser4[%.16s(%i)]: %s (%s:%i)[%s]:\n", + level, comm, pid, function, file, lineno, mid); + if (reperr) + reiser4_report_err(); +} +#endif /* 0 */ + +/* Preemption point: this should be called periodically during long running + operations (carry, allocate, and squeeze are best examples) */ +int reiser4_preempt_point(void) +{ + assert("nikita-3008", reiser4_schedulable()); + cond_resched(); + return signal_pending(current); +} + +#if REISER4_DEBUG +/* Debugging aid: return struct where information about locks taken by current + thread is accumulated. This can be used to formulate lock ordering + constraints and various assertions. + +*/ +reiser4_lock_cnt_info *reiser4_lock_counters(void) +{ + reiser4_context *ctx = get_current_context(); + assert("jmacd-1123", ctx != NULL); + return &ctx->locks; +} + +/* + * print human readable information about locks held by the reiser4 context. 
+ */ +static void print_lock_counters(const char *prefix, + const reiser4_lock_cnt_info * info) +{ + printk("%s: jnode: %i, tree: %i (r:%i,w:%i), dk: %i (r:%i,w:%i)\n" + "jload: %i, " + "txnh: %i, atom: %i, stack: %i, txnmgr: %i, " + "ktxnmgrd: %i, fq: %i\n" + "inode: %i, " + "cbk_cache: %i (r:%i,w%i), " + "eflush: %i, " + "zlock: %i,\n" + "spin: %i, long: %i inode_sem: (r:%i,w:%i)\n" + "d: %i, x: %i, t: %i\n", prefix, + info->spin_locked_jnode, + info->rw_locked_tree, info->read_locked_tree, + info->write_locked_tree, + info->rw_locked_dk, info->read_locked_dk, info->write_locked_dk, + info->spin_locked_jload, + info->spin_locked_txnh, + info->spin_locked_atom, info->spin_locked_stack, + info->spin_locked_txnmgr, info->spin_locked_ktxnmgrd, + info->spin_locked_fq, + info->spin_locked_inode, + info->rw_locked_cbk_cache, + info->read_locked_cbk_cache, + info->write_locked_cbk_cache, + info->spin_locked_super_eflush, + info->spin_locked_zlock, + info->spin_locked, + info->long_term_locked_znode, + info->inode_sem_r, info->inode_sem_w, + info->d_refs, info->x_refs, info->t_refs); +} + +/* check that no spinlocks are held */ +int reiser4_schedulable(void) +{ + if (get_current_context_check() != NULL) { + if (!LOCK_CNT_NIL(spin_locked)) { + print_lock_counters("in atomic", reiser4_lock_counters()); + return 0; + } + } + might_sleep(); + return 1; +} +/* + * return true, iff no locks are held. 
+ */ +int reiser4_no_counters_are_held(void) +{ + reiser4_lock_cnt_info *counters; + + counters = reiser4_lock_counters(); + return + (counters->spin_locked_zlock == 0) && + (counters->spin_locked_jnode == 0) && + (counters->rw_locked_tree == 0) && + (counters->read_locked_tree == 0) && + (counters->write_locked_tree == 0) && + (counters->rw_locked_dk == 0) && + (counters->read_locked_dk == 0) && + (counters->write_locked_dk == 0) && + (counters->spin_locked_txnh == 0) && + (counters->spin_locked_atom == 0) && + (counters->spin_locked_stack == 0) && + (counters->spin_locked_txnmgr == 0) && + (counters->spin_locked_inode == 0) && + (counters->spin_locked == 0) && + (counters->long_term_locked_znode == 0) && + (counters->inode_sem_r == 0) && + (counters->inode_sem_w == 0) && (counters->d_refs == 0); +} + +/* + * return true, iff transaction commit can be done under locks held by the + * current thread. + */ +int reiser4_commit_check_locks(void) +{ + reiser4_lock_cnt_info *counters; + int inode_sem_r; + int inode_sem_w; + int result; + + /* + * inode's read/write semaphore is the only reiser4 lock that can be + * held during commit. + */ + + counters = reiser4_lock_counters(); + inode_sem_r = counters->inode_sem_r; + inode_sem_w = counters->inode_sem_w; + + counters->inode_sem_r = counters->inode_sem_w = 0; + result = reiser4_no_counters_are_held(); + counters->inode_sem_r = inode_sem_r; + counters->inode_sem_w = inode_sem_w; + return result; +} + +/* + * fill "error site" in the current reiser4 context. See comment before RETERR + * macro for more details. + */ +void reiser4_return_err(int code, const char *file, int line) +{ + if (code < 0 && is_in_reiser4_context()) { + reiser4_context *ctx = get_current_context(); + + if (ctx != NULL) { + ctx->err.code = code; + ctx->err.file = file; + ctx->err.line = line; + } + } +} + +#if 0 +/* + * report error information recorder by reiser4_return_err(). 
+ */ +static void reiser4_report_err(void) +{ + reiser4_context *ctx = get_current_context_check(); + + if (ctx != NULL) { + if (ctx->err.code != 0) { + printk("code: %i at %s:%i\n", + ctx->err.code, ctx->err.file, ctx->err.line); + } + } +} +#endif /* 0 */ + +#endif /* REISER4_DEBUG */ + +#if KERNEL_DEBUGGER + +/* + * this functions just drops into kernel debugger. It is a convenient place to + * put breakpoint in. + */ +void reiser4_debugtrap(void) +{ + /* do nothing. Put break point here. */ +#if defined(CONFIG_KGDB) && !defined(CONFIG_REISER4_FS_MODULE) + extern void kgdb_breakpoint(void); + kgdb_breakpoint(); +#endif +} +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/debug.h b/fs/reiser4/debug.h new file mode 100644 index 000000000000..a2a6c6745ce2 --- /dev/null +++ b/fs/reiser4/debug.h @@ -0,0 +1,353 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Declarations of debug macros. */ + +#if !defined(__FS_REISER4_DEBUG_H__) +#define __FS_REISER4_DEBUG_H__ + +#include "forward.h" +#include "reiser4.h" + +/** + * generic function to produce formatted output, decorating it with + * whatever standard prefixes/postfixes we want. "Fun" is a function + * that will be actually called, can be printk, panic etc. + * This is for use by other debugging macros, not by users. + */ +#define DCALL(lev, fun, reperr, label, format, ...) \ +({ \ + fun(lev "reiser4[%.16s(%i)]: %s (%s:%i)[%s]:\n" format "\n" , \ + current->comm, current->pid, __FUNCTION__, \ + __FILE__, __LINE__, label, ## __VA_ARGS__); \ +}) + +/* + * cause kernel to crash + */ +#define reiser4_panic(mid, format, ...) \ + DCALL("", reiser4_do_panic, 1, mid, format , ## __VA_ARGS__) + +/* print message with indication of current process, file, line and + function */ +#define reiser4_log(label, format, ...) 
\ + DCALL(KERN_DEBUG, printk, 0, label, format , ## __VA_ARGS__) + +/* Assertion checked during compilation. + If "cond" is false (0) we get duplicate case label in switch. + Use this to check something like famous + cassert (sizeof(struct reiserfs_journal_commit) == 4096) ; + in 3.x journal.c. If cassertion fails you get compiler error, + so no "maintainer-id". +*/ +#define cassert(cond) ({ switch (-1) { case (cond): case 0: break; } }) + +#define noop do {; } while (0) + +#if REISER4_DEBUG +/* version of info that only actually prints anything when _d_ebugging + is on */ +#define dinfo(format, ...) printk(format , ## __VA_ARGS__) +/* macro to catch logical errors. Put it into `default' clause of + switch() statement. */ +#define impossible(label, format, ...) \ + reiser4_panic(label, "impossible: " format , ## __VA_ARGS__) +/* assert assures that @cond is true. If it is not, reiser4_panic() is + called. Use this for checking logical consistency and _never_ call + this to check correctness of external data: disk blocks and user-input . */ +#define assert(label, cond) \ +({ \ + /* call_on_each_assert(); */ \ + if (cond) { \ + /* put negated check to avoid using !(cond) that would lose \ + * warnings for things like assert(a = b); */ \ + ; \ + } else { \ + DEBUGON(1); \ + reiser4_panic(label, "assertion failed: %s", #cond); \ + } \ +}) + +/* like assertion, but @expr is evaluated even if REISER4_DEBUG is off. */ +#define check_me(label, expr) assert(label, (expr)) + +#define ON_DEBUG(exp) exp + +extern int reiser4_schedulable(void); +extern void call_on_each_assert(void); + +#else + +#define dinfo(format, args...) noop +#define impossible(label, format, args...) noop +#define assert(label, cond) noop +#define check_me(label, expr) ((void) (expr)) +#define ON_DEBUG(exp) +#define reiser4_schedulable() might_sleep() + +/* REISER4_DEBUG */ +#endif + +#if REISER4_DEBUG +/* per-thread information about lock acquired by this thread. 
Used by lock + * ordering checking in spin_macros.h */ +typedef struct reiser4_lock_cnt_info { + int rw_locked_tree; + int read_locked_tree; + int write_locked_tree; + + int rw_locked_dk; + int read_locked_dk; + int write_locked_dk; + + int rw_locked_cbk_cache; + int read_locked_cbk_cache; + int write_locked_cbk_cache; + + int spin_locked_zlock; + int spin_locked_jnode; + int spin_locked_jload; + int spin_locked_txnh; + int spin_locked_atom; + int spin_locked_stack; + int spin_locked_txnmgr; + int spin_locked_ktxnmgrd; + int spin_locked_fq; + int spin_locked_inode; + int spin_locked_super_eflush; + int spin_locked; + int long_term_locked_znode; + + int inode_sem_r; + int inode_sem_w; + + int d_refs; + int x_refs; + int t_refs; +} reiser4_lock_cnt_info; + +extern struct reiser4_lock_cnt_info *reiser4_lock_counters(void); +#define IN_CONTEXT(a, b) (is_in_reiser4_context() ? (a) : (b)) + +/* increment lock-counter @counter, if present */ +#define LOCK_CNT_INC(counter) \ + IN_CONTEXT(++(reiser4_lock_counters()->counter), 0) + +/* decrement lock-counter @counter, if present */ +#define LOCK_CNT_DEC(counter) \ + IN_CONTEXT(--(reiser4_lock_counters()->counter), 0) + +/* check that lock-counter is zero. This is for use in assertions */ +#define LOCK_CNT_NIL(counter) \ + IN_CONTEXT(reiser4_lock_counters()->counter == 0, 1) + +/* check that lock-counter is greater than zero. 
This is for use in + * assertions */ +#define LOCK_CNT_GTZ(counter) \ + IN_CONTEXT(reiser4_lock_counters()->counter > 0, 1) +#define LOCK_CNT_LT(counter,n) \ + IN_CONTEXT(reiser4_lock_counters()->counter < n, 1) + +#else /* REISER4_DEBUG */ + +/* no-op versions on the above */ + +typedef struct reiser4_lock_cnt_info { +} reiser4_lock_cnt_info; + +#define reiser4_lock_counters() ((reiser4_lock_cnt_info *)NULL) +#define LOCK_CNT_INC(counter) noop +#define LOCK_CNT_DEC(counter) noop +#define LOCK_CNT_NIL(counter) (1) +#define LOCK_CNT_GTZ(counter) (1) +#define LOCK_CNT_LT(counter, n) (1) + +#endif /* REISER4_DEBUG */ + +#define assert_spin_not_locked(lock) BUG_ON(0) +#define assert_rw_write_locked(lock) BUG_ON(0) +#define assert_rw_read_locked(lock) BUG_ON(0) +#define assert_rw_locked(lock) BUG_ON(0) +#define assert_rw_not_write_locked(lock) BUG_ON(0) +#define assert_rw_not_read_locked(lock) BUG_ON(0) +#define assert_rw_not_locked(lock) BUG_ON(0) + +/* flags controlling debugging behavior. Are set through debug_flags=N mount + option. */ +typedef enum { + /* print a lot of information during panic. When this is on all jnodes + * are listed. This can be *very* large output. Usually you don't want + * this. Especially over serial line. */ + REISER4_VERBOSE_PANIC = 0x00000001, + /* print a lot of information during umount */ + REISER4_VERBOSE_UMOUNT = 0x00000002, + /* print gathered statistics on umount */ + REISER4_STATS_ON_UMOUNT = 0x00000004, + /* check node consistency */ + REISER4_CHECK_NODE = 0x00000008 +} reiser4_debug_flags; + +extern int is_in_reiser4_context(void); + +/* + * evaluate expression @e only if with reiser4 context + */ +#define ON_CONTEXT(e) do { \ + if (is_in_reiser4_context()) { \ + e; \ + } } while (0) + +/* + * evaluate expression @e only when within reiser4_context and debugging is + * on. + */ +#define ON_DEBUG_CONTEXT(e) ON_DEBUG(ON_CONTEXT(e)) + +/* + * complain about unexpected function result and crash. 
Used in "default" + * branches of switch statements and alike to assert that invalid results are + * not silently ignored. + */ +#define wrong_return_value(label, function) \ + impossible(label, "wrong return value from " function) + +/* Issue different types of reiser4 messages to the console */ +#define warning(label, format, ...) \ + DCALL(KERN_WARNING, \ + printk, 1, label, "WARNING: " format , ## __VA_ARGS__) +#define notice(label, format, ...) \ + DCALL(KERN_NOTICE, \ + printk, 1, label, "NOTICE: " format , ## __VA_ARGS__) + +/* mark not yet implemented functionality */ +#define not_yet(label, format, ...) \ + reiser4_panic(label, "NOT YET IMPLEMENTED: " format , ## __VA_ARGS__) + +extern void reiser4_do_panic(const char *format, ...) + __attribute__ ((noreturn, format(printf, 1, 2))); + +extern int reiser4_preempt_point(void); +extern void reiser4_print_stats(void); + +#if REISER4_DEBUG +extern int reiser4_no_counters_are_held(void); +extern int reiser4_commit_check_locks(void); +#else +#define reiser4_no_counters_are_held() (1) +#define reiser4_commit_check_locks() (1) +#endif + +/* true if @i is power-of-two. Useful for rate-limited warnings, etc. */ +#define IS_POW(i) \ +({ \ + typeof(i) __i; \ + \ + __i = (i); \ + !(__i & (__i - 1)); \ +}) + +#define KERNEL_DEBUGGER (1) + +#if KERNEL_DEBUGGER + +extern void reiser4_debugtrap(void); + +/* + * Check condition @cond and drop into kernel debugger (kgdb) if it's true. If + * kgdb is not compiled in, do nothing. + */ +#define DEBUGON(cond) \ +({ \ + if (unlikely(cond)) \ + reiser4_debugtrap(); \ +}) +#else +#define DEBUGON(cond) noop +#endif + +/* + * Error code tracing facility. (Idea is borrowed from XFS code.) + * + * Suppose some strange and/or unexpected code is returned from some function + * (for example, write(2) returns -EEXIST). It is possible to place a + * breakpoint in the reiser4_write(), but it is too late here. How to find out + * in what particular place -EEXIST was generated first? 
+ * + * In reiser4 all places where actual error codes are produced (that is, + * statements of the form + * + * return -EFOO; // (1), or + * + * result = -EFOO; // (2) + * + * are replaced with + * + * return RETERR(-EFOO); // (1a), and + * + * result = RETERR(-EFOO); // (2a) respectively + * + * RETERR() macro fills a backtrace in reiser4_context. This back-trace is + * printed in error and warning messages. Moreover, it's possible to put a + * conditional breakpoint in reiser4_return_err (low-level function called + * by RETERR() to do the actual work) to break into debugger immediately + * when particular error happens. + * + */ + +#if REISER4_DEBUG + +/* + * data-type to store information about where error happened ("error site"). + */ +typedef struct err_site { + int code; /* error code */ + const char *file; /* source file, filled by __FILE__ */ + int line; /* source file line, filled by __LINE__ */ +} err_site; + +extern void reiser4_return_err(int code, const char *file, int line); + +/* + * fill &get_current_context()->err_site with error information. + */ +#define RETERR(code) \ +({ \ + typeof(code) __code; \ + \ + __code = (code); \ + reiser4_return_err(__code, __FILE__, __LINE__); \ + __code; \ +}) + +#else + +/* + * no-op versions of the above + */ + +typedef struct err_site { +} err_site; +#define RETERR(code) code +#endif + +#if REISER4_LARGE_KEY +/* + * conditionally compile arguments only if REISER4_LARGE_KEY is on. + */ +#define ON_LARGE_KEY(...) __VA_ARGS__ +#else +#define ON_LARGE_KEY(...) +#endif + +/* __FS_REISER4_DEBUG_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/dformat.h b/fs/reiser4/dformat.h new file mode 100644 index 000000000000..7316754daeaa --- /dev/null +++ b/fs/reiser4/dformat.h @@ -0,0 +1,73 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Formats of on-disk data and conversion functions. */ + +/* put all item formats in the files describing the particular items, + our model is, everything you need to do to add an item to reiser4, + (excepting the changes to the plugin that uses the item which go + into the file defining that plugin), you put into one file. */ +/* Data on disk are stored in little-endian format. + To declare fields of on-disk structures, use d8, d16, d32 and d64. + d??tocpu() and cputod??() to convert. */ + +#if !defined(__FS_REISER4_DFORMAT_H__) +#define __FS_REISER4_DFORMAT_H__ + +#include "debug.h" + +#include +#include +#include + +typedef __u8 d8; +typedef __le16 d16; +typedef __le32 d32; +typedef __le64 d64; + +#define PACKED __attribute__((packed)) + +/* data-type for block number */ +typedef __u64 reiser4_block_nr; + +/* data-type for block number on disk, disk format */ +typedef __le64 reiser4_dblock_nr; + +/** + * disk_addr_eq - compare disk addresses + * @b1: pointer to block number ot compare + * @b2: pointer to block number ot compare + * + * Returns true if if disk addresses are the same + */ +static inline int disk_addr_eq(const reiser4_block_nr * b1, + const reiser4_block_nr * b2) +{ + assert("nikita-1033", b1 != NULL); + assert("nikita-1266", b2 != NULL); + + return !memcmp(b1, b2, sizeof *b1); +} + +/* structure of master reiser4 super block */ +typedef struct reiser4_master_sb { + char magic[16]; /* "ReIsEr4" */ + __le16 disk_plugin_id; /* id of disk layout plugin */ + __le16 blocksize; + char uuid[16]; /* unique id */ + char label[16]; /* filesystem label */ + __le64 diskmap; 
/* location of the diskmap. 0 if not present */ +} reiser4_master_sb; + +/* __FS_REISER4_DFORMAT_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/discard.c b/fs/reiser4/discard.c new file mode 100644 index 000000000000..e1b1ea8d1f5b --- /dev/null +++ b/fs/reiser4/discard.c @@ -0,0 +1,179 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* TRIM/discard interoperation subsystem for reiser4. */ + +/* + * This subsystem is responsible for populating an atom's ->discard_set and + * (later) converting it into a series of discard calls to the kernel. + * + * The discard is an in-kernel interface for notifying the storage + * hardware about blocks that are being logically freed by the filesystem. + * This is done via calling the blkdev_issue_discard() function. There are + * restrictions on block ranges: they should constitute at least one erase unit + * in length and be correspondingly aligned. Otherwise a discard request will + * be ignored. + * + * The erase unit size is kept in struct queue_limits as discard_granularity. + * The offset from the partition start to the first erase unit is kept in + * struct queue_limits as discard_alignment. + * + * At atom level, we record numbers of all blocks that happen to be deallocated + * during the transaction. Then we read the generated set, filter out any blocks + * that have since been allocated again and issue discards for everything still + * valid. This is what discard.[ch] is here for. 
+ * + * However, simply iterating through the recorded extents is not enough: + * - if a single extent is smaller than the erase unit, then this particular + * extent won't be discarded even if it is surrounded by enough free blocks + * to constitute a whole erase unit; + * - we won't be able to merge small adjacent extents forming an extent long + * enough to be discarded. + * + * MECHANISM: + * + * During the transaction deallocated extents are recorded in atom's delete + * set. In reiser4, there are two methods to deallocate a block: + * 1. deferred deallocation, enabled by BA_DEFER flag to reiser4_dealloc_block(). + * In this mode, blocks are stored to delete set instead of being marked free + * immediately. After committing the transaction, the delete set is "applied" + * by the block allocator and all these blocks are marked free in memory + * (see reiser4_post_write_back_hook()). + * Space management plugins also read the delete set to update on-disk + * allocation records (see reiser4_pre_commit_hook()). + * 2. immediate deallocation (the opposite). + * In this mode, blocks are marked free immediately. This is used by the + * journal subsystem to manage space used by the journal records, so these + * allocations are not visible to the space management plugins and never hit + * the disk. + * + * When discard is enabled, all immediate deallocations become deferred. This + * is OK because journal's allocations happen after reiser4_pre_commit_hook() + * where the on-disk space allocation records are updated. So, in this mode + * the atom's delete set becomes "the discard set" -- list of blocks that have + * to be considered for discarding. + * + * Discarding is performed before completing deferred deallocations, hence all + * extents in the discard set are still marked as allocated and cannot contain + * any data. Thus we can avoid any checks for blocks directly present in the + * discard set. 
+ * + * For now, we don't perform "padding" of extents to erase unit boundaries. + * This means if extents are not aligned with the device's erase unit lattice, + * the partial erase units at head and tail of extents are truncated by kernel + * (in blkdev_issue_discard()). + * + * So, at commit time the following actions take place: + * - delete sets are merged to form the discard set; + * - elements of the discard set are sorted; + * - the discard set is iterated, joining any adjacent extents; + * - for each extent, a single call to blkdev_issue_discard() is done. + */ + +#include "discard.h" +#include "context.h" +#include "debug.h" +#include "txnmgr.h" +#include "super.h" + +#include +#include +#include + +static int __discard_extent(struct block_device *bdev, sector_t start, + sector_t len) +{ + assert("intelfx-21", bdev != NULL); + + return blkdev_issue_discard(bdev, start, len, reiser4_ctx_gfp_mask_get(), + 0); +} + +static int discard_extent(txn_atom *atom UNUSED_ARG, + const reiser4_block_nr* start, + const reiser4_block_nr* len, + void *data UNUSED_ARG) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct block_device *bdev = sb->s_bdev; + + sector_t extent_start_sec, extent_len_sec; + + const int sec_per_blk = sb->s_blocksize >> 9; + + /* we assume block = N * sector */ + assert("intelfx-7", sec_per_blk > 0); + + /* convert extent to sectors */ + extent_start_sec = *start * sec_per_blk; + extent_len_sec = *len * sec_per_blk; + + /* discard the extent, don't pad it to erase unit boundaries for now */ + return __discard_extent(bdev, extent_start_sec, extent_len_sec); +} + +int discard_atom(txn_atom *atom, struct list_head *processed_set) +{ + int ret; + struct list_head discard_set; + + if (!reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + spin_unlock_atom(atom); + return 0; + } + + assert("intelfx-28", atom != NULL); + assert("intelfx-59", processed_set != NULL); + + if (list_empty(&atom->discard.delete_set)) { + /* Nothing left 
to discard. */ + spin_unlock_atom(atom); + return 0; + } + + /* Take the delete sets from the atom in order to release atom spinlock. */ + blocknr_list_init(&discard_set); + blocknr_list_merge(&atom->discard.delete_set, &discard_set); + spin_unlock_atom(atom); + + /* Sort the discard list, joining adjacent and overlapping extents. */ + blocknr_list_sort_and_join(&discard_set); + + /* Perform actual dirty work. */ + ret = blocknr_list_iterator(NULL, &discard_set, &discard_extent, NULL, 0); + + /* Add processed extents to the temporary list. */ + blocknr_list_merge(&discard_set, processed_set); + + if (ret != 0) { + return ret; + } + + /* Let's do this again for any new extents in the atom's discard set. */ + return -E_REPEAT; +} + +void discard_atom_post(txn_atom *atom, struct list_head *processed_set) +{ + assert("intelfx-60", atom != NULL); + assert("intelfx-61", processed_set != NULL); + + if (!reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + spin_unlock_atom(atom); + return; + } + + blocknr_list_merge(processed_set, &atom->discard.delete_set); + spin_unlock_atom(atom); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/discard.h b/fs/reiser4/discard.h new file mode 100644 index 000000000000..5f0d0d8c12c3 --- /dev/null +++ b/fs/reiser4/discard.h @@ -0,0 +1,42 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* TRIM/discard interoperation subsystem for reiser4. */ + +#if !defined(__FS_REISER4_DISCARD_H__) +#define __FS_REISER4_DISCARD_H__ + +#include "forward.h" +#include "dformat.h" + +/** + * Issue discard requests for all block extents recorded in @atom's delete sets, + * if discard is enabled. The extents processed are removed from the @atom's + * delete sets and stored in @processed_set. + * + * @atom must be locked on entry and is unlocked on exit. 
+ * @processed_set must be initialized with blocknr_list_init(). + */ +extern int discard_atom(txn_atom *atom, struct list_head *processed_set); + +/** + * Splices @processed_set back to @atom's delete set. + * Must be called after discard_atom() loop, using the same @processed_set. + * + * @atom must be locked on entry and is unlocked on exit. + * @processed_set must be the same as passed to discard_atom(). + */ +extern void discard_atom_post(txn_atom *atom, struct list_head *processed_set); + +/* __FS_REISER4_DISCARD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/dscale.c b/fs/reiser4/dscale.c new file mode 100644 index 000000000000..2f13c4ea6e7b --- /dev/null +++ b/fs/reiser4/dscale.c @@ -0,0 +1,192 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Scalable on-disk integers */ + +/* + * Various on-disk structures contain integer-like structures. Stat-data + * contain [yes, "data" is plural, check the dictionary] file size, link + * count; extent unit contains extent width etc. To accommodate for general + * case enough space is reserved to keep largest possible value. 64 bits in + * all cases above. But in overwhelming majority of cases numbers actually + * stored in these fields will be comparatively small and reserving 8 bytes is + * a waste of precious disk bandwidth. + * + * Scalable integers are one way to solve this problem. dscale_write() + * function stores __u64 value in the given area consuming from 1 to 9 bytes, + * depending on the magnitude of the value supplied. dscale_read() reads value + * previously stored by dscale_write(). + * + * dscale_write() produces format not completely unlike of UTF: two highest + * bits of the first byte are used to store "tag". One of 4 possible tag + * values is chosen depending on the number being encoded: + * + * 0 ... 
0x3f => 0 [table 1] + * 0x40 ... 0x3fff => 1 + * 0x4000 ... 0x3fffffff => 2 + * 0x40000000 ... 0xffffffffffffffff => 3 + * + * (see dscale_range() function) + * + * Values in the range 0x40000000 ... 0xffffffffffffffff require 8 full bytes + * to be stored, so in this case there is no place in the first byte to store + * tag. For such values tag is stored in an extra 9th byte. + * + * As _highest_ bits are used for the test (which is natural) scaled integers + * are stored in BIG-ENDIAN format in contrast with the rest of reiser4 which + * uses LITTLE-ENDIAN. + * + */ + +#include "debug.h" +#include "dscale.h" + +/* return tag of scaled integer stored at @address */ +static int gettag(const unsigned char *address) +{ + /* tag is stored in two highest bits */ + return (*address) >> 6; +} + +/* clear tag from value. Clear tag embedded into @value. */ +static void cleartag(__u64 *value, int tag) +{ + /* + * W-w-what ?! + * + * Actually, this is rather simple: @value passed here was read by + * dscale_read(), converted from BIG-ENDIAN, and padded to __u64 by + * zeroes. Tag is still stored in the highest (arithmetically) + * non-zero bits of @value, but relative position of tag within __u64 + * depends on @tag. + * + * For example if @tag is 0, it's stored 2 highest bits of lowest + * byte, and its offset (counting from lowest bit) is 8 - 2 == 6 bits. + * + * If tag is 1, it's stored in two highest bits of 2nd lowest byte, + * and it's offset if (2 * 8) - 2 == 14 bits. + * + * See table 1 above for details. + * + * All these cases are captured by the formula: + */ + *value &= ~(3 << (((1 << tag) << 3) - 2)); + /* + * That is, clear two (3 == 0t11) bits at the offset + * + * 8 * (2 ^ tag) - 2, + * + * that is, two highest bits of (2 ^ tag)-th byte of @value. + */ +} + +/* return tag for @value. See table 1 above for details. 
*/ +static int dscale_range(__u64 value) +{ + if (value > 0x3fffffff) + return 3; + if (value > 0x3fff) + return 2; + if (value > 0x3f) + return 1; + return 0; +} + +/* restore value stored at @adderss by dscale_write() and return number of + * bytes consumed */ +int dscale_read(unsigned char *address, __u64 *value) +{ + int tag; + + /* read tag */ + tag = gettag(address); + switch (tag) { + case 3: + /* In this case tag is stored in an extra byte, skip this byte + * and decode value stored in the next 8 bytes.*/ + *value = __be64_to_cpu(get_unaligned((__be64 *)(address + 1))); + /* worst case: 8 bytes for value itself plus one byte for + * tag. */ + return 9; + case 0: + *value = get_unaligned(address); + break; + case 1: + *value = __be16_to_cpu(get_unaligned((__be16 *)address)); + break; + case 2: + *value = __be32_to_cpu(get_unaligned((__be32 *)address)); + break; + default: + return RETERR(-EIO); + } + /* clear tag embedded into @value */ + cleartag(value, tag); + /* number of bytes consumed is (2 ^ tag)---see table 1. */ + return 1 << tag; +} + +/* number of bytes consumed */ +int dscale_bytes_to_read(unsigned char *address) +{ + int tag; + + tag = gettag(address); + switch (tag) { + case 0: + case 1: + case 2: + return 1 << tag; + case 3: + return 9; + default: + return RETERR(-EIO); + } +} + +/* store @value at @address and return number of bytes consumed */ +int dscale_write(unsigned char *address, __u64 value) +{ + int tag; + int shift; + __be64 v; + unsigned char *valarr; + + tag = dscale_range(value); + v = __cpu_to_be64(value); + valarr = (unsigned char *)&v; + shift = (tag == 3) ? 
1 : 0; + memcpy(address + shift, valarr + sizeof v - (1 << tag), 1 << tag); + *address |= (tag << 6); + return shift + (1 << tag); +} + +/* number of bytes required to store @value */ +int dscale_bytes_to_write(__u64 value) +{ + int bytes; + + bytes = 1 << dscale_range(value); + if (bytes == 8) + ++bytes; + return bytes; +} + +/* returns true if @value and @other require the same number of bytes to be + * stored. Used by detect when data structure (like stat-data) has to be + * expanded or contracted. */ +int dscale_fit(__u64 value, __u64 other) +{ + return dscale_range(value) == dscale_range(other); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/dscale.h b/fs/reiser4/dscale.h new file mode 100644 index 000000000000..9fbf7158c149 --- /dev/null +++ b/fs/reiser4/dscale.h @@ -0,0 +1,28 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Scalable on-disk integers. See dscale.h for details. */ + +#if !defined(__FS_REISER4_DSCALE_H__) +#define __FS_REISER4_DSCALE_H__ + +#include "dformat.h" + +extern int dscale_read(unsigned char *address, __u64 *value); +extern int dscale_write(unsigned char *address, __u64 value); +extern int dscale_bytes_to_read(unsigned char *address); +extern int dscale_bytes_to_write(__u64 value); +extern int dscale_fit(__u64 value, __u64 other); + +/* __FS_REISER4_DSCALE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/entd.c b/fs/reiser4/entd.c new file mode 100644 index 000000000000..e6b56ae57dab --- /dev/null +++ b/fs/reiser4/entd.c @@ -0,0 +1,361 @@ +/* Copyright 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Ent daemon. 
*/ + +#include "debug.h" +#include "txnmgr.h" +#include "tree.h" +#include "entd.h" +#include "super.h" +#include "context.h" +#include "reiser4.h" +#include "vfs_ops.h" +#include "page_cache.h" +#include "inode.h" + +#include /* struct task_struct */ +#include +#include +#include +#include /* INITIAL_JIFFIES */ +#include /* bdi_write_congested */ +#include +#include +#include + +#define DEF_PRIORITY 12 +#define MAX_ENTD_ITERS 10 + +static void entd_flush(struct super_block *, struct wbq *); +static int entd(void *arg); + +/* + * set ->comm field of end thread to make its state visible to the user level + */ +#define entd_set_comm(state) \ + snprintf(current->comm, sizeof(current->comm), \ + "ent:%s%s", super->s_id, (state)) + +/** + * reiser4_init_entd - initialize entd context and start kernel daemon + * @super: super block to start ent thread for + * + * Creates entd contexts, starts kernel thread and waits until it + * initializes. + */ +int reiser4_init_entd(struct super_block *super) +{ + entd_context *ctx; + + assert("nikita-3104", super != NULL); + + ctx = get_entd_context(super); + + memset(ctx, 0, sizeof *ctx); + spin_lock_init(&ctx->guard); + init_waitqueue_head(&ctx->wait); +#if REISER4_DEBUG + INIT_LIST_HEAD(&ctx->flushers_list); +#endif + /* lists of writepage requests */ + INIT_LIST_HEAD(&ctx->todo_list); + INIT_LIST_HEAD(&ctx->done_list); + /* start entd */ + ctx->tsk = kthread_run(entd, super, "ent:%s", super->s_id); + if (IS_ERR(ctx->tsk)) + return PTR_ERR(ctx->tsk); + return 0; +} + +static void put_wbq(struct wbq *rq) +{ + iput(rq->mapping->host); + complete(&rq->completion); +} + +/* ent should be locked */ +static struct wbq *__get_wbq(entd_context * ent) +{ + struct wbq *wbq; + + if (list_empty(&ent->todo_list)) + return NULL; + + ent->nr_todo_reqs--; + wbq = list_entry(ent->todo_list.next, struct wbq, link); + list_del_init(&wbq->link); + return wbq; +} + +/* ent thread function */ +static int entd(void *arg) +{ + struct super_block *super; 
+ entd_context *ent; + int done = 0; + + super = arg; + /* do_fork() just copies task_struct into the new + thread. ->fs_context shouldn't be copied of course. This shouldn't + be a problem for the rest of the code though. + */ + current->journal_info = NULL; + + ent = get_entd_context(super); + + while (!done) { + try_to_freeze(); + + spin_lock(&ent->guard); + while (ent->nr_todo_reqs != 0) { + struct wbq *rq; + + assert("", list_empty(&ent->done_list)); + + /* take request from the queue head */ + rq = __get_wbq(ent); + assert("", rq != NULL); + ent->cur_request = rq; + spin_unlock(&ent->guard); + + entd_set_comm("!"); + entd_flush(super, rq); + + put_wbq(rq); + + /* + * wakeup all requestors and iput their inodes + */ + spin_lock(&ent->guard); + while (!list_empty(&ent->done_list)) { + rq = list_entry(ent->done_list.next, struct wbq, link); + list_del_init(&rq->link); + ent->nr_done_reqs--; + spin_unlock(&ent->guard); + assert("", rq->written == 1); + put_wbq(rq); + spin_lock(&ent->guard); + } + } + spin_unlock(&ent->guard); + + entd_set_comm("."); + + { + DEFINE_WAIT(__wait); + + do { + prepare_to_wait(&ent->wait, &__wait, TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + done = 1; + break; + } + if (ent->nr_todo_reqs != 0) + break; + schedule(); + } while (0); + finish_wait(&ent->wait, &__wait); + } + } + BUG_ON(ent->nr_todo_reqs != 0); + return 0; +} + +/** + * reiser4_done_entd - stop entd kernel thread + * @super: super block to stop ent thread for + * + * It is called on umount. Sends stop signal to entd and wait until it handles + * it. 
+ */ +void reiser4_done_entd(struct super_block *super) +{ + entd_context *ent; + + assert("nikita-3103", super != NULL); + + ent = get_entd_context(super); + assert("zam-1055", ent->tsk != NULL); + kthread_stop(ent->tsk); +} + +/* called at the beginning of jnode_flush to register flusher thread with ent + * daemon */ +void reiser4_enter_flush(struct super_block *super) +{ + entd_context *ent; + + assert("zam-1029", super != NULL); + ent = get_entd_context(super); + + assert("zam-1030", ent != NULL); + + spin_lock(&ent->guard); + ent->flushers++; +#if REISER4_DEBUG + list_add(&get_current_context()->flushers_link, &ent->flushers_list); +#endif + spin_unlock(&ent->guard); +} + +/* called at the end of jnode_flush */ +void reiser4_leave_flush(struct super_block *super) +{ + entd_context *ent; + int wake_up_ent; + + assert("zam-1027", super != NULL); + ent = get_entd_context(super); + + assert("zam-1028", ent != NULL); + + spin_lock(&ent->guard); + ent->flushers--; + wake_up_ent = (ent->flushers == 0 && ent->nr_todo_reqs != 0); +#if REISER4_DEBUG + list_del_init(&get_current_context()->flushers_link); +#endif + spin_unlock(&ent->guard); + if (wake_up_ent) + wake_up_process(ent->tsk); +} + +#define ENTD_CAPTURE_APAGE_BURST SWAP_CLUSTER_MAX + +static void entd_flush(struct super_block *super, struct wbq *rq) +{ + reiser4_context ctx; + + init_stack_context(&ctx, super); + ctx.entd = 1; + ctx.gfp_mask = GFP_NOFS; + + rq->wbc->range_start = page_offset(rq->page); + rq->wbc->range_end = rq->wbc->range_start + + (ENTD_CAPTURE_APAGE_BURST << PAGE_SHIFT); + + + rq->mapping->a_ops->writepages(rq->mapping, rq->wbc); + + if (rq->wbc->nr_to_write > 0) { + long result; + struct bdi_writeback *wb; + struct wb_writeback_work work = { + .sb = super, + .sync_mode = WB_SYNC_NONE, + .nr_pages = LONG_MAX, + .range_cyclic = 0, + .reason = WB_REASON_VMSCAN, + }; + rq->wbc->sync_mode = work.sync_mode, + rq->wbc->range_cyclic = work.range_cyclic, + rq->wbc->range_start = 0; + 
rq->wbc->range_end = LLONG_MAX; + /* + * we don't need to pin superblock for writeback: + * this is implicitly pinned by write_page_by_ent + * (via igrab), so that shutdown_super() will wait + * (on reiser4_put_super) for entd completion. + */ + wb = &inode_to_bdi(rq->mapping->host)->wb; + + spin_lock(&wb->list_lock); + result = generic_writeback_sb_inodes(super, + wb, + rq->wbc, + &work, + true); + spin_unlock(&wb->list_lock); + } + rq->wbc->nr_to_write = ENTD_CAPTURE_APAGE_BURST; + + reiser4_writeout(super, rq->wbc); + context_set_commit_async(&ctx); + reiser4_exit_context(&ctx); +} + +/** + * write_page_by_ent - ask entd thread to flush this page as part of slum + * @page: page to be written + * @wbc: writeback control passed to reiser4_writepage + * + * Creates a request, puts it on entd list of requests, wakeups entd if + * necessary, waits until entd completes with the request. + */ +int write_page_by_ent(struct page *page, struct writeback_control *wbc) +{ + struct super_block *sb; + struct inode *inode; + entd_context *ent; + struct wbq rq; + + assert("", PageLocked(page)); + assert("", page->mapping != NULL); + + sb = page->mapping->host->i_sb; + ent = get_entd_context(sb); + assert("", ent && ent->done == 0); + + /* + * we are going to unlock page and ask ent thread to write the + * page. Re-dirty page before unlocking so that if ent thread fails to + * write it - it will remain dirty + */ + set_page_dirty_notag(page); + account_page_redirty(page); + + /* + * pin inode in memory, unlock page, entd_flush will iput. 
We can not + * iput here becasue we can not allow delete_inode to be called here + */ + inode = igrab(page->mapping->host); + unlock_page(page); + if (inode == NULL) + /* inode is getting freed */ + return 0; + + /* init wbq */ + INIT_LIST_HEAD(&rq.link); + rq.magic = WBQ_MAGIC; + rq.wbc = wbc; + rq.page = page; + rq.mapping = inode->i_mapping; + rq.node = NULL; + rq.written = 0; + init_completion(&rq.completion); + + /* add request to entd's list of writepage requests */ + spin_lock(&ent->guard); + ent->nr_todo_reqs++; + list_add_tail(&rq.link, &ent->todo_list); + if (ent->nr_todo_reqs == 1) + wake_up_process(ent->tsk); + + spin_unlock(&ent->guard); + + /* wait until entd finishes */ + wait_for_completion(&rq.completion); + + if (rq.written) + /* Eventually ENTD has written the page to disk. */ + return 0; + return 0; +} + +int wbq_available(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + entd_context *ent = get_entd_context(sb); + return ent->nr_todo_reqs; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/entd.h b/fs/reiser4/entd.h new file mode 100644 index 000000000000..4f79a578fba3 --- /dev/null +++ b/fs/reiser4/entd.h @@ -0,0 +1,90 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Ent daemon. */ + +#ifndef __ENTD_H__ +#define __ENTD_H__ + +#include "context.h" + +#include +#include +#include +#include +#include /* for struct task_struct */ + +#define WBQ_MAGIC 0x7876dc76 + +/* write-back request. */ +struct wbq { + int magic; + struct list_head link; /* list head of this list is in entd context */ + struct writeback_control *wbc; + struct page *page; + struct address_space *mapping; + struct completion completion; + jnode *node; /* set if ent thread captured requested page */ + int written; /* set if ent thread wrote requested page */ +}; + +/* ent-thread context. 
This is used to synchronize starting/stopping ent + * threads. */ +typedef struct entd_context { + /* wait queue that ent thread waits on for more work. It's + * signaled by write_page_by_ent(). */ + wait_queue_head_t wait; + /* spinlock protecting other fields */ + spinlock_t guard; + /* ent thread */ + struct task_struct *tsk; + /* set to indicate that ent thread should leave. */ + int done; + /* counter of active flushers */ + int flushers; + /* + * when reiser4_writepage asks entd to write a page - it adds struct + * wbq to this list + */ + struct list_head todo_list; + /* number of elements on the above list */ + int nr_todo_reqs; + + struct wbq *cur_request; + /* + * when entd writes a page it moves write-back request from todo_list + * to done_list. This list is used at the end of entd iteration to + * wakeup requestors and iput inodes. + */ + struct list_head done_list; + /* number of elements on the above list */ + int nr_done_reqs; + +#if REISER4_DEBUG + /* list of all active flushers */ + struct list_head flushers_list; +#endif +} entd_context; + +extern int reiser4_init_entd(struct super_block *); +extern void reiser4_done_entd(struct super_block *); + +extern void reiser4_enter_flush(struct super_block *); +extern void reiser4_leave_flush(struct super_block *); + +extern int write_page_by_ent(struct page *, struct writeback_control *); +extern int wbq_available(void); +extern void ent_writes_page(struct super_block *, struct page *); + +extern jnode *get_jnode_by_wbq(struct super_block *, struct wbq *); +/* __ENTD_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/eottl.c b/fs/reiser4/eottl.c new file mode 100644 index 000000000000..169b8684a33a --- /dev/null +++ b/fs/reiser4/eottl.c @@ -0,0 +1,510 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree_mod.h" +#include "carry.h" +#include "tree.h" +#include "super.h" + +#include /* for __u?? */ + +/* + * Extents on the twig level (EOTTL) handling. + * + * EOTTL poses some problems to the tree traversal, that are better explained + * by example. + * + * Suppose we have block B1 on the twig level with the following items: + * + * 0. internal item I0 with key (0:0:0:0) (locality, key-type, object-id, + * offset) + * 1. extent item E1 with key (1:4:100:0), having 10 blocks of 4k each + * 2. internal item I2 with key (10:0:0:0) + * + * We are trying to insert item with key (5:0:0:0). Lookup finds node B1, and + * then intra-node lookup is done. This lookup finished on the E1, because the + * key we are looking for is larger than the key of E1 and is smaller than key + * the of I2. + * + * Here search is stuck. + * + * After some thought it is clear what is wrong here: extents on the twig level + * break some basic property of the *search* tree (on the pretext, that they + * restore property of balanced tree). + * + * Said property is the following: if in the internal node of the search tree + * we have [ ... Key1 Pointer Key2 ... ] then, all data that are or will be + * keyed in the tree with the Key such that Key1 <= Key < Key2 are accessible + * through the Pointer. 
+ * + * This is not true, when Pointer is Extent-Pointer, simply because extent + * cannot expand indefinitely to the right to include any item with + * + * Key1 <= Key <= Key2. + * + * For example, our E1 extent is only responsible for the data with keys + * + * (1:4:100:0) <= key <= (1:4:100:0xffffffffffffffff), and + * + * so, key range + * + * ( (1:4:100:0xffffffffffffffff), (10:0:0:0) ) + * + * is orphaned: there is no way to get there from the tree root. + * + * In other words, extent pointers are different than normal child pointers as + * far as search tree is concerned, and this creates such problems. + * + * Possible solution for this problem is to insert our item into node pointed + * to by I2. There are some problems through: + * + * (1) I2 can be in a different node. + * (2) E1 can be immediately followed by another extent E2. + * + * (1) is solved by calling reiser4_get_right_neighbor() and accounting + * for locks/coords as necessary. + * + * (2) is more complex. Solution here is to insert new empty leaf node and + * insert internal item between E1 and E2 pointing to said leaf node. This is + * further complicated by possibility that E2 is in a different node, etc. + * + * Problems: + * + * (1) if there was internal item I2 immediately on the right of an extent E1 + * we and we decided to insert new item S1 into node N2 pointed to by I2, then + * key of S1 will be less than smallest key in the N2. Normally, search key + * checks that key we are looking for is in the range of keys covered by the + * node key is being looked in. To work around of this situation, while + * preserving useful consistency check new flag CBK_TRUST_DK was added to the + * cbk falgs bitmask. This flag is automatically set on entrance to the + * coord_by_key() and is only cleared when we are about to enter situation + * described above. 
+ *
+ * (2) If extent E1 is immediately followed by another extent E2 and we are
+ * searching for the key that is between E1 and E2 we only have to insert new
+ * empty leaf node when coord_by_key was called for insertion, rather than just
+ * for lookup. To distinguish these cases, new flag CBK_FOR_INSERT was added to
+ * the cbk flags bitmask. This flag is automatically set by coord_by_key calls
+ * performed by insert_by_key() and friends.
+ *
+ * (3) Insertion of new empty leaf node (possibly) requires balancing. In any
+ * case it requires modification of node content which is only possible under
+ * write lock. It may well happen that we only have read lock on the node where
+ * new internal pointer is to be inserted (common case: lookup of non-existent
+ * stat-data that falls between two extents). If only read lock is held, tree
+ * traversal is restarted with lock_level modified so that next time we hit
+ * this problem, write lock will be held. Once we have write lock, balancing
+ * will be performed.
+ */
+
+/**
+ * is_next_item_internal - check whether next item is internal
+ * @coord: coordinate of extent item in twig node
+ * @key: search key
+ * @lh: twig node lock handle
+ *
+ * Looks at the unit next to @coord. If it is an internal one - 1 is returned,
+ * @coord is set to that unit. If that unit is in right neighbor, @lh is moved
+ * to that node, @coord is set to its first unit. If next item is not internal
+ * or does not exist then 0 is returned, @coord and @lh are left unchanged. 2
+ * is returned if search restart has to be done.
+ */ +static int +is_next_item_internal(coord_t *coord, const reiser4_key * key, + lock_handle * lh) +{ + coord_t next; + lock_handle rn; + int result; + + coord_dup(&next, coord); + if (coord_next_unit(&next) == 0) { + /* next unit is in this node */ + if (item_is_internal(&next)) { + coord_dup(coord, &next); + return 1; + } + assert("vs-3", item_is_extent(&next)); + return 0; + } + + /* + * next unit either does not exist or is in right neighbor. If it is in + * right neighbor we have to check right delimiting key because + * concurrent thread could get their first and insert item with a key + * smaller than @key + */ + read_lock_dk(current_tree); + result = keycmp(key, znode_get_rd_key(coord->node)); + read_unlock_dk(current_tree); + assert("vs-6", result != EQUAL_TO); + if (result == GREATER_THAN) + return 2; + + /* lock right neighbor */ + init_lh(&rn); + result = reiser4_get_right_neighbor(&rn, coord->node, + znode_is_wlocked(coord->node) ? + ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result == -E_NO_NEIGHBOR) { + /* we are on the rightmost edge of the tree */ + done_lh(&rn); + return 0; + } + + if (result) { + assert("vs-4", result < 0); + done_lh(&rn); + return result; + } + + /* + * check whether concurrent thread managed to insert item with a key + * smaller than @key + */ + read_lock_dk(current_tree); + result = keycmp(key, znode_get_ld_key(rn.node)); + read_unlock_dk(current_tree); + assert("vs-6", result != EQUAL_TO); + if (result == GREATER_THAN) { + done_lh(&rn); + return 2; + } + + result = zload(rn.node); + if (result) { + assert("vs-5", result < 0); + done_lh(&rn); + return result; + } + + coord_init_first_unit(&next, rn.node); + if (item_is_internal(&next)) { + /* + * next unit is in right neighbor and it is an unit of internal + * item. Unlock coord->node. Move @lh to right neighbor. @coord + * is set to the first unit of right neighbor. 
+ */ + coord_dup(coord, &next); + zrelse(rn.node); + done_lh(lh); + move_lh(lh, &rn); + return 1; + } + + /* + * next unit is unit of extent item. Return without chaning @lh and + * @coord. + */ + assert("vs-6", item_is_extent(&next)); + zrelse(rn.node); + done_lh(&rn); + return 0; +} + +/** + * rd_key - calculate key of an item next to the given one + * @coord: position in a node + * @key: storage for result key + * + * @coord is set between items or after the last item in a node. Calculate key + * of item to the right of @coord. + */ +static reiser4_key *rd_key(const coord_t *coord, reiser4_key *key) +{ + coord_t dup; + + assert("nikita-2281", coord_is_between_items(coord)); + coord_dup(&dup, coord); + + if (coord_set_to_right(&dup) == 0) + /* next item is in this node. Return its key. */ + unit_key_by_coord(&dup, key); + else { + /* + * next item either does not exist or is in right + * neighbor. Return znode's right delimiting key. + */ + read_lock_dk(current_tree); + *key = *znode_get_rd_key(coord->node); + read_unlock_dk(current_tree); + } + return key; +} + +/** + * add_empty_leaf - insert empty leaf between two extents + * @insert_coord: position in twig node between two extents + * @lh: twig node lock handle + * @key: left delimiting key of new node + * @rdkey: right delimiting key of new node + * + * Inserts empty leaf node between two extent items. It is necessary when we + * have to insert an item on leaf level between two extents (items on the twig + * level). 
+ */ +static int +add_empty_leaf(coord_t *insert_coord, lock_handle *lh, + const reiser4_key *key, const reiser4_key *rdkey) +{ + int result; + carry_pool *pool; + carry_level *todo; + reiser4_item_data *item; + carry_insert_data *cdata; + carry_op *op; + znode *node; + reiser4_tree *tree; + + assert("vs-49827", znode_contains_key_lock(insert_coord->node, key)); + tree = znode_get_tree(insert_coord->node); + node = reiser4_new_node(insert_coord->node, LEAF_LEVEL); + if (IS_ERR(node)) + return PTR_ERR(node); + + /* setup delimiting keys for node being inserted */ + write_lock_dk(tree); + znode_set_ld_key(node, key); + znode_set_rd_key(node, rdkey); + ON_DEBUG(node->creator = current); + ON_DEBUG(node->first_key = *key); + write_unlock_dk(tree); + + ZF_SET(node, JNODE_ORPHAN); + + /* + * allocate carry_pool, 3 carry_level-s, reiser4_item_data and + * carry_insert_data + */ + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) + + sizeof(*item) + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + item = (reiser4_item_data *) (todo + 3); + cdata = (carry_insert_data *) (item + 1); + + op = reiser4_post_carry(todo, COP_INSERT, insert_coord->node, 0); + if (!IS_ERR(op)) { + cdata->coord = insert_coord; + cdata->key = key; + cdata->data = item; + op->u.insert.d = cdata; + op->u.insert.type = COPT_ITEM_DATA; + build_child_ptr_data(node, item); + item->arg = NULL; + /* have @insert_coord to be set at inserted item after + insertion is done */ + todo->track_type = CARRY_TRACK_CHANGE; + todo->tracked = lh; + + result = reiser4_carry(todo, NULL); + if (result == 0) { + /* + * pin node in memory. This is necessary for + * znode_make_dirty() below. + */ + result = zload(node); + if (result == 0) { + lock_handle local_lh; + + /* + * if we inserted new child into tree we have + * to mark it dirty so that flush will be able + * to process it. 
+ */ + init_lh(&local_lh); + result = longterm_lock_znode(&local_lh, node, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + znode_make_dirty(node); + + /* + * when internal item pointing to @node + * was inserted into twig node + * create_hook_internal did not connect + * it properly because its right + * neighbor was not known. Do it + * here + */ + write_lock_tree(tree); + assert("nikita-3312", + znode_is_right_connected(node)); + assert("nikita-2984", + node->right == NULL); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + write_unlock_tree(tree); + result = + connect_znode(insert_coord, node); + ON_DEBUG(if (result == 0) check_dkeys(node);); + + done_lh(lh); + move_lh(lh, &local_lh); + assert("vs-1676", node_is_empty(node)); + coord_init_first_unit(insert_coord, + node); + } else { + warning("nikita-3136", + "Cannot lock child"); + } + done_lh(&local_lh); + zrelse(node); + } + } + } else + result = PTR_ERR(op); + zput(node); + done_carry_pool(pool); + return result; +} + +/** + * handle_eottl - handle extent-on-the-twig-level cases in tree traversal + * @h: search handle + * @outcome: flag saying whether search has to restart or is done + * + * Handles search on twig level. If this function completes search itself then + * it returns 1. If search has to go one level down then 0 is returned. If + * error happens then LOOKUP_DONE is returned via @outcome and error code is + * saved in @h->result. + */ +int handle_eottl(cbk_handle *h, int *outcome) +{ + int result; + reiser4_key key; + coord_t *coord; + + coord = h->coord; + + if (h->level != TWIG_LEVEL || + (coord_is_existing_item(coord) && item_is_internal(coord))) { + /* Continue to traverse tree downward. 
*/ + return 0; + } + + /* + * make sure that @h->coord is set to twig node and that it is either + * set to extent item or after extent item + */ + assert("vs-356", h->level == TWIG_LEVEL); + assert("vs-357", ({ + coord_t lcoord; + coord_dup(&lcoord, coord); + check_me("vs-733", coord_set_to_left(&lcoord) == 0); + item_is_extent(&lcoord); + } + )); + + if (*outcome == NS_FOUND) { + /* we have found desired key on twig level in extent item */ + h->result = CBK_COORD_FOUND; + *outcome = LOOKUP_DONE; + return 1; + } + + if (!(h->flags & CBK_FOR_INSERT)) { + /* tree traversal is not for insertion. Just return + CBK_COORD_NOTFOUND. */ + h->result = CBK_COORD_NOTFOUND; + *outcome = LOOKUP_DONE; + return 1; + } + + /* take a look at the item to the right of h -> coord */ + result = is_next_item_internal(coord, h->key, h->active_lh); + if (unlikely(result < 0)) { + h->error = "get_right_neighbor failed"; + h->result = result; + *outcome = LOOKUP_DONE; + return 1; + } + if (result == 0) { + /* + * item to the right is also an extent one. Allocate a new node + * and insert pointer to it after item h -> coord. + * + * This is a result of extents being located at the twig + * level. For explanation, see comment just above + * is_next_item_internal(). 
+ */ + znode *loaded; + + if (cbk_lock_mode(h->level, h) != ZNODE_WRITE_LOCK) { + /* + * we got node read locked, restart coord_by_key to + * have write lock on twig level + */ + h->lock_level = TWIG_LEVEL; + h->lock_mode = ZNODE_WRITE_LOCK; + *outcome = LOOKUP_REST; + return 1; + } + + loaded = coord->node; + result = + add_empty_leaf(coord, h->active_lh, h->key, + rd_key(coord, &key)); + if (result) { + h->error = "could not add empty leaf"; + h->result = result; + *outcome = LOOKUP_DONE; + return 1; + } + /* added empty leaf is locked (h->active_lh), its parent node + is unlocked, h->coord is set as EMPTY */ + assert("vs-13", coord->between == EMPTY_NODE); + assert("vs-14", znode_is_write_locked(coord->node)); + assert("vs-15", + WITH_DATA(coord->node, node_is_empty(coord->node))); + assert("vs-16", jnode_is_leaf(ZJNODE(coord->node))); + assert("vs-17", coord->node == h->active_lh->node); + *outcome = LOOKUP_DONE; + h->result = CBK_COORD_NOTFOUND; + return 1; + } else if (result == 1) { + /* + * this is special case mentioned in the comment on + * tree.h:cbk_flags. We have found internal item immediately on + * the right of extent, and we are going to insert new item + * there. Key of item we are going to insert is smaller than + * leftmost key in the node pointed to by said internal item + * (otherwise search wouldn't come to the extent in the first + * place). + * + * This is a result of extents being located at the twig + * level. For explanation, see comment just above + * is_next_item_internal(). 
+ */ + h->flags &= ~CBK_TRUST_DK; + } else { + assert("vs-8", result == 2); + *outcome = LOOKUP_REST; + return 1; + } + assert("vs-362", WITH_DATA(coord->node, item_is_internal(coord))); + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/estimate.c b/fs/reiser4/estimate.c new file mode 100644 index 000000000000..ca2652af1cfe --- /dev/null +++ b/fs/reiser4/estimate.c @@ -0,0 +1,129 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "debug.h" +#include "dformat.h" +#include "tree.h" +#include "carry.h" +#include "inode.h" +#include "plugin/cluster.h" +#include "plugin/item/ctail.h" + +/* This returns how many nodes might get dirty and added nodes if @children + nodes are dirtied + + Amount of internals which will get dirty or get allocated we estimate as 5% + of the childs + 1 balancing. 1 balancing is 2 neighbours, 2 new blocks and + the current block on the leaf level, 2 neighbour nodes + the current (or 1 + neighbour and 1 new and the current) on twig level, 2 neighbour nodes on + upper levels and 1 for a new root. So 5 for leaf level, 3 for twig level, + 2 on upper + 1 for root. + + Do not calculate the current node of the lowest level here - this is overhead + only. + + children is almost always 1 here. Exception is flow insertion +*/ +static reiser4_block_nr +max_balance_overhead(reiser4_block_nr childen, tree_level tree_height) +{ + reiser4_block_nr ten_percent; + + ten_percent = ((103 * childen) >> 10); + + /* If we have too many balancings at the time, tree height can raise on + more then 1. Assume that if tree_height is 5, it can raise on 1 only. + */ + return ((tree_height < 5 ? 
5 : tree_height) * 2 + (4 + ten_percent));
+}
+
+/* this returns maximal possible number of nodes which can be modified plus
+   number of new nodes which can be required to perform insertion of one item
+   into the tree */
+/* it is only called when tree height changes, or gets initialized */
+reiser4_block_nr calc_estimate_one_insert(tree_level height)
+{
+	return 1 + max_balance_overhead(1, height);
+}
+
+reiser4_block_nr estimate_one_insert_item(reiser4_tree * tree)
+{
+	return tree->estimate_one_insert;
+}
+
+/* this returns maximal possible number of nodes which can be modified plus
+   number of new nodes which can be required to perform insertion of one unit
+   into an item in the tree */
+reiser4_block_nr estimate_one_insert_into_item(reiser4_tree * tree)
+{
+	/* estimate insert into item just like item insertion */
+	return tree->estimate_one_insert;
+}
+
+reiser4_block_nr estimate_one_item_removal(reiser4_tree * tree)
+{
+	/* on item removal reiser4 does not try to pack nodes more compact, so,
+	   only one node may be dirtied on leaf level */
+	return tree->estimate_one_insert;
+}
+
+/* on leaf level insert_flow may add CARRY_FLOW_NEW_NODES_LIMIT new nodes and
+   dirty 3 existing nodes (insert point and both its neighbors).
+   Max_balance_overhead should estimate number of blocks which may change/get
+   added on internal levels */
+reiser4_block_nr estimate_insert_flow(tree_level height)
+{
+	return 3 + CARRY_FLOW_NEW_NODES_LIMIT + max_balance_overhead(3 +
+								     CARRY_FLOW_NEW_NODES_LIMIT,
+								     height);
+}
+
+/* returns max number of nodes that can be occupied by disk cluster */
+static reiser4_block_nr estimate_cluster(struct inode *inode, int unprepped)
+{
+	int per_cluster;
+	per_cluster = (unprepped ?
1 : cluster_nrpages(inode)); + return 3 + per_cluster + + max_balance_overhead(3 + per_cluster, + REISER4_MAX_ZTREE_HEIGHT); +} + +/* how many nodes might get dirty and added + during insertion of a disk cluster */ +reiser4_block_nr estimate_insert_cluster(struct inode *inode) +{ + return estimate_cluster(inode, 1); /* 24 */ +} + +/* how many nodes might get dirty and added + during update of a (prepped or unprepped) disk cluster */ +reiser4_block_nr estimate_update_cluster(struct inode *inode) +{ + return estimate_cluster(inode, 0); /* 44, for 64K-cluster */ +} + +/* How many nodes occupied by a disk cluster might get dirty. + Note that this estimation is not precise (i.e. disk cluster + can occupy more nodes). + Q: Why we don't use precise estimation? + A: 1.Because precise estimation is fairly bad: 65536 nodes + for 64K logical cluster, it means 256M of dead space on + a partition + 2.It is a very rare case when disk cluster occupies more + nodes then this estimation returns. +*/ +reiser4_block_nr estimate_dirty_cluster(struct inode *inode) +{ + return cluster_nrpages(inode) + 4; +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/export_ops.c b/fs/reiser4/export_ops.c new file mode 100644 index 000000000000..a54957eec5ff --- /dev/null +++ b/fs/reiser4/export_ops.c @@ -0,0 +1,325 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "inode.h" +#include "plugin/plugin.h" + +/* + * Supported file-handle types + */ +typedef enum { + FH_WITH_PARENT = 0x10, /* file handle with parent */ + FH_WITHOUT_PARENT = 0x11 /* file handle without parent */ +} reiser4_fhtype; + +#define NFSERROR (255) + +/* initialize place-holder for object */ +static void object_on_wire_init(reiser4_object_on_wire *o) +{ + o->plugin = NULL; +} + +/* finish with @o */ +static void object_on_wire_done(reiser4_object_on_wire *o) +{ + if (o->plugin != NULL) + o->plugin->wire.done(o); +} + +/* + * read serialized object identity from @addr and store information about + * object in @obj. This is dual to encode_inode(). + */ +static char *decode_inode(struct super_block *s, char *addr, + reiser4_object_on_wire * obj) +{ + file_plugin *fplug; + + /* identifier of object plugin is stored in the first two bytes, + * followed by... */ + fplug = file_plugin_by_disk_id(reiser4_get_tree(s), (d16 *) addr); + if (fplug != NULL) { + addr += sizeof(d16); + obj->plugin = fplug; + assert("nikita-3520", fplug->wire.read != NULL); + /* plugin specific encoding of object identity. */ + addr = fplug->wire.read(addr, obj); + } else + addr = ERR_PTR(RETERR(-EINVAL)); + return addr; +} + +static struct dentry *reiser4_get_dentry(struct super_block *super, + void *data); +/** + * reiser4_decode_fh: decode on-wire object - helper function + * for fh_to_dentry, fh_to_parent export operations; + * @super: super block; + * @addr: onwire object to be decoded; + * + * Returns dentry referring to the object being decoded. 
+ */ +static struct dentry *reiser4_decode_fh(struct super_block * super, + char * addr) +{ + reiser4_object_on_wire object; + + object_on_wire_init(&object); + + addr = decode_inode(super, addr, &object); + if (!IS_ERR(addr)) { + struct dentry *d; + d = reiser4_get_dentry(super, &object); + if (d != NULL && !IS_ERR(d)) + /* FIXME check for -ENOMEM */ + reiser4_get_dentry_fsdata(d)->stateless = 1; + addr = (char *)d; + } + object_on_wire_done(&object); + return (void *)addr; +} + +static struct dentry *reiser4_fh_to_dentry(struct super_block *sb, + struct fid *fid, + int fh_len, int fh_type) +{ + reiser4_context *ctx; + struct dentry *d; + + assert("edward-1536", + fh_type == FH_WITH_PARENT || fh_type == FH_WITHOUT_PARENT); + + ctx = reiser4_init_context(sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + + d = reiser4_decode_fh(sb, (char *)fid->raw); + + reiser4_exit_context(ctx); + return d; +} + +static struct dentry *reiser4_fh_to_parent(struct super_block *sb, + struct fid *fid, + int fh_len, int fh_type) +{ + char * addr; + struct dentry * d; + reiser4_context *ctx; + file_plugin *fplug; + + if (fh_type == FH_WITHOUT_PARENT) + return NULL; + assert("edward-1537", fh_type == FH_WITH_PARENT); + + ctx = reiser4_init_context(sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + addr = (char *)fid->raw; + /* extract 2-bytes file plugin id */ + fplug = file_plugin_by_disk_id(reiser4_get_tree(sb), (d16 *)addr); + if (fplug == NULL) { + d = ERR_PTR(RETERR(-EINVAL)); + goto exit; + } + addr += sizeof(d16); + /* skip previously encoded object */ + addr = fplug->wire.read(addr, NULL /* skip */); + if (IS_ERR(addr)) { + d = (struct dentry *)addr; + goto exit; + } + /* @extract and decode parent object */ + d = reiser4_decode_fh(sb, addr); + exit: + reiser4_exit_context(ctx); + return d; +} + +/* + * Object serialization support. + * + * To support knfsd file system provides export_operations that are used to + * construct and interpret NFS file handles. 
As a generalization of this, + * reiser4 object plugins have serialization support: it provides methods to + * create on-wire representation of identity of reiser4 object, and + * re-create/locate object given its on-wire identity. + * + */ + +/* + * return number of bytes that on-wire representation of @inode's identity + * consumes. + */ +static int encode_inode_size(struct inode *inode) +{ + assert("nikita-3514", inode != NULL); + assert("nikita-3515", inode_file_plugin(inode) != NULL); + assert("nikita-3516", inode_file_plugin(inode)->wire.size != NULL); + + return inode_file_plugin(inode)->wire.size(inode) + sizeof(d16); +} + +/* + * store on-wire representation of @inode's identity at the area beginning at + * @start. + */ +static char *encode_inode(struct inode *inode, char *start) +{ + assert("nikita-3517", inode != NULL); + assert("nikita-3518", inode_file_plugin(inode) != NULL); + assert("nikita-3519", inode_file_plugin(inode)->wire.write != NULL); + + /* + * first, store two-byte identifier of object plugin, then + */ + save_plugin_id(file_plugin_to_plugin(inode_file_plugin(inode)), + (d16 *) start); + start += sizeof(d16); + /* + * call plugin to serialize object's identity + */ + return inode_file_plugin(inode)->wire.write(inode, start); +} + +/* this returns number of 32 bit long numbers encoded in @lenp. 255 is + * returned if file handle can not be stored */ +/** + * reiser4_encode_fh - encode_fh of export operations + * @dentry: + * @fh: + * @lenp: + * @need_parent: + * + */ +static int +reiser4_encode_fh(struct inode *inode, __u32 *fh, int *lenp, + struct inode *parent) +{ + char *addr; + int need; + int delta; + int result; + bool need_parent; + reiser4_context *ctx; + + /* + * knfsd asks as to serialize @inode, and, optionally its + * parent @parent (if it is non-NULL). + * + * encode_inode() and encode_inode_size() is used to build + * representation of object and its parent. All hard work is done by + * object plugins. 
+ */ + need_parent = (parent != NULL); + addr = (char *)fh; + + need = encode_inode_size(inode); + if (need < 0) + return NFSERROR; + if (need_parent) { + delta = encode_inode_size(parent); + if (delta < 0) + return NFSERROR; + need += delta; + } + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + if (need <= sizeof(__u32) * (*lenp)) { + addr = encode_inode(inode, addr); + if (need_parent) + addr = encode_inode(parent, addr); + + /* store in lenp number of 32bit words required for file + * handle. */ + *lenp = (need + sizeof(__u32) - 1) >> 2; + result = need_parent ? FH_WITH_PARENT : FH_WITHOUT_PARENT; + } else + /* no enough space in file handle */ + result = NFSERROR; + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_get_dentry_parent - get_parent of export operations + * @child: + * + */ +static struct dentry *reiser4_get_dentry_parent(struct dentry *child) +{ + struct inode *dir; + dir_plugin *dplug; + struct dentry *result; + reiser4_context *ctx; + + assert("nikita-3527", child != NULL); + + dir = child->d_inode; + assert("nikita-3529", dir != NULL); + + ctx = reiser4_init_context(dir->i_sb); + if (IS_ERR(ctx)) + return (void *)ctx; + + dplug = inode_dir_plugin(dir); + assert("nikita-3531", ergo(dplug != NULL, dplug->get_parent != NULL)); + + if (unlikely(dplug == NULL)) { + reiser4_exit_context(ctx); + return ERR_PTR(RETERR(-ENOTDIR)); + } + result = dplug->get_parent(dir); + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_get_dentry - get_dentry of export operations + * @super: + * @data: + * + * + */ +static struct dentry *reiser4_get_dentry(struct super_block *super, void *data) +{ + reiser4_object_on_wire *o; + + assert("nikita-3522", super != NULL); + assert("nikita-3523", data != NULL); + /* + * this is only supposed to be called by + * + * reiser4_decode_fh->find_exported_dentry + * + * so, reiser4_context should be here already. 
+ */ + assert("nikita-3526", is_in_reiser4_context()); + + o = (reiser4_object_on_wire *)data; + assert("nikita-3524", o->plugin != NULL); + assert("nikita-3525", o->plugin->wire.get != NULL); + + return o->plugin->wire.get(super, o); +} + +struct export_operations reiser4_export_operations = { + .encode_fh = reiser4_encode_fh, + .fh_to_dentry = reiser4_fh_to_dentry, + .fh_to_parent = reiser4_fh_to_parent, + .get_parent = reiser4_get_dentry_parent, +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/flush.c b/fs/reiser4/flush.c new file mode 100644 index 000000000000..b908dede8e8b --- /dev/null +++ b/fs/reiser4/flush.c @@ -0,0 +1,3522 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* The design document for this file is at http://www.namesys.com/v4/v4.html. */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/plugin.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "carry.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "wander.h" +#include "super.h" +#include "entd.h" +#include "reiser4.h" +#include "flush.h" +#include "writeout.h" + +#include +#include /* for struct super_block */ +#include /* for struct page */ +#include /* for struct bio */ +#include +#include + +/* IMPLEMENTATION NOTES */ + +/* PARENT-FIRST: Some terminology: A parent-first traversal is a way of + assigning a total order to the nodes of the tree in which the parent is + placed before its children, which are ordered (recursively) in left-to-right + order. When we speak of a "parent-first preceder", it describes the node that + "came before in forward parent-first order". 
When we speak of a "parent-first + follower", it describes the node that "comes next in parent-first order" + (alternatively the node that "came before in reverse parent-first order"). + + The following pseudo-code prints the nodes of a tree in forward parent-first + order: + + void parent_first (node) + { + print_node (node); + if (node->level > leaf) { + for (i = 0; i < num_children; i += 1) { + parent_first (node->child[i]); + } + } + } +*/ + +/* JUST WHAT ARE WE TRYING TO OPTIMIZE, HERE? The idea is to optimize block + allocation so that a left-to-right scan of the tree's data (i.e., the leaves + in left-to-right order) can be accomplished with sequential reads, which + results in reading nodes in their parent-first order. This is a + read-optimization aspect of the flush algorithm, and there is also a + write-optimization aspect, which is that we wish to make large sequential + writes to the disk by allocating or reallocating blocks so that they can be + written in sequence. Sometimes the read-optimization and write-optimization + goals conflict with each other, as we discuss in more detail below. +*/ + +/* STATE BITS: The flush code revolves around the state of the jnodes it covers. + Here are the relevant jnode->state bits and their relevence to flush: + + JNODE_DIRTY: If a node is dirty, it must be flushed. But in order to be + written it must be allocated first. In order to be considered allocated, + the jnode must have exactly one of { JNODE_OVRWR, JNODE_RELOC } set. These + two bits are exclusive, and all dirtied jnodes eventually have one of these + bits set during each transaction. + + JNODE_CREATED: The node was freshly created in its transaction and has no + previous block address, so it is unconditionally assigned to be relocated, + although this is mainly for code-convenience. It is not being 'relocated' + from anything, but in almost every regard it is treated as part of the + relocate set. 
The JNODE_CREATED bit remains set even after JNODE_RELOC is + set, so the actual relocate can be distinguished from the + created-and-allocated set easily: relocate-set members (belonging to the + preserve-set) have (JNODE_RELOC) set and created-set members which have no + previous location to preserve have (JNODE_RELOC | JNODE_CREATED) set. + + JNODE_OVRWR: The node belongs to atom's overwrite set. The flush algorithm + made the decision to maintain the pre-existing location for this node and + it will be written to the wandered-log. + + JNODE_RELOC: The flush algorithm made the decision to relocate this block + (if it was not created, see note above). A block with JNODE_RELOC set is + eligible for early-flushing and may be submitted during flush_empty_queues. + When the JNODE_RELOC bit is set on a znode, the parent node's internal item + is modified and the znode is rehashed. + + JNODE_SQUEEZABLE: Before shifting everything left, the flush algorithm + scans the node and calls plugin->f.squeeze() method for its items. By this + technology we update disk clusters of cryptcompress objects. Also if + leftmost point that was found by flush scan has this flag (races with + write(), rare case) the flush algorythm makes the decision to pass it to + squalloc() in spite of its flushprepped status for squeezing, not for + repeated allocation. + + JNODE_FLUSH_QUEUED: This bit is set when a call to flush enters the jnode + into its flush queue. This means the jnode is not on any clean or dirty + list, instead it is moved to one of the flush queue (see flush_queue.h) + object private list. This prevents multiple concurrent flushes from + attempting to start flushing from the same node. + + (DEAD STATE BIT) JNODE_FLUSH_BUSY: This bit was set during the bottom-up + squeeze-and-allocate on a node while its children are actively being + squeezed and allocated. 
This flag was created to avoid submitting a write + request for a node while its children are still being allocated and + squeezed. Then flush queue was re-implemented to allow unlimited number of + nodes be queued. This flag support was commented out in source code because + we decided that there was no reason to submit queued nodes before + jnode_flush() finishes. However, current code calls fq_write() during a + slum traversal and may submit "busy nodes" to disk. Probably we can + re-enable the JNODE_FLUSH_BUSY bit support in future. + + With these state bits, we describe a test used frequently in the code below, + jnode_is_flushprepped()(and the spin-lock-taking jnode_check_flushprepped()). + The test for "flushprepped" returns true if any of the following are true: + + - The node is not dirty + - The node has JNODE_RELOC set + - The node has JNODE_OVRWR set + + If either the node is not dirty or it has already been processed by flush + (and assigned JNODE_OVRWR or JNODE_RELOC), then it is prepped. If + jnode_is_flushprepped() returns true then flush has work to do on that node. +*/ + +/* FLUSH_PREP_ONCE_PER_TRANSACTION: Within a single transaction a node is never + flushprepped twice (unless an explicit call to flush_unprep is made as + described in detail below). For example a node is dirtied, allocated, and + then early-flushed to disk and set clean. Before the transaction commits, the + page is dirtied again and, due to memory pressure, the node is flushed again. + The flush algorithm will not relocate the node to a new disk location, it + will simply write it to the same, previously relocated position again. +*/ + +/* THE BOTTOM-UP VS. TOP-DOWN ISSUE: This code implements a bottom-up algorithm + where we start at a leaf node and allocate in parent-first order by iterating + to the right. At each step of the iteration, we check for the right neighbor. 
+ Before advancing to the right neighbor, we check if the current position and + the right neighbor share the same parent. If they do not share the same + parent, the parent is allocated before the right neighbor. + + This process goes recursively up the tree and squeeze nodes level by level as + long as the right neighbor and the current position have different parents, + then it allocates the right-neighbors-with-different-parents on the way back + down. This process is described in more detail in + flush_squalloc_changed_ancestor and the recursive function + squalloc_one_changed_ancestor. But the purpose here is not to discuss the + specifics of the bottom-up approach as it is to contrast the bottom-up and + top-down approaches. + + The top-down algorithm was implemented earlier (April-May 2002). In the + top-down approach, we find a starting point by scanning left along each level + past dirty nodes, then going up and repeating the process until the left node + and the parent node are clean. We then perform a parent-first traversal from + the starting point, which makes allocating in parent-first order trivial. + After one subtree has been allocated in this manner, we move to the right, + try moving upward, then repeat the parent-first traversal. + + Both approaches have problems that need to be addressed. Both are + approximately the same amount of code, but the bottom-up approach has + advantages in the order it acquires locks which, at the very least, make it + the better approach. At first glance each one makes the other one look + simpler, so it is important to remember a few of the problems with each one. + + Main problem with the top-down approach: When you encounter a clean child + during the parent-first traversal, what do you do? You would like to avoid + searching through a large tree of nodes just to find a few dirty leaves at + the bottom, and there is not an obvious solution. 
One of the advantages of + the top-down approach is that during the parent-first traversal you check + every child of a parent to see if it is dirty. In this way, the top-down + approach easily handles the main problem of the bottom-up approach: + unallocated children. + + The unallocated children problem is that before writing a node to disk we + must make sure that all of its children are allocated. Otherwise, the writing + the node means extra I/O because the node will have to be written again when + the child is finally allocated. + + WE HAVE NOT YET ELIMINATED THE UNALLOCATED CHILDREN PROBLEM. Except for bugs, + this should not cause any file system corruption, it only degrades I/O + performance because a node may be written when it is sure to be written at + least one more time in the same transaction when the remaining children are + allocated. What follows is a description of how we will solve the problem. +*/ + +/* HANDLING UNALLOCATED CHILDREN: During flush we may allocate a parent node, + then proceeding in parent first order, allocate some of its left-children, + then encounter a clean child in the middle of the parent. We do not allocate + the clean child, but there may remain unallocated (dirty) children to the + right of the clean child. If we were to stop flushing at this moment and + write everything to disk, the parent might still contain unallocated + children. + + We could try to allocate all the descendents of every node that we allocate, + but this is not necessary. Doing so could result in allocating the entire + tree: if the root node is allocated then every unallocated node would have to + be allocated before flushing. Actually, we do not have to write a node just + because we allocate it. It is possible to allocate but not write a node + during flush, when it still has unallocated children. However, this approach + is probably not optimal for the following reason. 
+ + The flush algorithm is designed to allocate nodes in parent-first order in an + attempt to optimize reads that occur in the same order. Thus we are + read-optimizing for a left-to-right scan through all the leaves in the + system, and we are hoping to write-optimize at the same time because those + nodes will be written together in batch. What happens, however, if we assign + a block number to a node in its read-optimized order but then avoid writing + it because it has unallocated children? In that situation, we lose out on the + write-optimization aspect because a node will have to be written again to the + its location on the device, later, which likely means seeking back to that + location. + + So there are tradeoffs. We can choose either: + + A. Allocate all unallocated children to preserve both write-optimization and + read-optimization, but this is not always desirable because it may mean + having to allocate and flush very many nodes at once. + + B. Defer writing nodes with unallocated children, keep their read-optimized + locations, but sacrifice write-optimization because those nodes will be + written again. + + C. Defer writing nodes with unallocated children, but do not keep their + read-optimized locations. Instead, choose to write-optimize them later, when + they are written. To facilitate this, we "undo" the read-optimized allocation + that was given to the node so that later it can be write-optimized, thus + "unpreparing" the flush decision. This is a case where we disturb the + FLUSH_PREP_ONCE_PER_TRANSACTION rule described above. By a call to + flush_unprep() we will: if the node was wandered, unset the JNODE_OVRWR bit; + if the node was relocated, unset the JNODE_RELOC bit, non-deferred-deallocate + its block location, and set the JNODE_CREATED bit, effectively setting the + node back to an unallocated state. + + We will take the following approach in v4.0: for twig nodes we will always + finish allocating unallocated children (A). 
For nodes with (level > TWIG) + we will defer writing and choose write-optimization (C). + + To summarize, there are several parts to a solution that avoids the problem + with unallocated children: + + FIXME-ZAM: Still no one approach is implemented to eliminate the + "UNALLOCATED CHILDREN" problem because there was an experiment which was done + showed that we have 1-2 nodes with unallocated children for thousands of + written nodes. The experiment was simple like coping/deletion of linux kernel + sources. However the problem can arise in more complex tests. I think we have + jnode_io_hook to insert a check for unallocated children and see what kind of + problem we have. + + 1. When flush reaches a stopping point (e.g. a clean node) it should continue + calling squeeze-and-allocate on any remaining unallocated children. + FIXME: Difficulty to implement: should be simple -- amounts to adding a while + loop to jnode_flush, see comments in that function. + + 2. When flush reaches flush_empty_queue(), some of the (level > TWIG) nodes + may still have unallocated children. If the twig level has unallocated + children it is an assertion failure. If a higher-level node has unallocated + children, then it should be explicitly de-allocated by a call to + flush_unprep(). + FIXME: Difficulty to implement: should be simple. + + 3. (CPU-Optimization) Checking whether a node has unallocated children may + consume more CPU cycles than we would like, and it is possible (but medium + complexity) to optimize this somewhat in the case where large sub-trees are + flushed. The following observation helps: if both the left- and + right-neighbor of a node are processed by the flush algorithm then the node + itself is guaranteed to have all of its children allocated. However, the cost + of this check may not be so expensive after all: it is not needed for leaves + and flush can guarantee this property for twigs. 
That leaves only (level > + TWIG) nodes that have to be checked, so this optimization only helps if at + least three (level > TWIG) nodes are flushed in one pass, and the savings + will be very small unless there are many more (level > TWIG) nodes. But if + there are many (level > TWIG) nodes then the number of blocks being written + will be very large, so the savings may be insignificant. That said, the idea + is to maintain both the left and right edges of nodes that are processed in + flush. When flush_empty_queue() is called, a relatively simple test will + tell whether the (level > TWIG) node is on the edge. If it is on the edge, + the slow check is necessary, but if it is in the interior then it can be + assumed to have all of its children allocated. FIXME: medium complexity to + implement, but simple to verify given that we must have a slow check anyway. + + 4. (Optional) This part is optional, not for v4.0--flush should work + independently of whether this option is used or not. Called RAPID_SCAN, the + idea is to amend the left-scan operation to take unallocated children into + account. Normally, the left-scan operation goes left as long as adjacent + nodes are dirty up until some large maximum value (FLUSH_SCAN_MAXNODES) at + which point it stops and begins flushing. But scan-left may stop at a + position where there are unallocated children to the left with the same + parent. When RAPID_SCAN is enabled, the ordinary scan-left operation stops + after FLUSH_RELOCATE_THRESHOLD, which is much smaller than + FLUSH_SCAN_MAXNODES, then procedes with a rapid scan. The rapid scan skips + all the interior children of a node--if the leftmost child of a twig is + dirty, check its left neighbor (the rightmost child of the twig to the left). + If the left neighbor of the leftmost child is also dirty, then continue the + scan at the left twig and repeat. 
This option will cause flush to allocate + more twigs in a single pass, but it also has the potential to write many more + nodes than would otherwise be written without the RAPID_SCAN option. + RAPID_SCAN was partially implemented, code removed August 12, 2002 by JMACD. +*/ + +/* FLUSH CALLED ON NON-LEAF LEVEL. Most of our design considerations assume that + the starting point for flush is a leaf node, but actually the flush code + cares very little about whether or not this is true. It is possible that all + the leaf nodes are flushed and dirty parent nodes still remain, in which case + jnode_flush() is called on a non-leaf argument. Flush doesn't care--it treats + the argument node as if it were a leaf, even when it is not. This is a simple + approach, and there may be a more optimal policy but until a problem with + this approach is discovered, simplest is probably best. + + NOTE: In this case, the ordering produced by flush is parent-first only if + you ignore the leaves. This is done as a matter of simplicity and there is + only one (shaky) justification. When an atom commits, it flushes all leaf + level nodes first, followed by twigs, and so on. With flushing done in this + order, if flush is eventually called on a non-leaf node it means that + (somehow) we reached a point where all leaves are clean and only internal + nodes need to be flushed. If that it the case, then it means there were no + leaves that were the parent-first preceder/follower of the parent. This is + expected to be a rare case, which is why we do nothing special about it. + However, memory pressure may pass an internal node to flush when there are + still dirty leaf nodes that need to be flushed, which could prove our + original assumptions "inoperative". If this needs to be fixed, then + scan_left/right should have special checks for the non-leaf levels. 
For + example, instead of passing from a node to the left neighbor, it should pass + from the node to the left neighbor's rightmost descendent (if dirty). + +*/ + +/* UNIMPLEMENTED AS YET: REPACKING AND RESIZING. We walk the tree in 4MB-16MB + chunks, dirtying everything and putting it into a transaction. We tell the + allocator to allocate the blocks as far as possible towards one end of the + logical device--the left (starting) end of the device if we are walking from + left to right, the right end of the device if we are walking from right to + left. We then make passes in alternating directions, and as we do this the + device becomes sorted such that tree order and block number order fully + correlate. + + Resizing is done by shifting everything either all the way to the left or all + the way to the right, and then reporting the last block. +*/ + +/* RELOCATE DECISIONS: The code makes a decision to relocate in several places. + This descibes the policy from the highest level: + + The FLUSH_RELOCATE_THRESHOLD parameter: If we count this many consecutive + nodes on the leaf level during flush-scan (right, left), then we + unconditionally decide to relocate leaf nodes. + + Otherwise, there are two contexts in which we make a decision to relocate: + + 1. The REVERSE PARENT-FIRST context: Implemented in reverse_allocate + During the initial stages of flush, after scan-right completes, we want to + ask the question: should we relocate this leaf node and thus dirty the parent + node. Then if the node is a leftmost child its parent is its own parent-first + preceder, thus we repeat the question at the next level up, and so on. In + these cases we are moving in the reverse-parent first direction. + + There is another case which is considered the reverse direction, which comes + at the end of a twig in reverse_relocate_end_of_twig(). As we finish + processing a twig we may reach a point where there is a clean twig to the + right with a dirty leftmost child. 
In this case, we may wish to relocate the + child by testing if it should be relocated relative to its parent. + + 2. The FORWARD PARENT-FIRST context: Testing for forward relocation is done + in allocate_znode. What distinguishes the forward parent-first case from the + reverse-parent first case is that the preceder has already been allocated in + the forward case, whereas in the reverse case we don't know what the preceder + is until we finish "going in reverse". That simplifies the forward case + considerably, and there we actually use the block allocator to determine + whether, e.g., a block closer to the preceder is available. +*/ + +/* SQUEEZE_LEFT_EDGE: Unimplemented idea for future consideration. The idea is, + once we finish scan-left and find a starting point, if the parent's left + neighbor is dirty then squeeze the parent's left neighbor and the parent. + This may change the flush-starting-node's parent. Repeat until the child's + parent is stable. If the child is a leftmost child, repeat this left-edge + squeezing operation at the next level up. Note that we cannot allocate + extents during this or they will be out of parent-first order. There is also + some difficult coordinate maintenence issues. We can't do a tree search to + find coordinates again (because we hold locks), we have to determine them + from the two nodes being squeezed. Looks difficult, but has potential to + increase space utilization. */ + +/* Flush-scan helper functions. */ +static void scan_init(flush_scan * scan); +static void scan_done(flush_scan * scan); + +/* Flush-scan algorithm. 
*/ +static int scan_left(flush_scan * scan, flush_scan * right, jnode * node, + unsigned limit); +static int scan_right(flush_scan * scan, jnode * node, unsigned limit); +static int scan_common(flush_scan * scan, flush_scan * other); +static int scan_formatted(flush_scan * scan); +static int scan_unformatted(flush_scan * scan, flush_scan * other); +static int scan_by_coord(flush_scan * scan); + +/* Initial flush-point ancestor allocation. */ +static int alloc_pos_and_ancestors(flush_pos_t *pos); +static int alloc_one_ancestor(const coord_t *coord, flush_pos_t *pos); +static int set_preceder(const coord_t *coord_in, flush_pos_t *pos); + +/* Main flush algorithm. + Note on abbreviation: "squeeze and allocate" == "squalloc". */ +static int squalloc(flush_pos_t *pos); + +/* Flush squeeze implementation. */ +static int squeeze_right_non_twig(znode * left, znode * right); +static int shift_one_internal_unit(znode * left, znode * right); + +/* Flush reverse parent-first relocation routines. */ +static int reverse_allocate_parent(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos); + +/* Flush allocate write-queueing functions: */ +static int allocate_znode(znode * node, const coord_t *parent_coord, + flush_pos_t *pos); +static int lock_parent_and_allocate_znode(znode *, flush_pos_t *); + +/* Flush helper functions: */ +static int jnode_lock_parent_coord(jnode * node, + coord_t *coord, + lock_handle * parent_lh, + load_count * parent_zh, + znode_lock_mode mode, int try); +static int neighbor_in_slum(znode * node, lock_handle * right_lock, sideof side, + znode_lock_mode mode, int check_dirty, int expected); +static int znode_same_parents(znode * a, znode * b); + +static int znode_check_flushprepped(znode * node) +{ + return jnode_check_flushprepped(ZJNODE(node)); +} +static void update_znode_dkeys(znode * left, znode * right); + +/* Flush position functions */ +static void pos_init(flush_pos_t *pos); +static int pos_valid(flush_pos_t *pos); +static void 
pos_done(flush_pos_t *pos); +static int pos_stop(flush_pos_t *pos); + +/* check that @org is first jnode extent unit, if extent is unallocated, + * because all jnodes of unallocated extent are dirty and of the same atom. */ +#define checkchild(scan) \ +assert("nikita-3435", \ + ergo(scan->direction == LEFT_SIDE && \ + (scan->parent_coord.node->level == TWIG_LEVEL) && \ + jnode_is_unformatted(scan->node) && \ + extent_is_unallocated(&scan->parent_coord), \ + extent_unit_index(&scan->parent_coord) == index_jnode(scan->node))) + +/* This flush_cnt variable is used to track the number of concurrent flush + operations, useful for debugging. It is initialized in txnmgr.c out of + laziness (because flush has no static initializer function...) */ +ON_DEBUG(atomic_t flush_cnt;) + +/* check fs backing device for write congestion */ +static int check_write_congestion(void) +{ + struct super_block *sb; + struct backing_dev_info *bdi; + + sb = reiser4_get_current_sb(); + bdi = inode_to_bdi(reiser4_get_super_fake(sb)); + return bdi_write_congested(bdi); +} + +/* conditionally write flush queue */ +static int write_prepped_nodes(flush_pos_t *pos) +{ + int ret; + + assert("zam-831", pos); + assert("zam-832", pos->fq); + + if (!(pos->flags & JNODE_FLUSH_WRITE_BLOCKS)) + return 0; + + if (check_write_congestion()) + return 0; + + ret = reiser4_write_fq(pos->fq, pos->nr_written, + WRITEOUT_SINGLE_STREAM | WRITEOUT_FOR_PAGE_RECLAIM); + return ret; +} + +/* Proper release all flush pos. 
resources then move flush position to new + locked node */ +static void move_flush_pos(flush_pos_t *pos, lock_handle * new_lock, + load_count * new_load, const coord_t *new_coord) +{ + assert("zam-857", new_lock->node == new_load->node); + + if (new_coord) { + assert("zam-858", new_coord->node == new_lock->node); + coord_dup(&pos->coord, new_coord); + } else { + coord_init_first_unit(&pos->coord, new_lock->node); + } + + if (pos->child) { + jput(pos->child); + pos->child = NULL; + } + + move_load_count(&pos->load, new_load); + done_lh(&pos->lock); + move_lh(&pos->lock, new_lock); +} + +/* delete empty node which link from the parent still exists. */ +static int delete_empty_node(znode * node) +{ + reiser4_key smallest_removed; + + assert("zam-1019", node != NULL); + assert("zam-1020", node_is_empty(node)); + assert("zam-1023", znode_is_wlocked(node)); + + return reiser4_delete_node(node, &smallest_removed, NULL, 1); +} + +/* Prepare flush position for alloc_pos_and_ancestors() and squalloc() */ +static int prepare_flush_pos(flush_pos_t *pos, jnode * org) +{ + int ret; + load_count load; + lock_handle lock; + + init_lh(&lock); + init_load_count(&load); + + if (jnode_is_znode(org)) { + ret = longterm_lock_znode(&lock, JZNODE(org), + ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI); + if (ret) + return ret; + + ret = incr_load_count_znode(&load, JZNODE(org)); + if (ret) + return ret; + + pos->state = + (jnode_get_level(org) == + LEAF_LEVEL) ? 
POS_ON_LEAF : POS_ON_INTERNAL; + move_flush_pos(pos, &lock, &load, NULL); + } else { + coord_t parent_coord; + ret = jnode_lock_parent_coord(org, &parent_coord, &lock, + &load, ZNODE_WRITE_LOCK, 0); + if (ret) + goto done; + if (!item_is_extent(&parent_coord)) { + /* file was converted to tail, org became HB, we found + internal item */ + ret = -EAGAIN; + goto done; + } + + pos->state = POS_ON_EPOINT; + move_flush_pos(pos, &lock, &load, &parent_coord); + pos->child = jref(org); + if (extent_is_unallocated(&parent_coord) + && extent_unit_index(&parent_coord) != index_jnode(org)) { + /* @org is not first child of its parent unit. This may + happen because longerm lock of its parent node was + released between scan_left and scan_right. For now + work around this having flush to repeat */ + ret = -EAGAIN; + } + } + +done: + done_load_count(&load); + done_lh(&lock); + return ret; +} + +static txmod_plugin *get_txmod_plugin(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + return txmod_plugin_by_id(get_super_private(sb)->txmod); +} + +/* TODO LIST (no particular order): */ +/* I have labelled most of the legitimate FIXME comments in this file with + letters to indicate which issue they relate to. There are a few miscellaneous + FIXMEs with specific names mentioned instead that need to be + inspected/resolved. */ +/* B. There is an issue described in reverse_allocate having to do with an + imprecise is_preceder? check having to do with partially-dirty extents. The + code that sets preceder hints and computes the preceder is basically + untested. Careful testing needs to be done that preceder calculations are + done correctly, since if it doesn't affect correctness we will not catch this + stuff during regular testing. */ +/* C. EINVAL, E_DEADLOCK, E_NO_NEIGHBOR, ENOENT handling. It is unclear which of + these are considered expected but unlikely conditions. 
   Flush currently
   returns 0 (i.e., success but no progress, i.e., restart) whenever it receives
   any of these in jnode_flush(). Many of the calls that may produce one of
   these return values (i.e., longterm_lock_znode, reiser4_get_parent,
   reiser4_get_neighbor, ...) check some of these values themselves and, for
   instance, stop flushing instead of resulting in a restart. If any of these
   results are true error conditions then flush will go into a busy-loop, as we
   noticed during testing when a corrupt tree caused find_child_ptr to return
   ENOENT. It needs careful thought and testing of corner conditions.
*/
/* D. Atomicity of flush_prep against deletion and flush concurrency. Suppose a
   created block is assigned a block number then early-flushed to disk. It is
   dirtied again and flush is called again. Concurrently, that block is deleted,
   and the de-allocation of its block number does not need to be deferred, since
   it is not part of the preserve set (i.e., it didn't exist before the
   transaction). I think there may be a race condition where flush writes the
   dirty, created block after the non-deferred deallocated block number is
   re-allocated, making it possible to write deleted data on top of non-deleted
   data. Its just a theory, but it needs to be thought out. */
/* F. bio_alloc() failure is not handled gracefully. */
/* G. Unallocated children. */
/* H. Add a WANDERED_LIST to the atom to clarify the placement of wandered
   blocks. */
/* I. Rename flush-scan to scan-point, (flush-pos to flush-point?) */

/* JNODE_FLUSH: MAIN ENTRY POINT */
/* This is the main entry point for flushing a jnode and its dirty neighborhood
   (dirty neighborhood is named "slum"). Jnode_flush() is called if reiser4 has
   to write dirty blocks to disk, it happens when Linux VM decides to reduce
   number of dirty pages or as a part of transaction commit.

   Our objective here is to prep and flush the slum the jnode belongs to. We
   want to squish the slum together, and allocate the nodes in it as we squish
   because allocation of children affects squishing of parents.

   The "argument" @node tells flush where to start. From there, flush finds the
   left edge of the slum, and calls squalloc (in which nodes are squeezed and
   allocated). To find a "better place" to start squalloc first we perform a
   flush_scan.

   Flush-scanning may be performed in both left and right directions, but for
   different purposes. When scanning to the left, we are searching for a node
   that precedes a sequence of parent-first-ordered nodes which we will then
   flush in parent-first order. During flush-scanning, we also take the
   opportunity to count the number of consecutive leaf nodes. If this number is
   past some threshold (FLUSH_RELOCATE_THRESHOLD), then we make a decision to
   reallocate leaf nodes (thus favoring write-optimization).

   Since the flush argument node can be anywhere in a sequence of dirty leaves,
   there may also be dirty nodes to the right of the argument. If the scan-left
   operation does not count at least FLUSH_RELOCATE_THRESHOLD nodes then we
   follow it with a right-scan operation to see whether there is, in fact,
   enough nodes to meet the relocate threshold. Each right- and left-scan
   operation uses a single flush_scan object.

   After left-scan and possibly right-scan, we prepare a flush_position object
   with the starting flush point or parent coordinate, which was determined
   using scan-left.

   Next we call the main flush routine, squalloc, which iterates along the leaf
   level, squeezing and allocating nodes (and placing them into the flush
   queue).

   After squalloc returns we take extra steps to ensure that all the children
   of the final twig node are allocated--this involves repeating squalloc
   until we finish at a twig with no unallocated children.

   Finally, we call flush_empty_queue to submit write-requests to disk. If we
   encounter any above-twig nodes during flush_empty_queue that still have
   unallocated children, we flush_unprep them.

   Flush treats several "failure" cases as non-failures, essentially causing
   them to start over. E_DEADLOCK is one example.
   FIXME:(C) EINVAL, E_NO_NEIGHBOR, ENOENT: these should probably be handled
   properly rather than restarting, but there are a bunch of cases to audit.
*/

static int
jnode_flush(jnode * node, long nr_to_write, long *nr_written,
	    flush_queue_t *fq, int flags)
{
	long ret = 0;
	flush_scan *right_scan;
	flush_scan *left_scan;
	flush_pos_t *flush_pos;
	int todo;
	struct super_block *sb;
	reiser4_super_info_data *sbinfo;
	jnode *leftmost_in_slum = NULL;

	assert("jmacd-76619", lock_stack_isclean(get_current_lock_stack()));
	assert("nikita-3022", reiser4_schedulable());

	assert("nikita-3185",
	       get_current_super_private()->delete_mutex_owner != current);

	/* allocate right_scan, left_scan and flush_pos */
	/* NOTE(review): one kmalloc serves all three objects; only
	   @right_scan is ever kfree'd (see cleanup below), which frees the
	   whole region. */
	right_scan =
	    kmalloc(2 * sizeof(*right_scan) + sizeof(*flush_pos),
		    reiser4_ctx_gfp_mask_get());
	if (right_scan == NULL)
		return RETERR(-ENOMEM);
	left_scan = right_scan + 1;
	flush_pos = (flush_pos_t *) (left_scan + 1);

	sb = reiser4_get_current_sb();
	sbinfo = get_super_private(sb);

	/* Flush-concurrency debug code */
#if REISER4_DEBUG
	atomic_inc(&flush_cnt);
#endif

	reiser4_enter_flush(sb);

	/* Initialize a flush position. */
	pos_init(flush_pos);

	flush_pos->nr_written = nr_written;
	flush_pos->fq = fq;
	flush_pos->flags = flags;
	flush_pos->nr_to_write = nr_to_write;

	scan_init(right_scan);
	scan_init(left_scan);

	/* First scan left and remember the leftmost scan position. If the
	   leftmost position is unformatted we remember its parent_coord. We
	   scan until counting FLUSH_SCAN_MAXNODES.

	   If starting @node is unformatted, at the beginning of left scan its
	   parent (twig level node, containing extent item) will be long term
	   locked and lock handle will be stored in the
	   @right_scan->parent_lock. This lock is used to start the rightward
	   scan without redoing the tree traversal (necessary to find parent)
	   and, hence, is kept during leftward scan. As a result, we have to
	   use try-lock when taking long term locks during the leftward scan.
	 */
	ret = scan_left(left_scan, right_scan,
			node, sbinfo->flush.scan_maxnodes);
	if (ret != 0)
		goto failed;

	leftmost_in_slum = jref(left_scan->node);
	scan_done(left_scan);

	/* Then possibly go right to decide if we will use a policy of
	   relocating leaves. This is only done if we did not scan past (and
	   count) enough nodes during the leftward scan. If we do scan right,
	   we only care to go far enough to establish that at least
	   FLUSH_RELOCATE_THRESHOLD number of nodes are being flushed. The scan
	   limit is the difference between left_scan.count and the threshold. */

	todo = sbinfo->flush.relocate_threshold - left_scan->count;
	/* scan right is inherently deadlock prone, because we are
	 * (potentially) holding a lock on the twig node at this moment.
	 * FIXME: this is incorrect comment: lock is not held */
	if (todo > 0) {
		ret = scan_right(right_scan, node, (unsigned)todo);
		if (ret != 0)
			goto failed;
	}

	/* Only the right-scan count is needed, release any rightward locks
	   right away. */
	scan_done(right_scan);

	/* ... and the answer is: we should relocate leaf nodes if at least
	   FLUSH_RELOCATE_THRESHOLD nodes were found. */
	/* NOTE(review): ->count fields are read after scan_done(); assumes
	   scan_done() releases locks but leaves counters intact -- confirm
	   against scan_done()'s definition. */
	flush_pos->leaf_relocate = JF_ISSET(node, JNODE_REPACK) ||
	    (left_scan->count + right_scan->count >=
	     sbinfo->flush.relocate_threshold);

	/* Funny business here. We set the 'point' in the flush_position at
	   prior to starting squalloc regardless of whether the first point is
	   formatted or unformatted. Without this there would be an invariant,
	   in the rest of the code, that if the flush_position is unformatted
	   then flush_position->point is NULL and
	   flush_position->parent_{lock,coord} is set, and if the flush_position
	   is formatted then flush_position->point is non-NULL and no parent
	   info is set.

	   This seems lazy, but it makes the initial calls to
	   reverse_allocate (which ask "is it the pos->point the leftmost
	   child of its parent") much easier because we know the first child
	   already. Nothing is broken by this, but the reasoning is subtle.
	   Holding an extra reference on a jnode during flush can cause us to
	   see nodes with HEARD_BANSHEE during squalloc, because nodes are not
	   removed from sibling lists until they have zero reference count.
	   Flush would never observe a HEARD_BANSHEE node on the left-edge of
	   flush, nodes are only deleted to the right. So if nothing is broken,
	   why fix it?

	   NOTE-NIKITA actually, flush can meet HEARD_BANSHEE node at any
	   point and in any moment, because of the concurrent file system
	   activity (for example, truncate). */

	/* Check jnode state after flush_scan completed. Having a lock on this
	   node or its parent (in case of unformatted) helps us in case of
	   concurrent flushing. */
	if (jnode_check_flushprepped(leftmost_in_slum)
	    && !jnode_convertible(leftmost_in_slum)) {
		ret = 0;
		goto failed;
	}

	/* Now setup flush_pos using scan_left's endpoint. */
	ret = prepare_flush_pos(flush_pos, leftmost_in_slum);
	if (ret)
		goto failed;

	if (znode_get_level(flush_pos->coord.node) == LEAF_LEVEL
	    && node_is_empty(flush_pos->coord.node)) {
		znode *empty = flush_pos->coord.node;

		assert("zam-1022", !ZF_ISSET(empty, JNODE_HEARD_BANSHEE));
		/* Empty leaf: delete it and restart flush (ret from
		   delete_empty_node flows to the restart switch below). */
		ret = delete_empty_node(empty);
		goto failed;
	}

	if (jnode_check_flushprepped(leftmost_in_slum)
	    && !jnode_convertible(leftmost_in_slum)) {
		ret = 0;
		goto failed;
	}

	/* Set pos->preceder and (re)allocate pos and its ancestors if it is
	   needed */
	ret = alloc_pos_and_ancestors(flush_pos);
	if (ret)
		goto failed;

	/* Do the main rightward-bottom-up squeeze and allocate loop. */
	ret = squalloc(flush_pos);
	pos_stop(flush_pos);
	if (ret)
		goto failed;

	/* FIXME_NFQUCMPD: Here, handle the twig-special case for unallocated
	   children. First, the pos_stop() and pos_valid() routines should be
	   modified so that pos_stop() sets a flush_position->stop flag to 1
	   without releasing the current position immediately--instead release
	   it in pos_done(). This is a better implementation than the current
	   one anyway.

	   It is not clear that all fields of the flush_position should not be
	   released, but at the very least the parent_lock, parent_coord, and
	   parent_load should remain held because they are hold the last twig
	   when pos_stop() is called.

	   When we reach this point in the code, if the parent_coord is set to
	   after the last item then we know that flush reached the end of a twig
	   (and according to the new flush queueing design, we will return now).
	   If parent_coord is not past the last item, we should check if the
	   current twig has any unallocated children to the right (we are not
	   concerned with unallocated children to the left--in that case the
	   twig itself should not have been allocated). If the twig has
	   unallocated children to the right, set the parent_coord to that
	   position and then repeat the call to squalloc.

	   Testing for unallocated children may be defined in two ways: if any
	   internal item has a fake block number, it is unallocated; if any
	   extent item is unallocated then all of its children are unallocated.
	   But there is a more aggressive approach: if there are any dirty
	   children of the twig to the right of the current position, we may
	   wish to relocate those nodes now. Checking for potential relocation
	   is more expensive as it requires knowing whether there are any dirty
	   children that are not unallocated. The extent_needs_allocation should
	   be used after setting the correct preceder.

	   When we reach the end of a twig at this point in the code, if the
	   flush can continue (when the queue is ready) it will need some
	   information on the future starting point. That should be stored away
	   in the flush_handle using a seal, I believe. Holding a jref() on the
	   future starting point may break other code that deletes that node.
	 */

	/* FIXME_NFQUCMPD: Also, we don't want to do any flushing when flush is
	   called above the twig level. If the VM calls flush above the twig
	   level, do nothing and return (but figure out why this happens). The
	   txnmgr should be modified to only flush its leaf-level dirty list.
	   This will do all the necessary squeeze and allocate steps but leave
	   unallocated branches and possibly unallocated twigs (when the twig's
	   leftmost child is not dirty). After flushing the leaf level, the
	   remaining unallocated nodes should be given write-optimized
	   locations. (Possibly, the remaining unallocated twigs should be
	   allocated just before their leftmost child.) */

	/* Any failure reaches this point. */
failed:

	switch (ret) {
	case -E_REPEAT:
	case -EINVAL:
	case -E_DEADLOCK:
	case -E_NO_NEIGHBOR:
	case -ENOENT:
		/* FIXME(C): Except for E_DEADLOCK, these should probably be
		   handled properly in each case. They already are handled in
		   many cases. */
		/* Something bad happened, but difficult to avoid... Try again!
		 */
		ret = 0;
	}

	if (leftmost_in_slum)
		jput(leftmost_in_slum);

	pos_done(flush_pos);
	scan_done(left_scan);
	scan_done(right_scan);
	/* frees the single allocation covering both scans and flush_pos */
	kfree(right_scan);

	ON_DEBUG(atomic_dec(&flush_cnt));

	reiser4_leave_flush(sb);

	return ret;
}

/* The reiser4 flush subsystem can be turned into "rapid flush mode" means that
 * flusher should submit all prepped nodes immediately without keeping them in
 * flush queues for long time. The reason for rapid flush mode is to free
 * memory as fast as possible. */

#if REISER4_USE_RAPID_FLUSH

/**
 * submit all prepped nodes if rapid flush mode is set,
 * turn rapid flush mode off.
 */

static int rapid_flush(flush_pos_t *pos)
{
	if (!wbq_available())
		return 0;

	return write_prepped_nodes(pos);
}

#else

#define rapid_flush(pos) (0)

#endif				/* REISER4_USE_RAPID_FLUSH */

/* Pick the jnode from which this flush pass should start: either @start (if
   given and still not flushprepped) or the first suitable dirty jnode of
   @atom. Returns the chosen node spin-locked, or NULL if the dirty lists are
   exhausted. Caller holds the atom spin lock throughout. */
static jnode *find_flush_start_jnode(jnode *start, txn_atom * atom,
				     flush_queue_t *fq, int *nr_queued,
				     int flags)
{
	jnode * node;

	if (start != NULL) {
		spin_lock_jnode(start);
		if (!jnode_is_flushprepped(start)) {
			assert("zam-1056", start->atom == atom);
			node = start;
			goto enter;
		}
		spin_unlock_jnode(start);
	}
	/*
	 * In this loop we process all already prepped (RELOC or OVRWR) and
	 * dirtied again nodes. The atom spin lock is not released until all
	 * dirty nodes processed or not prepped node found in the atom dirty
	 * lists.
	 */
	while ((node = find_first_dirty_jnode(atom, flags))) {
		spin_lock_jnode(node);
enter:
		assert("zam-881", JF_ISSET(node, JNODE_DIRTY));
		assert("zam-898", !JF_ISSET(node, JNODE_OVRWR));

		if (JF_ISSET(node, JNODE_WRITEBACK)) {
			/* move node to the end of atom's writeback list */
			list_move_tail(&node->capture_link, ATOM_WB_LIST(atom));

			/*
			 * jnode is not necessarily on dirty list: if it was
			 * dirtied when it was on flush queue - it does not get
			 * moved to dirty list
			 */
			ON_DEBUG(count_jnode(atom, node, NODE_LIST(node),
					     WB_LIST, 1));

		} else if (jnode_is_znode(node)
			   && znode_above_root(JZNODE(node))) {
			/*
			 * A special case for znode-above-root. The above-root
			 * (fake) znode is captured and dirtied when the tree
			 * height changes or when the root node is relocated.
			 * This causes atoms to fuse so that changes at the root
			 * are serialized. However, this node is never flushed.
			 * This special case used to be in lock.c to prevent the
			 * above-root node from ever being captured, but now
			 * that it is captured we simply prevent it from
			 * flushing. The log-writer code relies on this to
			 * properly log superblock modifications of the tree
			 * height.
			 */
			jnode_make_wander_nolock(node);
		} else if (JF_ISSET(node, JNODE_RELOC)) {
			queue_jnode(fq, node);
			++(*nr_queued);
		} else
			/* found a not-yet-prepped dirty node: this is the one
			   to return (still spin-locked). */
			break;

		spin_unlock_jnode(node);
	}
	return node;
}

/* Flush some nodes of current atom, usually slum, return -E_REPEAT if there are
 * more nodes to flush, return 0 if atom's dirty lists empty and keep current
 * atom locked, return other errors as they are. */
int
flush_current_atom(int flags, long nr_to_write, long *nr_submitted,
		   txn_atom ** atom, jnode *start)
{
	reiser4_super_info_data *sinfo = get_current_super_private();
	flush_queue_t *fq = NULL;
	jnode *node;
	int nr_queued;
	int ret;

	assert("zam-889", atom != NULL && *atom != NULL);
	assert_spin_locked(&((*atom)->alock));
	assert("zam-892", get_current_context()->trans->atom == *atom);

	BUG_ON(rofs_super(get_current_context()->super));

	/* NOTE(review): the nr_to_write argument is deliberately overridden
	   here -- the whole slum is flushed regardless of caller's limit. */
	nr_to_write = LONG_MAX;
	while (1) {
		ret = reiser4_fq_by_atom(*atom, &fq);
		if (ret != -E_REPEAT)
			break;
		/* -E_REPEAT means the atom lock was dropped; retake it and
		   retry obtaining a flush queue. */
		*atom = get_current_atom_locked();
	}
	if (ret)
		return ret;

	assert_spin_locked(&((*atom)->alock));

	/* parallel flushers limit */
	if (sinfo->tmgr.atom_max_flushers != 0) {
		while ((*atom)->nr_flushers >= sinfo->tmgr.atom_max_flushers) {
			/* An reiser4_atom_send_event() call is inside
			   reiser4_fq_put_nolock() which is called when flush is
			   finished and nr_flushers is decremented. */
			reiser4_atom_wait_event(*atom);
			*atom = get_current_atom_locked();
		}
	}

	/* count ourself as a flusher */
	(*atom)->nr_flushers++;

	writeout_mode_enable();

	nr_queued = 0;
	node = find_flush_start_jnode(start, *atom, fq, &nr_queued, flags);

	if (node == NULL) {
		if (nr_queued == 0) {
			/* nothing dirty at all: undo flusher accounting and
			   return with the atom still locked. */
			(*atom)->nr_flushers--;
			reiser4_fq_put_nolock(fq);
			reiser4_atom_send_event(*atom);
			/* current atom remains locked */
			writeout_mode_disable();
			return 0;
		}
		spin_unlock_atom(*atom);
	} else {
		jref(node);
		BUG_ON((*atom)->super != node->tree->super);
		spin_unlock_atom(*atom);
		spin_unlock_jnode(node);
		BUG_ON(nr_to_write == 0);
		ret = jnode_flush(node, nr_to_write, nr_submitted, fq, flags);
		jput(node);
	}

	/* NOTE(review): this overwrites any error returned by jnode_flush()
	   above with reiser4_write_fq()'s result -- confirm intentional. */
	ret =
	    reiser4_write_fq(fq, nr_submitted,
			     WRITEOUT_SINGLE_STREAM | WRITEOUT_FOR_PAGE_RECLAIM);

	*atom = get_current_atom_locked();
	(*atom)->nr_flushers--;
	reiser4_fq_put_nolock(fq);
	reiser4_atom_send_event(*atom);
	spin_unlock_atom(*atom);

	writeout_mode_disable();

	if (ret == 0)
		ret = -E_REPEAT;

	return ret;
}

/**
 * This function calls txmod->reverse_alloc_formatted() to make a
 * reverse-parent-first relocation decision and then, if yes, it marks
 * the parent dirty.
 */
static int reverse_allocate_parent(jnode * node,
				   const coord_t *parent_coord,
				   flush_pos_t *pos)
{
	int ret;

	if (!JF_ISSET(ZJNODE(parent_coord->node), JNODE_DIRTY)) {
		txmod_plugin *txmod_plug = get_txmod_plugin();

		if (!txmod_plug->reverse_alloc_formatted)
			return 0;
		ret = txmod_plug->reverse_alloc_formatted(node,
							  parent_coord, pos);
		if (ret < 0)
			return ret;
		/*
		 * FIXME-ZAM: if parent is already relocated -
		 * we do not want to grab space, right?
		 */
		if (ret == 1) {
			int grabbed;

			grabbed = get_current_context()->grabbed_blocks;
			if (reiser4_grab_space_force((__u64) 1, BA_RESERVED) !=
			    0)
				reiser4_panic("umka-1250",
					      "No space left during flush.");

			assert("jmacd-18923",
			       znode_is_write_locked(parent_coord->node));
			znode_make_dirty(parent_coord->node);
			grabbed2free_mark(grabbed);
		}
	}
	return 0;
}

/* INITIAL ALLOCATE ANCESTORS STEP (REVERSE PARENT-FIRST ALLOCATION BEFORE
   FORWARD PARENT-FIRST LOOP BEGINS) */

/* Get the leftmost child for given coord. */
/* On success *@child holds a reference the caller must put. */
static int get_leftmost_child_of_unit(const coord_t *coord, jnode ** child)
{
	int ret;

	ret = item_utmost_child(coord, LEFT_SIDE, child);

	if (ret)
		return ret;

	if (IS_ERR(*child))
		return PTR_ERR(*child);

	return 0;
}

/* This step occurs after the left- and right-scans are completed, before
   starting the forward parent-first traversal. Here we attempt to allocate
   ancestors of the starting flush point, which means continuing in the reverse
   parent-first direction to the parent, grandparent, and so on (as long as the
   child is a leftmost child).
   This routine calls a recursive process,
   alloc_one_ancestor, which does the real work, except there is special-case
   handling here for the first ancestor, which may be a twig. At each level
   (here and alloc_one_ancestor), we check for relocation and then, if the child
   is a leftmost child, repeat at the next level. On the way back down (the
   recursion), we allocate the ancestors in parent-first order. */
static int alloc_pos_and_ancestors(flush_pos_t *pos)
{
	int ret = 0;
	lock_handle plock;
	load_count pload;
	coord_t pcoord;

	/* Already prepped: nothing to do. */
	if (znode_check_flushprepped(pos->lock.node))
		return 0;

	coord_init_invalid(&pcoord, NULL);
	init_lh(&plock);
	init_load_count(&pload);

	if (pos->state == POS_ON_EPOINT) {
		/* a special case for pos on twig level, where we already have
		   a lock on parent node. */
		/* The parent may not be dirty, in which case we should decide
		   whether to relocate the child now. If decision is made to
		   relocate the child, the parent is marked dirty. */
		ret = reverse_allocate_parent(pos->child, &pos->coord, pos);
		if (ret)
			goto exit;

		/* FIXME_NFQUCMPD: We only need to allocate the twig (if child
		   is leftmost) and the leaf/child, so recursion is not needed.
		   Levels above the twig will be allocated for
		   write-optimization before the transaction commits. */

		/* Do the recursive step, allocating zero or more of our
		 * ancestors. */
		ret = alloc_one_ancestor(&pos->coord, pos);

	} else {
		if (!znode_is_root(pos->lock.node)) {
			/* all formatted nodes except tree root */
			ret =
			    reiser4_get_parent(&plock, pos->lock.node,
					       ZNODE_WRITE_LOCK);
			if (ret)
				goto exit;

			ret = incr_load_count_znode(&pload, plock.node);
			if (ret)
				goto exit;

			ret =
			    find_child_ptr(plock.node, pos->lock.node, &pcoord);
			if (ret)
				goto exit;

			ret = reverse_allocate_parent(ZJNODE(pos->lock.node),
						      &pcoord,
						      pos);
			if (ret)
				goto exit;

			ret = alloc_one_ancestor(&pcoord, pos);
			if (ret)
				goto exit;
		}

		/* NOTE(review): for the root node @pcoord is still invalid
		   here -- presumably allocate_znode() tolerates that for the
		   root case; confirm. */
		ret = allocate_znode(pos->lock.node, &pcoord, pos);
	}
exit:
	done_load_count(&pload);
	done_lh(&plock);
	return ret;
}

/* This is the recursive step described in alloc_pos_and_ancestors, above.
   Ignoring the call to set_preceder, which is the next function described, this
   checks if the child is a leftmost child and returns if it is not. If the
   child is a leftmost child it checks for relocation, possibly dirtying the
   parent. Then it performs the recursive step. */
static int alloc_one_ancestor(const coord_t *coord, flush_pos_t *pos)
{
	int ret = 0;
	lock_handle alock;
	load_count aload;
	coord_t acoord;

	/* As we ascend at the left-edge of the region to flush, take this
	   opportunity at the twig level to find our parent-first preceder
	   unless we have already set it. */
	if (pos->preceder.blk == 0) {
		ret = set_preceder(coord, pos);
		if (ret != 0)
			return ret;
	}

	/* If the ancestor is clean or already allocated, or if the child is not
	   a leftmost child, stop going up, even leaving coord->node not
	   flushprepped. */
	if (znode_check_flushprepped(coord->node)
	    || !coord_is_leftmost_unit(coord))
		return 0;

	init_lh(&alock);
	init_load_count(&aload);
	coord_init_invalid(&acoord, NULL);

	/* Only ascend to the next level if it is a leftmost child, but
	   write-lock the parent in case we will relocate the child.
	 */
	if (!znode_is_root(coord->node)) {

		ret =
		    jnode_lock_parent_coord(ZJNODE(coord->node), &acoord,
					    &alock, &aload, ZNODE_WRITE_LOCK,
					    0);
		if (ret != 0) {
			/* FIXME(C): check EINVAL, E_DEADLOCK */
			goto exit;
		}

		ret = reverse_allocate_parent(ZJNODE(coord->node),
					      &acoord, pos);
		if (ret != 0)
			goto exit;

		/* Recursive call. */
		if (!znode_check_flushprepped(acoord.node)) {
			ret = alloc_one_ancestor(&acoord, pos);
			if (ret)
				goto exit;
		}
	}

	/* Note: we call allocate with the parent write-locked (except at the
	   root) in case we relocate the child, in which case it will modify the
	   parent during this call. */
	ret = allocate_znode(coord->node, &acoord, pos);

exit:
	done_load_count(&aload);
	done_lh(&alock);
	return ret;
}

/* During the reverse parent-first alloc_pos_and_ancestors process described
   above there is a call to this function at the twig level. During
   alloc_pos_and_ancestors we may ask: should this node be relocated (in reverse
   parent-first context)? We repeat this process as long as the child is the
   leftmost child, eventually reaching an ancestor of the flush point that is
   not a leftmost child. The preceder of that ancestors, which is not a leftmost
   child, is actually on the leaf level. The preceder of that block is the
   left-neighbor of the flush point. The preceder of that block is the rightmost
   child of the twig on the left. So, when alloc_pos_and_ancestors passes upward
   through the twig level, it stops momentarily to remember the block of the
   rightmost child of the twig on the left and sets it to the flush_position's
   preceder_hint.

   There is one other place where we may set the flush_position's preceder hint,
   which is during scan-left.
*/
static int set_preceder(const coord_t *coord_in, flush_pos_t *pos)
{
	int ret;
	coord_t coord;
	lock_handle left_lock;
	load_count left_load;

	coord_dup(&coord, coord_in);

	init_lh(&left_lock);
	init_load_count(&left_load);

	/* FIXME(B): Same FIXME as in "Find the preceder" in
	   reverse_allocate. coord_is_leftmost_unit is not the right test
	   if the unformatted child is in the middle of the first extent unit.*/
	if (!coord_is_leftmost_unit(&coord)) {
		coord_prev_unit(&coord);
	} else {
		ret =
		    reiser4_get_left_neighbor(&left_lock, coord.node,
					      ZNODE_READ_LOCK, GN_SAME_ATOM);
		if (ret) {
			/* If we fail for any reason it doesn't matter because
			   the preceder is only a hint. We are low-priority at
			   this point, so this must be the case. */
			if (ret == -E_REPEAT || ret == -E_NO_NEIGHBOR ||
			    ret == -ENOENT || ret == -EINVAL
			    || ret == -E_DEADLOCK)
				ret = 0;
			goto exit;
		}

		ret = incr_load_count_znode(&left_load, left_lock.node);
		if (ret)
			goto exit;

		/* preceder candidate is the rightmost unit of the left twig */
		coord_init_last_unit(&coord, left_lock.node);
	}

	ret =
	    item_utmost_child_real_block(&coord, RIGHT_SIDE,
					 &pos->preceder.blk);
exit:
	check_preceder(pos->preceder.blk);
	done_load_count(&left_load);
	done_lh(&left_lock);
	return ret;
}

/* MAIN SQUEEZE AND ALLOCATE LOOP (THREE BIG FUNCTIONS) */

/* This procedure implements the outer loop of the flush algorithm. To put this
   in context, here is the general list of steps taken by the flush routine as a
   whole:

   1. Scan-left
   2. Scan-right (maybe)
   3. Allocate initial flush position and its ancestors
   4.
   5.
   6.

   This procedure implements the loop in steps 4 through 6 in the above listing.

   Step 4: if the current flush position is an extent item (position on the twig
   level), it allocates the extent (allocate_extent_item_in_place) then shifts
   to the next coordinate. If the next coordinate's leftmost child needs
   flushprep, we will continue.
   If the next coordinate is an internal item, we
   descend back to the leaf level, otherwise we repeat a step #4 (labeled
   ALLOC_EXTENTS below). If the "next coordinate" brings us past the end of the
   twig level, then we call reverse_relocate_end_of_twig to possibly dirty the
   next (right) twig, prior to step #5 which moves to the right.

   Step 5: calls squalloc_changed_ancestors, which initiates a recursive call up
   the tree to allocate any ancestors of the next-right flush position that are
   not also ancestors of the current position. Those ancestors (in top-down
   order) are the next in parent-first order. We squeeze adjacent nodes on the
   way up until the right node and current node share the same parent, then
   allocate on the way back down. Finally, this step sets the flush position to
   the next-right node. Then repeat steps 4 and 5.
*/

/* SQUEEZE CODE */

/* squalloc_right_twig helper function, cut a range of extent items from
   cut node to->node from the beginning up to coord @to. */
static int squalloc_right_twig_cut(coord_t *to, reiser4_key * to_key,
				   znode * left)
{
	coord_t from;
	reiser4_key from_key;

	coord_init_first_unit(&from, to->node);
	item_key_by_coord(&from, &from_key);

	return cut_node_content(&from, to, &from_key, to_key, NULL);
}

/* Copy as much of the leading extents from @right to @left, allocating
   unallocated extents as they are copied. Returns SQUEEZE_TARGET_FULL or
   SQUEEZE_SOURCE_EMPTY when no more can be shifted. If the next item is an
   internal item it calls shift_one_internal_unit and may then return
   SUBTREE_MOVED. */
static int squeeze_right_twig(znode * left, znode * right, flush_pos_t *pos)
{
	int ret = SUBTREE_MOVED;
	coord_t coord;		/* used to iterate over items */
	reiser4_key stop_key;
	reiser4_tree *tree;
	txmod_plugin *txmod_plug = get_txmod_plugin();

	assert("jmacd-2008", !node_is_empty(right));
	coord_init_first_unit(&coord, right);

	/* FIXME: can be optimized to cut once */
	while (!node_is_empty(coord.node) && item_is_extent(&coord)) {
		ON_DEBUG(void *vp);

		assert("vs-1468", coord_is_leftmost_unit(&coord));
		ON_DEBUG(vp = shift_check_prepare(left, coord.node));

		/* stop_key is used to find what was copied and what to cut */
		stop_key = *reiser4_min_key();
		ret = txmod_plug->squeeze_alloc_unformatted(left,
							    &coord, pos,
							    &stop_key);
		if (ret != SQUEEZE_CONTINUE) {
			ON_DEBUG(kfree(vp));
			break;
		}
		assert("vs-1465", !keyeq(&stop_key, reiser4_min_key()));

		/* Helper function to do the cutting. */
		/* stop_key holds the key one past the copied range; step it
		   back to address the last copied unit before cutting. */
		set_key_offset(&stop_key, get_key_offset(&stop_key) - 1);
		check_me("vs-1466",
			 squalloc_right_twig_cut(&coord, &stop_key, left) == 0);

		ON_DEBUG(shift_check(vp, left, coord.node));
	}
	/*
	 * @left and @right nodes participated in the
	 * implicit shift, determined by the pair of
	 * functions:
	 * . squalloc_extent() - append units to the @left
	 * . squalloc_right_twig_cut() - cut the units from @right
	 * so update their delimiting keys
	 */
	tree = znode_get_tree(left);
	write_lock_dk(tree);
	update_znode_dkeys(left, right);
	write_unlock_dk(tree);

	if (node_is_empty(coord.node))
		ret = SQUEEZE_SOURCE_EMPTY;

	if (ret == SQUEEZE_TARGET_FULL)
		goto out;

	if (node_is_empty(right)) {
		/* The whole right node was copied into @left. */
		assert("vs-464", ret == SQUEEZE_SOURCE_EMPTY);
		goto out;
	}

	coord_init_first_unit(&coord, right);

	if (!item_is_internal(&coord)) {
		/* we do not want to squeeze anything else to left neighbor
		   because "slum" is over */
		ret = SQUEEZE_TARGET_FULL;
		goto out;
	}
	assert("jmacd-433", item_is_internal(&coord));

	/* Shift an internal unit. The child must be allocated before shifting
	   any more extents, so we stop here. */
	ret = shift_one_internal_unit(left, right);

out:
	assert("jmacd-8612", ret < 0 || ret == SQUEEZE_TARGET_FULL
	       || ret == SUBTREE_MOVED || ret == SQUEEZE_SOURCE_EMPTY);

	if (ret == SQUEEZE_TARGET_FULL) {
		/* We submit prepped nodes here and expect that this @left twig
		 * will not be modified again during this jnode_flush() call. */
		int ret1;

		/* NOTE: seems like io is done under long term locks. */
		ret1 = write_prepped_nodes(pos);
		if (ret1 < 0)
			return ret1;
	}

	return ret;
}

#if REISER4_DEBUG
/* Debug-only sanity check of the item-conversion state carried in @pos. */
static void item_convert_invariant(flush_pos_t *pos)
{
	assert("edward-1225", coord_is_existing_item(&pos->coord));
	if (convert_data_attached(pos)) {
		item_plugin *iplug = item_convert_plug(pos);

		assert("edward-1000",
		       iplug == item_plugin_by_coord(&pos->coord));
		assert("edward-1001", iplug->f.convert != NULL);
	} else
		assert("edward-1226", pos->child == NULL);
}
#else

#define item_convert_invariant(pos) noop

#endif

/*
 * Scan all node's items and apply for each one
 * its ->convert() method. This method may:
 * . resize the item;
 * . kill the item;
 * . insert a group of items/nodes on the right,
 *   which possess the following properties:
 *   . all new nodes are dirty and not convertible;
 *   . for all new items ->convert() method is a noop.
 *
 * NOTE: this function makes the tree unbalanced!
 * This intended to be used by flush squalloc() in a
 * combination with squeeze procedure.
 *
 * GLOSSARY
 *
 * Chained nodes and items.
+ * Two neighboring nodes @left and @right are chained, + * iff the last item of @left and the first item of @right + * belong to the same item cluster. In this case those + * items are called chained. + */ +static int convert_node(flush_pos_t *pos, znode * node) +{ + int ret = 0; + item_plugin *iplug; + assert("edward-304", pos != NULL); + assert("edward-305", pos->child == NULL); + assert("edward-475", znode_convertible(node)); + assert("edward-669", znode_is_wlocked(node)); + assert("edward-1210", !node_is_empty(node)); + + if (znode_get_level(node) != LEAF_LEVEL) + /* unsupported */ + goto exit; + + coord_init_first_unit(&pos->coord, node); + + while (1) { + ret = 0; + coord_set_to_left(&pos->coord); + item_convert_invariant(pos); + + iplug = item_plugin_by_coord(&pos->coord); + assert("edward-844", iplug != NULL); + + if (iplug->f.convert) { + ret = iplug->f.convert(pos); + if (ret) + goto exit; + } + assert("edward-307", pos->child == NULL); + + if (coord_next_item(&pos->coord)) { + /* + * node is over + */ + if (convert_data_attached(pos)) + /* + * the last item was convertible and + * there still is an unprocesssed flow + */ + if (next_node_is_chained(pos)) { + /* + * next node contains items of + * the same disk cluster, + * so finish with this node + */ + update_chaining_state(pos, 0/* move + to next + node */); + break; + } + else { + /* + * perform one more iteration + * for the same item and the + * rest of flow + */ + update_chaining_state(pos, 1/* this + node */); + } + else + /* + * the last item wasn't convertible, or + * convert date was detached in the last + * iteration, + * go to next node + */ + break; + } else { + /* + * Node is not over, item position got decremented. 
+ */ + if (convert_data_attached(pos)) { + /* + * disk cluster should be increased, so roll + * one item position back and perform the + * iteration with the previous item and the + * rest of attached data + */ + if (iplug != item_plugin_by_coord(&pos->coord)) + set_item_convert_count(pos, 0); + + ret = coord_prev_item(&pos->coord); + assert("edward-1003", !ret); + + update_chaining_state(pos, 1/* this node */); + } + else + /* + * previous item was't convertible, or + * convert date was detached in the last + * iteration, go to next item + */ + ; + } + } + JF_CLR(ZJNODE(node), JNODE_CONVERTIBLE); + znode_make_dirty(node); +exit: + assert("edward-1004", !ret); + return ret; +} + +/* Squeeze and allocate the right neighbor. This is called after @left and + its current children have been squeezed and allocated already. This + procedure's job is to squeeze and items from @right to @left. + + If at the leaf level, use the shift_everything_left memcpy-optimized + version of shifting (squeeze_right_leaf). + + If at the twig level, extents are allocated as they are shifted from @right + to @left (squalloc_right_twig). + + At any other level, shift one internal item and return to the caller + (squalloc_parent_first) so that the shifted-subtree can be processed in + parent-first order. + + When unit of internal item is moved, squeezing stops and SUBTREE_MOVED is + returned. When all content of @right is squeezed, SQUEEZE_SOURCE_EMPTY is + returned. If nothing can be moved into @left anymore, SQUEEZE_TARGET_FULL + is returned. 
+*/ + +static int squeeze_right_neighbor(flush_pos_t *pos, znode * left, + znode * right) +{ + int ret; + + /* FIXME it is possible to see empty hasn't-heard-banshee node in a + * tree owing to error (for example, ENOSPC) in write */ + /* assert("jmacd-9321", !node_is_empty(left)); */ + assert("jmacd-9322", !node_is_empty(right)); + assert("jmacd-9323", znode_get_level(left) == znode_get_level(right)); + + switch (znode_get_level(left)) { + case TWIG_LEVEL: + /* Shift with extent allocating until either an internal item + is encountered or everything is shifted or no free space + left in @left */ + ret = squeeze_right_twig(left, right, pos); + break; + + default: + /* All other levels can use shift_everything until we implement + per-item flush plugins. */ + ret = squeeze_right_non_twig(left, right); + break; + } + + assert("jmacd-2011", (ret < 0 || + ret == SQUEEZE_SOURCE_EMPTY + || ret == SQUEEZE_TARGET_FULL + || ret == SUBTREE_MOVED)); + return ret; +} + +static int squeeze_right_twig_and_advance_coord(flush_pos_t *pos, + znode * right) +{ + int ret; + + ret = squeeze_right_twig(pos->lock.node, right, pos); + if (ret < 0) + return ret; + if (ret > 0) { + coord_init_after_last_item(&pos->coord, pos->lock.node); + return ret; + } + + coord_init_last_unit(&pos->coord, pos->lock.node); + return 0; +} + +/* forward declaration */ +static int squalloc_upper_levels(flush_pos_t *, znode *, znode *); + +/* do a fast check for "same parents" condition before calling + * squalloc_upper_levels() */ +static inline int check_parents_and_squalloc_upper_levels(flush_pos_t *pos, + znode * left, + znode * right) +{ + if (znode_same_parents(left, right)) + return 0; + + return squalloc_upper_levels(pos, left, right); +} + +/* Check whether the parent of given @right node needs to be processes + ((re)allocated) prior to processing of the child. 
If @left and @right do not
+   share the same parent, then the parent of @right follows @left but precedes
+   @right in parent-first order, and we have to (re)allocate it before @right
+   gets (re)allocated. */
+static int squalloc_upper_levels(flush_pos_t *pos, znode * left, znode * right)
+{
+	int ret;
+
+	lock_handle left_parent_lock;
+	lock_handle right_parent_lock;
+
+	load_count left_parent_load;
+	load_count right_parent_load;
+
+	init_lh(&left_parent_lock);
+	init_lh(&right_parent_lock);
+
+	init_load_count(&left_parent_load);
+	init_load_count(&right_parent_load);
+
+	ret = reiser4_get_parent(&left_parent_lock, left, ZNODE_WRITE_LOCK);
+	if (ret)
+		goto out;
+
+	ret = reiser4_get_parent(&right_parent_lock, right, ZNODE_WRITE_LOCK);
+	if (ret)
+		goto out;
+
+	/* Check for same parents */
+	if (left_parent_lock.node == right_parent_lock.node)
+		goto out;
+
+	if (znode_check_flushprepped(right_parent_lock.node)) {
+		/* Keep parent-first order. In the order, the right parent node
+		   stands before the @right node. If it is already allocated,
+		   we set the preceder (next block search start point) to its
+		   block number, @right node should be allocated after it.
+
+		   However, preceder is set only if the right parent is on twig
+		   level. The explanation is the following: new branch nodes are
+		   allocated over already allocated children while the tree
+		   grows, it is difficult to keep tree ordered, we assume that
+		   only leaves and twigs are correctly allocated. So, only
+		   twigs are used as a preceder for allocating of the rest of
+		   the slum. 
*/ + if (znode_get_level(right_parent_lock.node) == TWIG_LEVEL) { + pos->preceder.blk = + *znode_get_block(right_parent_lock.node); + check_preceder(pos->preceder.blk); + } + goto out; + } + + ret = incr_load_count_znode(&left_parent_load, left_parent_lock.node); + if (ret) + goto out; + + ret = incr_load_count_znode(&right_parent_load, right_parent_lock.node); + if (ret) + goto out; + + ret = + squeeze_right_neighbor(pos, left_parent_lock.node, + right_parent_lock.node); + /* We stop if error. We stop if some items/units were shifted (ret == 0) + * and thus @right changed its parent. It means we have not process + * right_parent node prior to processing of @right. Positive return + * values say that shifting items was not happen because of "empty + * source" or "target full" conditions. */ + if (ret <= 0) + goto out; + + /* parent(@left) and parent(@right) may have different parents also. We + * do a recursive call for checking that. */ + ret = + check_parents_and_squalloc_upper_levels(pos, left_parent_lock.node, + right_parent_lock.node); + if (ret) + goto out; + + /* allocate znode when going down */ + ret = lock_parent_and_allocate_znode(right_parent_lock.node, pos); + +out: + done_load_count(&left_parent_load); + done_load_count(&right_parent_load); + + done_lh(&left_parent_lock); + done_lh(&right_parent_lock); + + return ret; +} + +/* Check the leftmost child "flushprepped" status, also returns true if child + * node was not found in cache. */ +static int leftmost_child_of_unit_check_flushprepped(const coord_t *coord) +{ + int ret; + int prepped; + + jnode *child; + + ret = get_leftmost_child_of_unit(coord, &child); + + if (ret) + return ret; + + if (child) { + prepped = jnode_check_flushprepped(child); + jput(child); + } else { + /* We consider not existing child as a node which slum + processing should not continue to. Not cached node is clean, + so it is flushprepped. 
*/ + prepped = 1; + } + + return prepped; +} + +/* (re)allocate znode with automated getting parent node */ +static int lock_parent_and_allocate_znode(znode * node, flush_pos_t *pos) +{ + int ret; + lock_handle parent_lock; + load_count parent_load; + coord_t pcoord; + + assert("zam-851", znode_is_write_locked(node)); + + init_lh(&parent_lock); + init_load_count(&parent_load); + + ret = reiser4_get_parent(&parent_lock, node, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + ret = incr_load_count_znode(&parent_load, parent_lock.node); + if (ret) + goto out; + + ret = find_child_ptr(parent_lock.node, node, &pcoord); + if (ret) + goto out; + + ret = allocate_znode(node, &pcoord, pos); + +out: + done_load_count(&parent_load); + done_lh(&parent_lock); + return ret; +} + +/* + * Process nodes on the leaf level until unformatted node or + * rightmost node in the slum reached. + * + * This function is a complicated beast, because it calls a + * static machine ->convert_node() for every node, which, in + * turn, scans node's items and does something for each of them. + */ +static int handle_pos_on_formatted(flush_pos_t *pos) +{ + int ret; + lock_handle right_lock; + load_count right_load; + + init_lh(&right_lock); + init_load_count(&right_load); + + if (znode_convertible(pos->lock.node)) { + ret = convert_node(pos, pos->lock.node); + if (ret) + return ret; + } + while (1) { + assert("edward-1635", + ergo(node_is_empty(pos->lock.node), + ZF_ISSET(pos->lock.node, JNODE_HEARD_BANSHEE))); + /* + * First of all, grab a right neighbor + */ + if (convert_data(pos) && convert_data(pos)->right_locked) { + /* + * the right neighbor was locked by convert_node() + * transfer the lock from the "cache". 
+ */ + move_lh(&right_lock, &convert_data(pos)->right_lock); + done_lh(&convert_data(pos)->right_lock); + convert_data(pos)->right_locked = 0; + } + else { + ret = neighbor_in_slum(pos->lock.node, &right_lock, + RIGHT_SIDE, ZNODE_WRITE_LOCK, + 1, 0); + if (ret) { + /* + * There is no right neighbor for some reasons, + * so finish with this level. + */ + assert("edward-1636", + !should_convert_right_neighbor(pos)); + break; + } + } + /* + * Check "flushprepped" status of the right neighbor. + * + * We don't prep(allocate) nodes for flushing twice. This can be + * suboptimal, or it can be optimal. For now we choose to live + * with the risk that it will be suboptimal because it would be + * quite complex to code it to be smarter. + */ + if (znode_check_flushprepped(right_lock.node) + && !znode_convertible(right_lock.node)) { + assert("edward-1005", + !should_convert_right_neighbor(pos)); + pos_stop(pos); + break; + } + ret = incr_load_count_znode(&right_load, right_lock.node); + if (ret) + break; + if (znode_convertible(right_lock.node)) { + assert("edward-1643", + ergo(convert_data(pos), + convert_data(pos)->right_locked == 0)); + + ret = convert_node(pos, right_lock.node); + if (ret) + break; + } + else + assert("edward-1637", + !should_convert_right_neighbor(pos)); + + if (node_is_empty(pos->lock.node)) { + /* + * Current node became empty after conversion + * and, hence, was removed from the tree; + * Advance the current position to the right neighbor. + */ + assert("edward-1638", + ZF_ISSET(pos->lock.node, JNODE_HEARD_BANSHEE)); + move_flush_pos(pos, &right_lock, &right_load, NULL); + continue; + } + if (node_is_empty(right_lock.node)) { + assert("edward-1639", + ZF_ISSET(right_lock.node, JNODE_HEARD_BANSHEE)); + /* + * The right neighbor became empty after + * convertion, and hence it was deleted + * from the tree - skip this. 
+ * Since current node is not empty, + * we'll obtain a correct pointer to + * the next right neighbor + */ + done_load_count(&right_load); + done_lh(&right_lock); + continue; + } + /* + * At this point both, current node and its right + * neigbor are converted and not empty. + * Squeeze them _before_ going upward. + */ + ret = squeeze_right_neighbor(pos, pos->lock.node, + right_lock.node); + if (ret < 0) + break; + if (node_is_empty(right_lock.node)) { + assert("edward-1640", + ZF_ISSET(right_lock.node, JNODE_HEARD_BANSHEE)); + /* + * right neighbor was squeezed completely, + * and hence has been deleted from the tree. + * Skip this. + */ + done_load_count(&right_load); + done_lh(&right_lock); + continue; + } + if (znode_check_flushprepped(right_lock.node)) { + if (should_convert_right_neighbor(pos)) { + /* + * in spite of flushprepped status of the node, + * its right slum neighbor should be converted + */ + assert("edward-953", convert_data(pos)); + assert("edward-954", item_convert_data(pos)); + + move_flush_pos(pos, &right_lock, &right_load, NULL); + continue; + } else { + pos_stop(pos); + break; + } + } + /* + * parent(right_lock.node) has to be processed before + * (right_lock.node) due to "parent-first" allocation + * order + */ + ret = check_parents_and_squalloc_upper_levels(pos, + pos->lock.node, + right_lock.node); + if (ret) + break; + /* + * (re)allocate _after_ going upward + */ + ret = lock_parent_and_allocate_znode(right_lock.node, pos); + if (ret) + break; + if (should_terminate_squalloc(pos)) { + set_item_convert_count(pos, 0); + break; + } + /* + * advance the flush position to the right neighbor + */ + move_flush_pos(pos, &right_lock, &right_load, NULL); + + ret = rapid_flush(pos); + if (ret) + break; + } + check_convert_info(pos); + done_load_count(&right_load); + done_lh(&right_lock); + /* + * This function indicates via pos whether to stop or go to twig or + * continue on current level + */ + return ret; + +} + +/* Process nodes on leaf level 
until unformatted node or rightmost node in the
+ * slum reached. */
+static int handle_pos_on_leaf(flush_pos_t *pos)
+{
+	int ret;
+
+	assert("zam-845", pos->state == POS_ON_LEAF);
+
+	ret = handle_pos_on_formatted(pos);
+
+	if (ret == -E_NO_NEIGHBOR) {
+		/* cannot get right neighbor, go process extents. */
+		pos->state = POS_TO_TWIG;
+		return 0;
+	}
+
+	return ret;
+}
+
+/* Process slum on level > 1 */
+static int handle_pos_on_internal(flush_pos_t *pos)
+{
+	assert("zam-850", pos->state == POS_ON_INTERNAL);
+	return handle_pos_on_formatted(pos);
+}
+
+/* check whether squalloc should stop before processing given extent */
+static int squalloc_extent_should_stop(flush_pos_t *pos)
+{
+	assert("zam-869", item_is_extent(&pos->coord));
+
+	/* pos->child is a jnode handle_pos_on_extent() should start with,
+	 * instead of the first child of the first extent unit. */
+	if (pos->child) {
+		int prepped;
+
+		assert("vs-1383", jnode_is_unformatted(pos->child));
+		prepped = jnode_check_flushprepped(pos->child);
+		pos->pos_in_unit =
+		    jnode_get_index(pos->child) -
+		    extent_unit_index(&pos->coord);
+		assert("vs-1470",
+		       pos->pos_in_unit < extent_unit_width(&pos->coord));
+		assert("nikita-3434",
+		       ergo(extent_is_unallocated(&pos->coord),
+			    pos->pos_in_unit == 0));
+		jput(pos->child);
+		pos->child = NULL;
+
+		return prepped;
+	}
+
+	pos->pos_in_unit = 0;
+	if (extent_is_unallocated(&pos->coord))
+		return 0;
+
+	return leftmost_child_of_unit_check_flushprepped(&pos->coord);
+}
+
+/* Handle the case when regular reiser4 tree (znodes connected one to its
+ * neighbors by sibling pointers) is interrupted on leaf level by one or more
+ * unformatted nodes. By holding a lock on the twig level and using extent code
+ * routines to process unformatted nodes we swim around an irregular part of
+ * reiser4 tree. 
*/ +static int handle_pos_on_twig(flush_pos_t *pos) +{ + int ret; + txmod_plugin *txmod_plug = get_txmod_plugin(); + + assert("zam-844", pos->state == POS_ON_EPOINT); + assert("zam-843", item_is_extent(&pos->coord)); + + /* We decide should we continue slum processing with current extent + unit: if leftmost child of current extent unit is flushprepped + (i.e. clean or already processed by flush) we stop squalloc(). There + is a fast check for unallocated extents which we assume contain all + not flushprepped nodes. */ + /* FIXME: Here we implement simple check, we are only looking on the + leftmost child. */ + ret = squalloc_extent_should_stop(pos); + if (ret != 0) { + pos_stop(pos); + return ret; + } + + while (pos_valid(pos) && coord_is_existing_unit(&pos->coord) + && item_is_extent(&pos->coord)) { + ret = txmod_plug->forward_alloc_unformatted(pos); + if (ret) + break; + coord_next_unit(&pos->coord); + } + + if (coord_is_after_rightmost(&pos->coord)) { + pos->state = POS_END_OF_TWIG; + return 0; + } + if (item_is_internal(&pos->coord)) { + pos->state = POS_TO_LEAF; + return 0; + } + + assert("zam-860", item_is_extent(&pos->coord)); + + /* "slum" is over */ + pos->state = POS_INVALID; + return 0; +} + +/* When we about to return flush position from twig to leaf level we can process + * the right twig node or move position to the leaf. This processes right twig + * if it is possible and jump to leaf level if not. */ +static int handle_pos_end_of_twig(flush_pos_t *pos) +{ + int ret; + lock_handle right_lock; + load_count right_load; + coord_t at_right; + jnode *child = NULL; + + assert("zam-848", pos->state == POS_END_OF_TWIG); + assert("zam-849", coord_is_after_rightmost(&pos->coord)); + + init_lh(&right_lock); + init_load_count(&right_load); + + /* We get a lock on the right twig node even it is not dirty because + * slum continues or discontinues on leaf level not on next twig. This + * lock on the right twig is needed for getting its leftmost child. 
*/
+	ret =
+	    reiser4_get_right_neighbor(&right_lock, pos->lock.node,
+				       ZNODE_WRITE_LOCK, GN_SAME_ATOM);
+	if (ret)
+		goto out;
+
+	ret = incr_load_count_znode(&right_load, right_lock.node);
+	if (ret)
+		goto out;
+
+	/* right twig could be not dirty */
+	if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY)) {
+		/* If right twig node is dirty we always attempt to squeeze its
+		 * content to the left... */
+became_dirty:
+		ret =
+		    squeeze_right_twig_and_advance_coord(pos, right_lock.node);
+		if (ret <= 0) {
+			/* pos->coord is on internal item, go to leaf level, or
+			 * we have an error which will be caught in squalloc()
+			 */
+			pos->state = POS_TO_LEAF;
+			goto out;
+		}
+
+		/* If right twig was squeezed completely we have to re-lock the
+		 * right twig. Now it is done through the top-level squalloc
+		 * routine. */
+		if (node_is_empty(right_lock.node))
+			goto out;
+
+		/* ... and prep it if it is not yet prepped */
+		if (!znode_check_flushprepped(right_lock.node)) {
+			/* As usual, process parent before ... */
+			ret =
+			    check_parents_and_squalloc_upper_levels(pos,
+								    pos->lock.
+								    node,
+								    right_lock.
+								    node);
+			if (ret)
+				goto out;
+
+			/* ... processing the child */
+			ret =
+			    lock_parent_and_allocate_znode(right_lock.node,
+							   pos);
+			if (ret)
+				goto out;
+		}
+	} else {
+		coord_init_first_unit(&at_right, right_lock.node);
+
+		/* check first child of next twig, should we continue there ? */
+		ret = get_leftmost_child_of_unit(&at_right, &child);
+		if (ret || child == NULL || jnode_check_flushprepped(child)) {
+			pos_stop(pos);
+			goto out;
+		}
+
+		/* check clean twig for possible relocation */
+		if (!znode_check_flushprepped(right_lock.node)) {
+			ret = reverse_allocate_parent(child, &at_right, pos);
+			if (ret)
+				goto out;
+			if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY))
+				goto became_dirty;
+		}
+	}
+
+	assert("zam-875", znode_check_flushprepped(right_lock.node));
+
+	/* Update the preceder by a block number of just processed right twig
+	 * node. 
The code above could miss the preceder updating because + * allocate_znode() could not be called for this node. */ + pos->preceder.blk = *znode_get_block(right_lock.node); + check_preceder(pos->preceder.blk); + + coord_init_first_unit(&at_right, right_lock.node); + assert("zam-868", coord_is_existing_unit(&at_right)); + + pos->state = item_is_extent(&at_right) ? POS_ON_EPOINT : POS_TO_LEAF; + move_flush_pos(pos, &right_lock, &right_load, &at_right); + +out: + done_load_count(&right_load); + done_lh(&right_lock); + + if (child) + jput(child); + + return ret; +} + +/* Move the pos->lock to leaf node pointed by pos->coord, check should we + * continue there. */ +static int handle_pos_to_leaf(flush_pos_t *pos) +{ + int ret; + lock_handle child_lock; + load_count child_load; + jnode *child; + + assert("zam-846", pos->state == POS_TO_LEAF); + assert("zam-847", item_is_internal(&pos->coord)); + + init_lh(&child_lock); + init_load_count(&child_load); + + ret = get_leftmost_child_of_unit(&pos->coord, &child); + if (ret) + return ret; + if (child == NULL) { + pos_stop(pos); + return 0; + } + + if (jnode_check_flushprepped(child)) { + pos->state = POS_INVALID; + goto out; + } + + ret = + longterm_lock_znode(&child_lock, JZNODE(child), ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (ret) + goto out; + + ret = incr_load_count_znode(&child_load, JZNODE(child)); + if (ret) + goto out; + + ret = allocate_znode(JZNODE(child), &pos->coord, pos); + if (ret) + goto out; + + /* move flush position to leaf level */ + pos->state = POS_ON_LEAF; + move_flush_pos(pos, &child_lock, &child_load, NULL); + + if (node_is_empty(JZNODE(child))) { + ret = delete_empty_node(JZNODE(child)); + pos->state = POS_INVALID; + } +out: + done_load_count(&child_load); + done_lh(&child_lock); + jput(child); + + return ret; +} + +/* move pos from leaf to twig, and move lock from leaf to twig. 
*/ +/* Move pos->lock to upper (twig) level */ +static int handle_pos_to_twig(flush_pos_t *pos) +{ + int ret; + + lock_handle parent_lock; + load_count parent_load; + coord_t pcoord; + + assert("zam-852", pos->state == POS_TO_TWIG); + + init_lh(&parent_lock); + init_load_count(&parent_load); + + ret = + reiser4_get_parent(&parent_lock, pos->lock.node, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + ret = incr_load_count_znode(&parent_load, parent_lock.node); + if (ret) + goto out; + + ret = find_child_ptr(parent_lock.node, pos->lock.node, &pcoord); + if (ret) + goto out; + + assert("zam-870", item_is_internal(&pcoord)); + coord_next_item(&pcoord); + + if (coord_is_after_rightmost(&pcoord)) + pos->state = POS_END_OF_TWIG; + else if (item_is_extent(&pcoord)) + pos->state = POS_ON_EPOINT; + else { + /* Here we understand that getting -E_NO_NEIGHBOR in + * handle_pos_on_leaf() was because of just a reaching edge of + * slum */ + pos_stop(pos); + goto out; + } + + move_flush_pos(pos, &parent_lock, &parent_load, &pcoord); + +out: + done_load_count(&parent_load); + done_lh(&parent_lock); + + return ret; +} + +typedef int (*pos_state_handle_t) (flush_pos_t *); +static pos_state_handle_t flush_pos_handlers[] = { + /* process formatted nodes on leaf level, keep lock on a leaf node */ + [POS_ON_LEAF] = handle_pos_on_leaf, + /* process unformatted nodes, keep lock on twig node, pos->coord points + * to extent currently being processed */ + [POS_ON_EPOINT] = handle_pos_on_twig, + /* move a lock from leaf node to its parent for further processing of + unformatted nodes */ + [POS_TO_TWIG] = handle_pos_to_twig, + /* move a lock from twig to leaf level when a processing of unformatted + * nodes finishes, pos->coord points to the leaf node we jump to */ + [POS_TO_LEAF] = handle_pos_to_leaf, + /* after processing last extent in the twig node, attempting to shift + * items from the twigs right neighbor and process them while shifting*/ + [POS_END_OF_TWIG] = handle_pos_end_of_twig, + /* 
process formatted nodes on internal level, keep lock on an internal + node */ + [POS_ON_INTERNAL] = handle_pos_on_internal +}; + +/* Advance flush position horizontally, prepare for flushing ((re)allocate, + * squeeze, encrypt) nodes and their ancestors in "parent-first" order */ +static int squalloc(flush_pos_t *pos) +{ + int ret = 0; + + /* maybe needs to be made a case statement with handle_pos_on_leaf as + * first case, for greater CPU efficiency? Measure and see.... -Hans */ + while (pos_valid(pos)) { + ret = flush_pos_handlers[pos->state] (pos); + if (ret < 0) + break; + + ret = rapid_flush(pos); + if (ret) + break; + } + + /* any positive value or -E_NO_NEIGHBOR are legal return codes for + handle_pos* routines, -E_NO_NEIGHBOR means that slum edge was + reached */ + if (ret > 0 || ret == -E_NO_NEIGHBOR) + ret = 0; + + return ret; +} + +static void update_ldkey(znode * node) +{ + reiser4_key ldkey; + + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + if (node_is_empty(node)) + return; + + znode_set_ld_key(node, leftmost_key_in_node(node, &ldkey)); +} + +/* this is to be called after calling of shift node's method to shift data from + @right to @left. It sets left delimiting keys of @left and @right to keys of + first items of @left and @right correspondingly and sets right delimiting key + of @left to first key of @right */ +static void update_znode_dkeys(znode * left, znode * right) +{ + assert_rw_write_locked(&(znode_get_tree(right)->dk_lock)); + assert("vs-1629", (znode_is_write_locked(left) && + znode_is_write_locked(right))); + + /* we need to update left delimiting of left if it was empty before + shift */ + update_ldkey(left); + update_ldkey(right); + if (node_is_empty(right)) + znode_set_rd_key(left, znode_get_rd_key(right)); + else + znode_set_rd_key(left, znode_get_ld_key(right)); +} + +/* try to shift everything from @right to @left. If everything was shifted - + @right is removed from the tree. Result is the number of bytes shifted. 
*/ +static int +shift_everything_left(znode * right, znode * left, carry_level * todo) +{ + coord_t from; + node_plugin *nplug; + carry_plugin_info info; + + coord_init_after_last_item(&from, right); + + nplug = node_plugin_by_node(right); + info.doing = NULL; + info.todo = todo; + return nplug->shift(&from, left, SHIFT_LEFT, + 1 /* delete @right if it becomes empty */ , + 1 + /* move coord @from to node @left if everything will + be shifted */ + , + &info); +} + +/* Shift as much as possible from @right to @left using the memcpy-optimized + shift_everything_left. @left and @right are formatted neighboring nodes on + leaf level. */ +static int squeeze_right_non_twig(znode * left, znode * right) +{ + int ret; + carry_pool *pool; + carry_level *todo; + + assert("nikita-2246", znode_get_level(left) == znode_get_level(right)); + + if (!JF_ISSET(ZJNODE(left), JNODE_DIRTY) || + !JF_ISSET(ZJNODE(right), JNODE_DIRTY)) + return SQUEEZE_TARGET_FULL; + + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + ret = shift_everything_left(right, left, todo); + if (ret > 0) { + /* something was shifted */ + reiser4_tree *tree; + __u64 grabbed; + + znode_make_dirty(left); + znode_make_dirty(right); + + /* update delimiting keys of nodes which participated in + shift. FIXME: it would be better to have this in shift + node's operation. But it can not be done there. Nobody + remembers why, though + */ + tree = znode_get_tree(left); + write_lock_dk(tree); + update_znode_dkeys(left, right); + write_unlock_dk(tree); + + /* Carry is called to update delimiting key and, maybe, to + remove empty node. */ + grabbed = get_current_context()->grabbed_blocks; + ret = reiser4_grab_space_force(tree->height, BA_RESERVED); + assert("nikita-3003", ret == 0); /* reserved space is + exhausted. Ask Hans. 
*/ + ret = reiser4_carry(todo, NULL/* previous level */); + grabbed2free_mark(grabbed); + } else { + /* Shifting impossible, we return appropriate result code */ + ret = + node_is_empty(right) ? SQUEEZE_SOURCE_EMPTY : + SQUEEZE_TARGET_FULL; + } + + done_carry_pool(pool); + + return ret; +} + +#if REISER4_DEBUG +static int sibling_link_is_ok(const znode *left, const znode *right) +{ + int result; + + read_lock_tree(znode_get_tree(left)); + result = (left->right == right && left == right->left); + read_unlock_tree(znode_get_tree(left)); + return result; +} +#endif + +/* Shift first unit of first item if it is an internal one. Return + SQUEEZE_TARGET_FULL if it fails to shift an item, otherwise return + SUBTREE_MOVED. */ +static int shift_one_internal_unit(znode * left, znode * right) +{ + int ret; + carry_pool *pool; + carry_level *todo; + coord_t *coord; + carry_plugin_info *info; + int size, moved; + + assert("nikita-2247", znode_get_level(left) == znode_get_level(right)); + assert("nikita-2435", znode_is_write_locked(left)); + assert("nikita-2436", znode_is_write_locked(right)); + assert("nikita-2434", sibling_link_is_ok(left, right)); + + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) + + sizeof(*coord) + sizeof(*info) +#if REISER4_DEBUG + + sizeof(*coord) + 2 * sizeof(reiser4_key) +#endif + ); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + coord = (coord_t *) (todo + 3); + coord_init_first_unit(coord, right); + info = (carry_plugin_info *) (coord + 1); + +#if REISER4_DEBUG + if (!node_is_empty(left)) { + coord_t *last; + reiser4_key *right_key; + reiser4_key *left_key; + + last = (coord_t *) (info + 1); + right_key = (reiser4_key *) (last + 1); + left_key = right_key + 1; + coord_init_last_unit(last, left); + + assert("nikita-2463", + keyle(item_key_by_coord(last, left_key), + item_key_by_coord(coord, right_key))); + } +#endif + + assert("jmacd-2007", item_is_internal(coord)); + + 
size = item_length_by_coord(coord); + info->todo = todo; + info->doing = NULL; + + ret = node_plugin_by_node(left)->shift(coord, left, SHIFT_LEFT, + 1 + /* delete @right if it becomes + empty */ + , + 0 + /* do not move coord @coord to + node @left */ + , + info); + + /* If shift returns positive, then we shifted the item. */ + assert("vs-423", ret <= 0 || size == ret); + moved = (ret > 0); + + if (moved) { + /* something was moved */ + reiser4_tree *tree; + int grabbed; + + znode_make_dirty(left); + znode_make_dirty(right); + tree = znode_get_tree(left); + write_lock_dk(tree); + update_znode_dkeys(left, right); + write_unlock_dk(tree); + + /* reserve space for delimiting keys after shifting */ + grabbed = get_current_context()->grabbed_blocks; + ret = reiser4_grab_space_force(tree->height, BA_RESERVED); + assert("nikita-3003", ret == 0); /* reserved space is + exhausted. Ask Hans. */ + + ret = reiser4_carry(todo, NULL/* previous level */); + grabbed2free_mark(grabbed); + } + + done_carry_pool(pool); + + if (ret != 0) { + /* Shift or carry operation failed. */ + assert("jmacd-7325", ret < 0); + return ret; + } + + return moved ? SUBTREE_MOVED : SQUEEZE_TARGET_FULL; +} + +static int allocate_znode(znode * node, + const coord_t *parent_coord, flush_pos_t *pos) +{ + txmod_plugin *plug = get_txmod_plugin(); + /* + * perform znode allocation with znode pinned in memory to avoid races + * with asynchronous emergency flush (which plays with + * JNODE_FLUSH_RESERVED bit). + */ + return WITH_DATA(node, plug->forward_alloc_formatted(node, + parent_coord, + pos)); +} + + +/* JNODE INTERFACE */ + +/* Lock a node (if formatted) and then get its parent locked, set the child's + coordinate in the parent. If the child is the root node, the above_root + znode is returned but the coord is not set. This function may cause atom + fusion, but it is only used for read locks (at this point) and therefore + fusion only occurs when the parent is already dirty. 
*/ +/* Hans adds this note: remember to ask how expensive this operation is vs. + storing parent pointer in jnodes. */ +static int +jnode_lock_parent_coord(jnode * node, + coord_t *coord, + lock_handle * parent_lh, + load_count * parent_zh, + znode_lock_mode parent_mode, int try) +{ + int ret; + + assert("edward-53", jnode_is_unformatted(node) || jnode_is_znode(node)); + assert("edward-54", jnode_is_unformatted(node) + || znode_is_any_locked(JZNODE(node))); + + if (!jnode_is_znode(node)) { + reiser4_key key; + tree_level stop_level = TWIG_LEVEL; + lookup_bias bias = FIND_EXACT; + + assert("edward-168", !(jnode_get_type(node) == JNODE_BITMAP)); + + /* The case when node is not znode, but can have parent coord + (unformatted node, node which represents cluster page, + etc..). Generate a key for the appropriate entry, search + in the tree using coord_by_key, which handles locking for + us. */ + + /* + * nothing is locked at this moment, so, nothing prevents + * concurrent truncate from removing jnode from inode. To + * prevent this spin-lock jnode. jnode can be truncated just + * after call to the jnode_build_key(), but this is ok, + * because coord_by_key() will just fail to find appropriate + * extent. 
+ */ + spin_lock_jnode(node); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + jnode_build_key(node, &key); + ret = 0; + } else + ret = RETERR(-ENOENT); + spin_unlock_jnode(node); + + if (ret != 0) + return ret; + + if (jnode_is_cluster_page(node)) + stop_level = LEAF_LEVEL; + + assert("jmacd-1812", coord != NULL); + + ret = coord_by_key(jnode_get_tree(node), &key, coord, parent_lh, + parent_mode, bias, stop_level, stop_level, + CBK_UNIQUE, NULL/*ra_info */); + switch (ret) { + case CBK_COORD_NOTFOUND: + assert("edward-1038", + ergo(jnode_is_cluster_page(node), + JF_ISSET(node, JNODE_HEARD_BANSHEE))); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) + warning("nikita-3177", "Parent not found"); + return ret; + case CBK_COORD_FOUND: + if (coord->between != AT_UNIT) { + /* FIXME: comment needed */ + done_lh(parent_lh); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + warning("nikita-3178", + "Found but not happy: %i", + coord->between); + } + return RETERR(-ENOENT); + } + ret = incr_load_count_znode(parent_zh, parent_lh->node); + if (ret != 0) + return ret; + /* if (jnode_is_cluster_page(node)) { + races with write() are possible + check_child_cluster (parent_lh->node); + } + */ + break; + default: + return ret; + } + + } else { + int flags; + znode *z; + + z = JZNODE(node); + /* Formatted node case: */ + assert("jmacd-2061", !znode_is_root(z)); + + flags = GN_ALLOW_NOT_CONNECTED; + if (try) + flags |= GN_TRY_LOCK; + + ret = + reiser4_get_parent_flags(parent_lh, z, parent_mode, flags); + if (ret != 0) + /* -E_REPEAT is ok here, it is handled by the caller. */ + return ret; + + /* Make the child's position "hint" up-to-date. (Unless above + root, which caller must check.) 
*/ + if (coord != NULL) { + + ret = incr_load_count_znode(parent_zh, parent_lh->node); + if (ret != 0) { + warning("jmacd-976812386", + "incr_load_count_znode failed: %d", + ret); + return ret; + } + + ret = find_child_ptr(parent_lh->node, z, coord); + if (ret != 0) { + warning("jmacd-976812", + "find_child_ptr failed: %d", ret); + return ret; + } + } + } + + return 0; +} + +/* Get the (locked) next neighbor of a znode which is dirty and a member of the + same atom. If there is no next neighbor or the neighbor is not in memory or + if there is a neighbor but it is not dirty or not in the same atom, + -E_NO_NEIGHBOR is returned. In some cases the slum may include nodes which + are not dirty, if so @check_dirty should be 0 */ +static int neighbor_in_slum(znode * node, /* starting point */ + lock_handle * lock, /* lock on starting point */ + sideof side, /* left or right direction we + seek the next node in */ + znode_lock_mode mode, /* kind of lock we want */ + int check_dirty, /* true if the neighbor should + be dirty */ + int use_upper_levels /* get neighbor by going though + upper levels */) +{ + int ret; + int flags; + + assert("jmacd-6334", znode_is_connected(node)); + + flags = GN_SAME_ATOM | (side == LEFT_SIDE ? GN_GO_LEFT : 0); + if (use_upper_levels) + flags |= GN_CAN_USE_UPPER_LEVELS; + + ret = reiser4_get_neighbor(lock, node, mode, flags); + if (ret) { + /* May return -ENOENT or -E_NO_NEIGHBOR. */ + /* FIXME(C): check EINVAL, E_DEADLOCK */ + if (ret == -ENOENT) + ret = RETERR(-E_NO_NEIGHBOR); + return ret; + } + if (!check_dirty) + return 0; + /* Check dirty bit of locked znode, no races here */ + if (JF_ISSET(ZJNODE(lock->node), JNODE_DIRTY)) + return 0; + + done_lh(lock); + return RETERR(-E_NO_NEIGHBOR); +} + +/* Return true if two znodes have the same parent. This is called with both + nodes write-locked (for squeezing) so no tree lock is needed. 
*/ +static int znode_same_parents(znode * a, znode * b) +{ + int result; + + assert("jmacd-7011", znode_is_write_locked(a)); + assert("jmacd-7012", znode_is_write_locked(b)); + + /* We lock the whole tree for this check.... I really don't like whole + * tree locks... -Hans */ + read_lock_tree(znode_get_tree(a)); + result = (znode_parent(a) == znode_parent(b)); + read_unlock_tree(znode_get_tree(a)); + return result; +} + +/* FLUSH SCAN */ + +/* Initialize the flush_scan data structure. */ +static void scan_init(flush_scan * scan) +{ + memset(scan, 0, sizeof(*scan)); + init_lh(&scan->node_lock); + init_lh(&scan->parent_lock); + init_load_count(&scan->parent_load); + init_load_count(&scan->node_load); + coord_init_invalid(&scan->parent_coord, NULL); +} + +/* Release any resources held by the flush scan, e.g. release locks, + free memory, etc. */ +static void scan_done(flush_scan * scan) +{ + done_load_count(&scan->node_load); + if (scan->node != NULL) { + jput(scan->node); + scan->node = NULL; + } + done_load_count(&scan->parent_load); + done_lh(&scan->parent_lock); + done_lh(&scan->node_lock); +} + +/* Returns true if flush scanning is finished. */ +int reiser4_scan_finished(flush_scan * scan) +{ + return scan->stop || (scan->direction == RIGHT_SIDE && + scan->count >= scan->max_count); +} + +/* Return true if the scan should continue to the @tonode. True if the node + meets the same_slum_check condition. If not, deref the "left" node and stop + the scan. */ +int reiser4_scan_goto(flush_scan * scan, jnode * tonode) +{ + int go = same_slum_check(scan->node, tonode, 1, 0); + + if (!go) { + scan->stop = 1; + jput(tonode); + } + + return go; +} + +/* Set the current scan->node, refcount it, increment count by the @add_count + (number to count, e.g., skipped unallocated nodes), deref previous current, + and copy the current parent coordinate. 
*/ +int +scan_set_current(flush_scan * scan, jnode * node, unsigned add_count, + const coord_t *parent) +{ + /* Release the old references, take the new reference. */ + done_load_count(&scan->node_load); + + if (scan->node != NULL) + jput(scan->node); + scan->node = node; + scan->count += add_count; + + /* This next stmt is somewhat inefficient. The reiser4_scan_extent() + code could delay this update step until it finishes and update the + parent_coord only once. It did that before, but there was a bug and + this was the easiest way to make it correct. */ + if (parent != NULL) + coord_dup(&scan->parent_coord, parent); + + /* Failure may happen at the incr_load_count call, but the caller can + assume the reference is safely taken. */ + return incr_load_count_jnode(&scan->node_load, node); +} + +/* Return true if scanning in the leftward direction. */ +int reiser4_scanning_left(flush_scan * scan) +{ + return scan->direction == LEFT_SIDE; +} + +/* Performs leftward scanning starting from either kind of node. Counts the + starting node. The right-scan object is passed in for the left-scan in order + to copy the parent of an unformatted starting position. This way we avoid + searching for the unformatted node's parent when scanning in each direction. + If we search for the parent once it is set in both scan objects. The limit + parameter tells flush-scan when to stop. + + Rapid scanning is used only during scan_left, where we are interested in + finding the 'leftpoint' where we begin flushing. We are interested in + stopping at the left child of a twig that does not have a dirty left + neighbour. THIS IS A SPECIAL CASE. The problem is finding a way to flush only + those nodes without unallocated children, and it is difficult to solve in the + bottom-up flushing algorithm we are currently using. 
The problem can be
+   solved by scanning left at every level as we go upward, but this would
+   basically bring us back to using a top-down allocation strategy, which we
+   already tried (see BK history from May 2002), and has a different set of
+   problems. The top-down strategy makes avoiding unallocated children easier,
+   but makes it difficult to properly flush dirty children with clean parents
+   that would otherwise stop the top-down flush, only later to dirty the parent
+   once the children are flushed. So we solve the problem in the bottom-up
+   algorithm with a special case for twigs and leaves only.
+
+   The first step in solving the problem is this rapid leftward scan. After we
+   determine that there are at least enough nodes counted to qualify for
+   FLUSH_RELOCATE_THRESHOLD we are no longer interested in the exact count, we
+   are only interested in finding the best place to start the flush.
+
+   We could choose one of two possibilities:
+
+   1. Stop at the leftmost child (of a twig) that does not have a dirty left
+   neighbor. This requires checking one leaf per rapid-scan twig.
+
+   2. Stop at the leftmost child (of a twig) where there are no dirty children
+   of the twig to the left. This requires checking possibly all of the in-memory
+   children of each twig during the rapid scan.
+
+   For now we implement the first policy.
+*/
+static int
+scan_left(flush_scan * scan, flush_scan * right, jnode * node, unsigned limit)
+{
+	int ret = 0;
+
+	scan->max_count = limit;
+	scan->direction = LEFT_SIDE;
+
+	ret = scan_set_current(scan, jref(node), 1, NULL);
+	if (ret != 0)
+		return ret;
+
+	ret = scan_common(scan, right);
+	if (ret != 0)
+		return ret;
+
+	/* Before rapid scanning, we need a lock on scan->node so that we can
+	   get its parent, only if formatted. 
*/ + if (jnode_is_znode(scan->node)) { + ret = longterm_lock_znode(&scan->node_lock, JZNODE(scan->node), + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI); + } + + /* Rapid_scan would go here (with limit set to FLUSH_RELOCATE_THRESHOLD) + */ + return ret; +} + +/* Performs rightward scanning... Does not count the starting node. The limit + parameter is described in scan_left. If the starting node is unformatted then + the parent_coord was already set during scan_left. The rapid_after parameter + is not used during right-scanning. + + scan_right is only called if the scan_left operation does not count at least + FLUSH_RELOCATE_THRESHOLD nodes for flushing. Otherwise, the limit parameter + is set to the difference between scan-left's count and + FLUSH_RELOCATE_THRESHOLD, meaning scan-right counts as high as + FLUSH_RELOCATE_THRESHOLD and then stops. */ +static int scan_right(flush_scan * scan, jnode * node, unsigned limit) +{ + int ret; + + scan->max_count = limit; + scan->direction = RIGHT_SIDE; + + ret = scan_set_current(scan, jref(node), 0, NULL); + if (ret != 0) + return ret; + + return scan_common(scan, NULL); +} + +/* Common code to perform left or right scanning. */ +static int scan_common(flush_scan * scan, flush_scan * other) +{ + int ret; + + assert("nikita-2376", scan->node != NULL); + assert("edward-54", jnode_is_unformatted(scan->node) + || jnode_is_znode(scan->node)); + + /* Special case for starting at an unformatted node. Optimization: we + only want to search for the parent (which requires a tree traversal) + once. Obviously, we shouldn't have to call it once for the left scan + and once for the right scan. For this reason, if we search for the + parent during scan-left we then duplicate the coord/lock/load into + the scan-right object. 
*/ + if (jnode_is_unformatted(scan->node)) { + ret = scan_unformatted(scan, other); + if (ret != 0) + return ret; + } + /* This loop expects to start at a formatted position and performs + chaining of formatted regions */ + while (!reiser4_scan_finished(scan)) { + + ret = scan_formatted(scan); + if (ret != 0) + return ret; + } + + return 0; +} + +static int scan_unformatted(flush_scan * scan, flush_scan * other) +{ + int ret = 0; + int try = 0; + + if (!coord_is_invalid(&scan->parent_coord)) + goto scan; + + /* set parent coord from */ + if (!jnode_is_unformatted(scan->node)) { + /* formatted position */ + + lock_handle lock; + assert("edward-301", jnode_is_znode(scan->node)); + init_lh(&lock); + + /* + * when flush starts from unformatted node, first thing it + * does is tree traversal to find formatted parent of starting + * node. This parent is then kept lock across scans to the + * left and to the right. This means that during scan to the + * left we cannot take left-ward lock, because this is + * dead-lock prone. So, if we are scanning to the left and + * there is already lock held by this thread, + * jnode_lock_parent_coord() should use try-lock. + */ + try = reiser4_scanning_left(scan) + && !lock_stack_isclean(get_current_lock_stack()); + /* Need the node locked to get the parent lock, We have to + take write lock since there is at least one call path + where this znode is already write-locked by us. */ + ret = + longterm_lock_znode(&lock, JZNODE(scan->node), + ZNODE_WRITE_LOCK, + reiser4_scanning_left(scan) ? + ZNODE_LOCK_LOPRI : + ZNODE_LOCK_HIPRI); + if (ret != 0) + /* EINVAL or E_DEADLOCK here mean... try again! At this + point we've scanned too far and can't back out, just + start over. 
*/ + return ret; + + ret = jnode_lock_parent_coord(scan->node, + &scan->parent_coord, + &scan->parent_lock, + &scan->parent_load, + ZNODE_WRITE_LOCK, try); + + /* FIXME(C): check EINVAL, E_DEADLOCK */ + done_lh(&lock); + if (ret == -E_REPEAT) { + scan->stop = 1; + return 0; + } + if (ret) + return ret; + + } else { + /* unformatted position */ + + ret = + jnode_lock_parent_coord(scan->node, &scan->parent_coord, + &scan->parent_lock, + &scan->parent_load, + ZNODE_WRITE_LOCK, try); + + if (IS_CBKERR(ret)) + return ret; + + if (ret == CBK_COORD_NOTFOUND) + /* FIXME(C): check EINVAL, E_DEADLOCK */ + return ret; + + /* parent was found */ + assert("jmacd-8661", other != NULL); + /* Duplicate the reference into the other flush_scan. */ + coord_dup(&other->parent_coord, &scan->parent_coord); + copy_lh(&other->parent_lock, &scan->parent_lock); + copy_load_count(&other->parent_load, &scan->parent_load); + } +scan: + return scan_by_coord(scan); +} + +/* Performs left- or rightward scanning starting from a formatted node. Follow + left pointers under tree lock as long as: + + - node->left/right is non-NULL + - node->left/right is connected, dirty + - node->left/right belongs to the same atom + - scan has not reached maximum count +*/ +static int scan_formatted(flush_scan * scan) +{ + int ret; + znode *neighbor = NULL; + + assert("jmacd-1401", !reiser4_scan_finished(scan)); + + do { + znode *node = JZNODE(scan->node); + + /* Node should be connected, but if not stop the scan. */ + if (!znode_is_connected(node)) { + scan->stop = 1; + break; + } + + /* Lock the tree, check-for and reference the next sibling. */ + read_lock_tree(znode_get_tree(node)); + + /* It may be that a node is inserted or removed between a node + and its left sibling while the tree lock is released, but the + flush-scan count does not need to be precise. Thus, we + release the tree lock as soon as we get the neighboring node. + */ + neighbor = + reiser4_scanning_left(scan) ? 
node->left : node->right; + if (neighbor != NULL) + zref(neighbor); + + read_unlock_tree(znode_get_tree(node)); + + /* If neighbor is NULL at the leaf level, need to check for an + unformatted sibling using the parent--break in any case. */ + if (neighbor == NULL) + break; + + /* Check the condition for going left, break if it is not met. + This also releases (jputs) the neighbor if false. */ + if (!reiser4_scan_goto(scan, ZJNODE(neighbor))) + break; + + /* Advance the flush_scan state to the left, repeat. */ + ret = scan_set_current(scan, ZJNODE(neighbor), 1, NULL); + if (ret != 0) + return ret; + + } while (!reiser4_scan_finished(scan)); + + /* If neighbor is NULL then we reached the end of a formatted region, or + else the sibling is out of memory, now check for an extent to the + left (as long as LEAF_LEVEL). */ + if (neighbor != NULL || jnode_get_level(scan->node) != LEAF_LEVEL + || reiser4_scan_finished(scan)) { + scan->stop = 1; + return 0; + } + /* Otherwise, calls scan_by_coord for the right(left)most item of the + left(right) neighbor on the parent level, then possibly continue. */ + + coord_init_invalid(&scan->parent_coord, NULL); + return scan_unformatted(scan, NULL); +} + +/* NOTE-EDWARD: + This scans adjacent items of the same type and calls scan flush plugin for + each one. Performs left(right)ward scanning starting from a (possibly) + unformatted node. If we start from unformatted node, then we continue only if + the next neighbor is also unformatted. When called from scan_formatted, we + skip first iteration (to make sure that right(left)most item of the + left(right) neighbor on the parent level is of the same type and set + appropriate coord). 
*/ +static int scan_by_coord(flush_scan * scan) +{ + int ret = 0; + int scan_this_coord; + lock_handle next_lock; + load_count next_load; + coord_t next_coord; + jnode *child; + item_plugin *iplug; + + init_lh(&next_lock); + init_load_count(&next_load); + scan_this_coord = (jnode_is_unformatted(scan->node) ? 1 : 0); + + /* set initial item id */ + iplug = item_plugin_by_coord(&scan->parent_coord); + + for (; !reiser4_scan_finished(scan); scan_this_coord = 1) { + if (scan_this_coord) { + /* Here we expect that unit is scannable. it would not + * be so due to race with extent->tail conversion. */ + if (iplug->f.scan == NULL) { + scan->stop = 1; + ret = -E_REPEAT; + /* skip the check at the end. */ + goto race; + } + + ret = iplug->f.scan(scan); + if (ret != 0) + goto exit; + + if (reiser4_scan_finished(scan)) { + checkchild(scan); + break; + } + } else { + /* the same race against truncate as above is possible + * here, it seems */ + + /* NOTE-JMACD: In this case, apply the same end-of-node + logic but don't scan the first coordinate. */ + assert("jmacd-1231", + item_is_internal(&scan->parent_coord)); + } + + if (iplug->f.utmost_child == NULL + || znode_get_level(scan->parent_coord.node) != TWIG_LEVEL) { + /* stop this coord and continue on parrent level */ + ret = + scan_set_current(scan, + ZJNODE(zref + (scan->parent_coord.node)), + 1, NULL); + if (ret != 0) + goto exit; + break; + } + + /* Either way, the invariant is that scan->parent_coord is set + to the parent of scan->node. Now get the next unit. */ + coord_dup(&next_coord, &scan->parent_coord); + coord_sideof_unit(&next_coord, scan->direction); + + /* If off-the-end of the twig, try the next twig. */ + if (coord_is_after_sideof_unit(&next_coord, scan->direction)) { + /* We take the write lock because we may start flushing + * from this coordinate. 
*/ + ret = neighbor_in_slum(next_coord.node, + &next_lock, + scan->direction, + ZNODE_WRITE_LOCK, + 1 /* check dirty */, + 0 /* don't go though upper + levels */); + if (ret == -E_NO_NEIGHBOR) { + scan->stop = 1; + ret = 0; + break; + } + + if (ret != 0) + goto exit; + + ret = incr_load_count_znode(&next_load, next_lock.node); + if (ret != 0) + goto exit; + + coord_init_sideof_unit(&next_coord, next_lock.node, + sideof_reverse(scan->direction)); + } + + iplug = item_plugin_by_coord(&next_coord); + + /* Get the next child. */ + ret = + iplug->f.utmost_child(&next_coord, + sideof_reverse(scan->direction), + &child); + if (ret != 0) + goto exit; + /* If the next child is not in memory, or, item_utmost_child + failed (due to race with unlink, most probably), stop + here. */ + if (child == NULL || IS_ERR(child)) { + scan->stop = 1; + checkchild(scan); + break; + } + + assert("nikita-2374", jnode_is_unformatted(child) + || jnode_is_znode(child)); + + /* See if it is dirty, part of the same atom. */ + if (!reiser4_scan_goto(scan, child)) { + checkchild(scan); + break; + } + + /* If so, make this child current. */ + ret = scan_set_current(scan, child, 1, &next_coord); + if (ret != 0) + goto exit; + + /* Now continue. If formatted we release the parent lock and + return, then proceed. */ + if (jnode_is_znode(child)) + break; + + /* Otherwise, repeat the above loop with next_coord. */ + if (next_load.node != NULL) { + done_lh(&scan->parent_lock); + move_lh(&scan->parent_lock, &next_lock); + move_load_count(&scan->parent_load, &next_load); + } + } + + assert("jmacd-6233", + reiser4_scan_finished(scan) || jnode_is_znode(scan->node)); +exit: + checkchild(scan); +race: /* skip the above check */ + if (jnode_is_znode(scan->node)) { + done_lh(&scan->parent_lock); + done_load_count(&scan->parent_load); + } + + done_load_count(&next_load); + done_lh(&next_lock); + return ret; +} + +/* FLUSH POS HELPERS */ + +/* Initialize the fields of a flush_position. 
*/
+static void pos_init(flush_pos_t *pos)
+{
+	memset(pos, 0, sizeof *pos);
+
+	pos->state = POS_INVALID;
+	coord_init_invalid(&pos->coord, NULL);
+	init_lh(&pos->lock);
+	init_load_count(&pos->load);
+
+	reiser4_blocknr_hint_init(&pos->preceder);
+}
+
+/* The flush loop inside squalloc periodically checks pos_valid to determine
+   when "enough flushing" has been performed. This will return true until one
+   of the following conditions is met:
+
+   1. the number of flush-queued nodes has reached the kernel-supplied
+   "int *nr_to_flush" parameter, meaning we have flushed as many blocks as the
+   kernel requested. When flushing to commit, this parameter is NULL.
+
+   2. pos_stop() is called because squalloc discovers that the "next" node in
+   the flush order is either non-existent, not dirty, or not in the same atom.
+*/
+
+static int pos_valid(flush_pos_t *pos)
+{
+	return pos->state != POS_INVALID;
+}
+
+/* Release any resources of a flush_position. Called when jnode_flush
+   finishes. */
+static void pos_done(flush_pos_t *pos)
+{
+	pos_stop(pos);
+	reiser4_blocknr_hint_done(&pos->preceder);
+	if (convert_data(pos))
+		free_convert_data(pos);
+}
+
+/* Reset the point and parent. Called during flush subroutines to terminate the
+   squalloc loop. */
+static int pos_stop(flush_pos_t *pos)
+{
+	pos->state = POS_INVALID;
+	done_lh(&pos->lock);
+	done_load_count(&pos->load);
+	coord_init_invalid(&pos->coord, NULL);
+
+	if (pos->child) {
+		jput(pos->child);
+		pos->child = NULL;
+	}
+
+	return 0;
+}
+
+/* Return the flush_position's block allocator hint. */
+reiser4_blocknr_hint *reiser4_pos_hint(flush_pos_t *pos)
+{
+	return &pos->preceder;
+}
+
+flush_queue_t *reiser4_pos_fq(flush_pos_t *pos)
+{
+	return pos->fq;
+}
+
+/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 90 + LocalWords: preceder + End: +*/ diff --git a/fs/reiser4/flush.h b/fs/reiser4/flush.h new file mode 100644 index 000000000000..270ea01e5c32 --- /dev/null +++ b/fs/reiser4/flush.h @@ -0,0 +1,290 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* DECLARATIONS: */ + +#if !defined(__REISER4_FLUSH_H__) +#define __REISER4_FLUSH_H__ + +#include "plugin/cluster.h" + +/* The flush_scan data structure maintains the state of an in-progress + flush-scan on a single level of the tree. A flush-scan is used for counting + the number of adjacent nodes to flush, which is used to determine whether we + should relocate, and it is also used to find a starting point for flush. A + flush-scan object can scan in both right and left directions via the + scan_left() and scan_right() interfaces. The right- and left-variations are + similar but perform different functions. When scanning left we (optionally + perform rapid scanning and then) longterm-lock the endpoint node. When + scanning right we are simply counting the number of adjacent, dirty nodes. */ +struct flush_scan { + + /* The current number of nodes scanned on this level. */ + unsigned count; + + /* There may be a maximum number of nodes for a scan on any single + level. When going leftward, max_count is determined by + FLUSH_SCAN_MAXNODES (see reiser4.h) */ + unsigned max_count; + + /* Direction: Set to one of the sideof enumeration: + { LEFT_SIDE, RIGHT_SIDE }. */ + sideof direction; + + /* Initially @stop is set to false then set true once some condition + stops the search (e.g., we found a clean node before reaching + max_count or we found a node belonging to another atom). */ + int stop; + + /* The current scan position. If @node is non-NULL then its reference + count has been incremented to reflect this reference. 
*/ + jnode *node; + + /* A handle for zload/zrelse of current scan position node. */ + load_count node_load; + + /* During left-scan, if the final position (a.k.a. endpoint node) is + formatted the node is locked using this lock handle. The endpoint + needs to be locked for transfer to the flush_position object after + scanning finishes. */ + lock_handle node_lock; + + /* When the position is unformatted, its parent, coordinate, and parent + zload/zrelse handle. */ + lock_handle parent_lock; + coord_t parent_coord; + load_count parent_load; + + /* The block allocator preceder hint. Sometimes flush_scan determines + what the preceder is and if so it sets it here, after which it is + copied into the flush_position. Otherwise, the preceder is computed + later. */ + reiser4_block_nr preceder_blk; +}; + +struct convert_item_info { + dc_item_stat d_cur; /* per-cluster status of the current item */ + dc_item_stat d_next; /* per-cluster status of the first item on + the right neighbor */ + int cluster_shift; /* disk cluster shift */ + flow_t flow; /* disk cluster data */ +}; + +struct convert_info { + int count; /* for squalloc terminating */ + item_plugin *iplug; /* current item plugin */ + struct convert_item_info *itm; /* current item info */ + struct cluster_handle clust; /* transform cluster */ + lock_handle right_lock; /* lock handle of the right neighbor */ + int right_locked; +}; + +typedef enum flush_position_state { + POS_INVALID, /* Invalid or stopped pos, do not continue slum + * processing */ + POS_ON_LEAF, /* pos points to already prepped, locked + * formatted node at leaf level */ + POS_ON_EPOINT, /* pos keeps a lock on twig level, "coord" field + * is used to traverse unformatted nodes */ + POS_TO_LEAF, /* pos is being moved to leaf level */ + POS_TO_TWIG, /* pos is being moved to twig level */ + POS_END_OF_TWIG, /* special case of POS_ON_TWIG, when coord is + * after rightmost unit of the current twig */ + POS_ON_INTERNAL /* same as POS_ON_LEAF, but points 
to internal
+				 * node */
+} flushpos_state_t;
+
+/* An encapsulation of the current flush point and all the parameters that are
+   passed through the entire squeeze-and-allocate stage of the flush routine.
+   A single flush_position object is constructed after left- and right-scanning
+   finishes. */
+struct flush_position {
+	flushpos_state_t state;
+
+	coord_t coord;		/* coord to traverse unformatted nodes */
+	lock_handle lock;	/* current lock we hold */
+	load_count load;	/* load status for current locked formatted node
+				 */
+	jnode *child;		/* for passing a reference to unformatted child
+				 * across pos state changes */
+
+	reiser4_blocknr_hint preceder;	/* The flush 'hint' state. */
+	int leaf_relocate;	/* True if enough leaf-level nodes were
+				 * found to suggest a relocate policy. */
+	int alloc_cnt;		/* The number of nodes allocated during squeeze
+				   and allocate. */
+	int prep_or_free_cnt;	/* The number of nodes prepared for write
+				   (allocate) or squeezed and freed. */
+	flush_queue_t *fq;
+	long *nr_written;	/* number of nodes submitted to disk */
+	int flags;		/* a copy of jnode_flush flags argument */
+
+	znode *prev_twig;	/* previous parent pointer value, used to catch
+				 * processing of new twig node */
+	struct convert_info *sq;	/* convert info */
+
+	unsigned long pos_in_unit;	/* for extents only. 
Position + within an extent unit of first + jnode of slum */ + long nr_to_write; /* number of unformatted nodes to handle on + flush */ +}; + +static inline int item_convert_count(flush_pos_t *pos) +{ + return pos->sq->count; +} +static inline void inc_item_convert_count(flush_pos_t *pos) +{ + pos->sq->count++; +} +static inline void set_item_convert_count(flush_pos_t *pos, int count) +{ + pos->sq->count = count; +} +static inline item_plugin *item_convert_plug(flush_pos_t *pos) +{ + return pos->sq->iplug; +} + +static inline struct convert_info *convert_data(flush_pos_t *pos) +{ + return pos->sq; +} + +static inline struct convert_item_info *item_convert_data(flush_pos_t *pos) +{ + assert("edward-955", convert_data(pos)); + return pos->sq->itm; +} + +static inline struct tfm_cluster *tfm_cluster_sq(flush_pos_t *pos) +{ + return &pos->sq->clust.tc; +} + +static inline struct tfm_stream *tfm_stream_sq(flush_pos_t *pos, + tfm_stream_id id) +{ + assert("edward-854", pos->sq != NULL); + return get_tfm_stream(tfm_cluster_sq(pos), id); +} + +static inline int convert_data_attached(flush_pos_t *pos) +{ + return convert_data(pos) != NULL && item_convert_data(pos) != NULL; +} + +#define should_convert_right_neighbor(pos) convert_data_attached(pos) + +/* Returns true if next node contains next item of the disk cluster + so item convert data should be moved to the right slum neighbor. 
+*/ +static inline int next_node_is_chained(flush_pos_t *pos) +{ + return convert_data_attached(pos) && + item_convert_data(pos)->d_next == DC_CHAINED_ITEM; +} + +/* + * Update "twin state" (d_cur, d_next) to assign a proper + * conversion mode in the next iteration of convert_node() + */ +static inline void update_chaining_state(flush_pos_t *pos, + int this_node /* where to proceed */) +{ + + assert("edward-1010", convert_data_attached(pos)); + + if (this_node) { + /* + * we want to perform one more iteration with the same item + */ + assert("edward-1013", + item_convert_data(pos)->d_cur == DC_FIRST_ITEM || + item_convert_data(pos)->d_cur == DC_CHAINED_ITEM); + assert("edward-1227", + item_convert_data(pos)->d_next == DC_AFTER_CLUSTER || + item_convert_data(pos)->d_next == DC_INVALID_STATE); + + item_convert_data(pos)->d_cur = DC_AFTER_CLUSTER; + item_convert_data(pos)->d_next = DC_INVALID_STATE; + } + else { + /* + * we want to proceed on right neighbor, which is chained + */ + assert("edward-1011", + item_convert_data(pos)->d_cur == DC_FIRST_ITEM || + item_convert_data(pos)->d_cur == DC_CHAINED_ITEM); + assert("edward-1012", + item_convert_data(pos)->d_next == DC_CHAINED_ITEM); + + item_convert_data(pos)->d_cur = DC_CHAINED_ITEM; + item_convert_data(pos)->d_next = DC_INVALID_STATE; + } +} + +#define SQUALLOC_THRESHOLD 256 + +static inline int should_terminate_squalloc(flush_pos_t *pos) +{ + return convert_data(pos) && + !item_convert_data(pos) && + item_convert_count(pos) >= SQUALLOC_THRESHOLD; +} + +#if REISER4_DEBUG +#define check_convert_info(pos) \ +do { \ + if (unlikely(should_convert_right_neighbor(pos))) { \ + warning("edward-1006", "unprocessed chained data"); \ + printk("d_cur = %d, d_next = %d, flow.len = %llu\n", \ + item_convert_data(pos)->d_cur, \ + item_convert_data(pos)->d_next, \ + item_convert_data(pos)->flow.length); \ + } \ +} while (0) +#else +#define check_convert_info(pos) +#endif /* REISER4_DEBUG */ + +void free_convert_data(flush_pos_t 
*pos); +/* used in extent.c */ +int scan_set_current(flush_scan * scan, jnode * node, unsigned add_size, + const coord_t *parent); +int reiser4_scan_finished(flush_scan * scan); +int reiser4_scanning_left(flush_scan * scan); +int reiser4_scan_goto(flush_scan * scan, jnode * tonode); +txn_atom *atom_locked_by_fq(flush_queue_t *fq); +int reiser4_alloc_extent(flush_pos_t *flush_pos); +squeeze_result squalloc_extent(znode *left, const coord_t *, flush_pos_t *, + reiser4_key *stop_key); +extern int reiser4_init_fqs(void); +extern void reiser4_done_fqs(void); + +#if REISER4_DEBUG + +extern void reiser4_check_fq(const txn_atom *atom); +extern atomic_t flush_cnt; + +#define check_preceder(blk) \ +assert("nikita-2588", blk < reiser4_block_count(reiser4_get_current_sb())); +extern void check_pos(flush_pos_t *pos); +#else +#define check_preceder(b) noop +#define check_pos(pos) noop +#endif + +/* __REISER4_FLUSH_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 90 + LocalWords: preceder + End: +*/ diff --git a/fs/reiser4/flush_queue.c b/fs/reiser4/flush_queue.c new file mode 100644 index 000000000000..8aa9c3625fef --- /dev/null +++ b/fs/reiser4/flush_queue.c @@ -0,0 +1,677 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "debug.h" +#include "super.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "page_cache.h" +#include "wander.h" +#include "vfs_ops.h" +#include "writeout.h" +#include "flush.h" + +#include +#include +#include +#include +#include + +/* A flush queue object is an accumulator for keeping jnodes prepared + by the jnode_flush() function for writing to disk. Those "queued" jnodes are + kept on the flush queue until memory pressure or atom commit asks + flush queues to write some or all from their jnodes. 
*/ + +/* + LOCKING: + + fq->guard spin lock protects fq->atom pointer and nothing else. fq->prepped + list protected by atom spin lock. fq->prepped list uses the following + locking: + + two ways to protect fq->prepped list for read-only list traversal: + + 1. atom spin-lock atom. + 2. fq is IN_USE, atom->nr_running_queues increased. + + and one for list modification: + + 1. atom is spin-locked and one condition is true: fq is IN_USE or + atom->nr_running_queues == 0. + + The deadlock-safe order for flush queues and atoms is: first lock atom, then + lock flush queue, then lock jnode. +*/ + +#define fq_in_use(fq) ((fq)->state & FQ_IN_USE) +#define fq_ready(fq) (!fq_in_use(fq)) + +#define mark_fq_in_use(fq) do { (fq)->state |= FQ_IN_USE; } while (0) +#define mark_fq_ready(fq) do { (fq)->state &= ~FQ_IN_USE; } while (0) + +/* get lock on atom from locked flush queue object */ +static txn_atom *atom_locked_by_fq_nolock(flush_queue_t *fq) +{ + /* This code is similar to jnode_get_atom(), look at it for the + * explanation. 
*/ + txn_atom *atom; + + assert_spin_locked(&(fq->guard)); + + while (1) { + atom = fq->atom; + if (atom == NULL) + break; + + if (spin_trylock_atom(atom)) + break; + + atomic_inc(&atom->refcount); + spin_unlock(&(fq->guard)); + spin_lock_atom(atom); + spin_lock(&(fq->guard)); + + if (fq->atom == atom) { + atomic_dec(&atom->refcount); + break; + } + + spin_unlock(&(fq->guard)); + atom_dec_and_unlock(atom); + spin_lock(&(fq->guard)); + } + + return atom; +} + +txn_atom *atom_locked_by_fq(flush_queue_t *fq) +{ + txn_atom *atom; + + spin_lock(&(fq->guard)); + atom = atom_locked_by_fq_nolock(fq); + spin_unlock(&(fq->guard)); + return atom; +} + +static void init_fq(flush_queue_t *fq) +{ + memset(fq, 0, sizeof *fq); + + atomic_set(&fq->nr_submitted, 0); + + INIT_LIST_HEAD(ATOM_FQ_LIST(fq)); + + init_waitqueue_head(&fq->wait); + spin_lock_init(&fq->guard); +} + +/* slab for flush queues */ +static struct kmem_cache *fq_slab; + +/** + * reiser4_init_fqs - create flush queue cache + * + * Initializes slab cache of flush queues. It is part of reiser4 module + * initialization. + */ +int reiser4_init_fqs(void) +{ + fq_slab = kmem_cache_create("fq", + sizeof(flush_queue_t), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (fq_slab == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_fqs - delete flush queue cache + * + * This is called on reiser4 module unloading or system shutdown. 
+ */ +void reiser4_done_fqs(void) +{ + destroy_reiser4_cache(&fq_slab); +} + +/* create new flush queue object */ +static flush_queue_t *create_fq(gfp_t gfp) +{ + flush_queue_t *fq; + + fq = kmem_cache_alloc(fq_slab, gfp); + if (fq) + init_fq(fq); + + return fq; +} + +/* adjust atom's and flush queue's counters of queued nodes */ +static void count_enqueued_node(flush_queue_t *fq) +{ + ON_DEBUG(fq->atom->num_queued++); +} + +static void count_dequeued_node(flush_queue_t *fq) +{ + assert("zam-993", fq->atom->num_queued > 0); + ON_DEBUG(fq->atom->num_queued--); +} + +/* attach flush queue object to the atom */ +static void attach_fq(txn_atom *atom, flush_queue_t *fq) +{ + assert_spin_locked(&(atom->alock)); + list_add(&fq->alink, &atom->flush_queues); + fq->atom = atom; + ON_DEBUG(atom->nr_flush_queues++); +} + +static void detach_fq(flush_queue_t *fq) +{ + assert_spin_locked(&(fq->atom->alock)); + + spin_lock(&(fq->guard)); + list_del_init(&fq->alink); + assert("vs-1456", fq->atom->nr_flush_queues > 0); + ON_DEBUG(fq->atom->nr_flush_queues--); + fq->atom = NULL; + spin_unlock(&(fq->guard)); +} + +/* destroy flush queue object */ +static void done_fq(flush_queue_t *fq) +{ + assert("zam-763", list_empty_careful(ATOM_FQ_LIST(fq))); + assert("zam-766", atomic_read(&fq->nr_submitted) == 0); + + kmem_cache_free(fq_slab, fq); +} + +/* */ +static void mark_jnode_queued(flush_queue_t *fq, jnode * node) +{ + JF_SET(node, JNODE_FLUSH_QUEUED); + count_enqueued_node(fq); +} + +/* Putting jnode into the flush queue. Both atom and jnode should be + spin-locked. 
*/ +void queue_jnode(flush_queue_t *fq, jnode * node) +{ + assert_spin_locked(&(node->guard)); + assert("zam-713", node->atom != NULL); + assert_spin_locked(&(node->atom->alock)); + assert("zam-716", fq->atom != NULL); + assert("zam-717", fq->atom == node->atom); + assert("zam-907", fq_in_use(fq)); + + assert("zam-714", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-826", JF_ISSET(node, JNODE_RELOC)); + assert("vs-1481", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("vs-1481", NODE_LIST(node) != FQ_LIST); + + mark_jnode_queued(fq, node); + list_move_tail(&node->capture_link, ATOM_FQ_LIST(fq)); + + ON_DEBUG(count_jnode(node->atom, node, NODE_LIST(node), + FQ_LIST, 1)); +} + +/* repeatable process for waiting io completion on a flush queue object */ +static int wait_io(flush_queue_t *fq, int *nr_io_errors) +{ + assert("zam-738", fq->atom != NULL); + assert_spin_locked(&(fq->atom->alock)); + assert("zam-736", fq_in_use(fq)); + assert("zam-911", list_empty_careful(ATOM_FQ_LIST(fq))); + + if (atomic_read(&fq->nr_submitted) != 0) { + struct super_block *super; + + spin_unlock_atom(fq->atom); + + assert("nikita-3013", reiser4_schedulable()); + + super = reiser4_get_current_sb(); + + /* FIXME: this is instead of blk_run_queues() */ + //blk_flush_plug(current); + + if (!(super->s_flags & MS_RDONLY)) + wait_event(fq->wait, + atomic_read(&fq->nr_submitted) == 0); + + /* Ask the caller to re-acquire the locks and call this + function again. Note: this technique is commonly used in + the txnmgr code. 
*/ + return -E_REPEAT; + } + + *nr_io_errors += atomic_read(&fq->nr_errors); + return 0; +} + +/* wait on I/O completion, re-submit dirty nodes to write */ +static int finish_fq(flush_queue_t *fq, int *nr_io_errors) +{ + int ret; + txn_atom *atom = fq->atom; + + assert("zam-801", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("zam-762", fq_in_use(fq)); + + ret = wait_io(fq, nr_io_errors); + if (ret) + return ret; + + detach_fq(fq); + done_fq(fq); + + reiser4_atom_send_event(atom); + + return 0; +} + +/* wait for all i/o for given atom to be completed, actually do one iteration + on that and return -E_REPEAT if there more iterations needed */ +static int finish_all_fq(txn_atom * atom, int *nr_io_errors) +{ + flush_queue_t *fq; + + assert_spin_locked(&(atom->alock)); + + if (list_empty_careful(&atom->flush_queues)) + return 0; + + list_for_each_entry(fq, &atom->flush_queues, alink) { + if (fq_ready(fq)) { + int ret; + + mark_fq_in_use(fq); + assert("vs-1247", fq->owner == NULL); + ON_DEBUG(fq->owner = current); + ret = finish_fq(fq, nr_io_errors); + + if (*nr_io_errors) + reiser4_handle_error(); + + if (ret) { + reiser4_fq_put(fq); + return ret; + } + + spin_unlock_atom(atom); + + return -E_REPEAT; + } + } + + /* All flush queues are in use; atom remains locked */ + return -EBUSY; +} + +/* wait all i/o for current atom */ +int current_atom_finish_all_fq(void) +{ + txn_atom *atom; + int nr_io_errors = 0; + int ret = 0; + + do { + while (1) { + atom = get_current_atom_locked(); + ret = finish_all_fq(atom, &nr_io_errors); + if (ret != -EBUSY) + break; + reiser4_atom_wait_event(atom); + } + } while (ret == -E_REPEAT); + + /* we do not need locked atom after this function finishes, SUCCESS or + -EBUSY are two return codes when atom remains locked after + finish_all_fq */ + if (!ret) + spin_unlock_atom(atom); + + assert_spin_not_locked(&(atom->alock)); + + if (ret) + return ret; + + if (nr_io_errors) + return RETERR(-EIO); + + return 0; +} + +/* change 
node->atom field for all jnode from given list */ +static void +scan_fq_and_update_atom_ref(struct list_head *list, txn_atom *atom) +{ + jnode *cur; + + list_for_each_entry(cur, list, capture_link) { + spin_lock_jnode(cur); + cur->atom = atom; + spin_unlock_jnode(cur); + } +} + +/* support for atom fusion operation */ +void reiser4_fuse_fq(txn_atom *to, txn_atom *from) +{ + flush_queue_t *fq; + + assert_spin_locked(&(to->alock)); + assert_spin_locked(&(from->alock)); + + list_for_each_entry(fq, &from->flush_queues, alink) { + scan_fq_and_update_atom_ref(ATOM_FQ_LIST(fq), to); + spin_lock(&(fq->guard)); + fq->atom = to; + spin_unlock(&(fq->guard)); + } + + list_splice_init(&from->flush_queues, to->flush_queues.prev); + +#if REISER4_DEBUG + to->num_queued += from->num_queued; + to->nr_flush_queues += from->nr_flush_queues; + from->nr_flush_queues = 0; +#endif +} + +#if REISER4_DEBUG +int atom_fq_parts_are_clean(txn_atom * atom) +{ + assert("zam-915", atom != NULL); + return list_empty_careful(&atom->flush_queues); +} +#endif + +/* + * Bio i/o completion routine for reiser4 write operations + */ +static void end_io_handler(struct bio *bio) +{ + int i; + int nr_errors = 0; + flush_queue_t *fq; + + assert("zam-958", bio_op(bio) == WRITE); + + /* we expect that bio->private is set to NULL or fq object which is used + * for synchronization and error counting. */ + fq = bio->bi_private; + /* Check all elements of io_vec for correct write completion. */ + for (i = 0; i < bio->bi_vcnt; i += 1) { + struct page *pg = bio->bi_io_vec[i].bv_page; + + if (bio->bi_status) { + SetPageError(pg); + nr_errors++; + } + + { + /* jnode WRITEBACK ("write is in progress bit") is + * atomically cleared here. 
*/ + jnode *node; + + assert("zam-736", pg != NULL); + assert("zam-736", PagePrivate(pg)); + node = jprivate(pg); + + JF_CLR(node, JNODE_WRITEBACK); + } + + end_page_writeback(pg); + put_page(pg); + } + + if (fq) { + /* count i/o error in fq object */ + atomic_add(nr_errors, &fq->nr_errors); + + /* If all write requests registered in this "fq" are done we up + * the waiter. */ + if (atomic_sub_and_test(bio->bi_vcnt, &fq->nr_submitted)) + wake_up(&fq->wait); + } + + bio_put(bio); +} + +/* Count I/O requests which will be submitted by @bio in given flush queues + @fq */ +void add_fq_to_bio(flush_queue_t *fq, struct bio *bio) +{ + bio->bi_private = fq; + bio->bi_end_io = end_io_handler; + + if (fq) + atomic_add(bio->bi_vcnt, &fq->nr_submitted); +} + +/* Move all queued nodes out from @fq->prepped list. */ +static void release_prepped_list(flush_queue_t *fq) +{ + txn_atom *atom; + + assert("zam-904", fq_in_use(fq)); + atom = atom_locked_by_fq(fq); + + while (!list_empty(ATOM_FQ_LIST(fq))) { + jnode *cur; + + cur = list_entry(ATOM_FQ_LIST(fq)->next, jnode, capture_link); + list_del_init(&cur->capture_link); + + count_dequeued_node(fq); + spin_lock_jnode(cur); + assert("nikita-3154", !JF_ISSET(cur, JNODE_OVRWR)); + assert("nikita-3154", JF_ISSET(cur, JNODE_RELOC)); + assert("nikita-3154", JF_ISSET(cur, JNODE_FLUSH_QUEUED)); + JF_CLR(cur, JNODE_FLUSH_QUEUED); + + if (JF_ISSET(cur, JNODE_DIRTY)) { + list_add_tail(&cur->capture_link, + ATOM_DIRTY_LIST(atom, + jnode_get_level(cur))); + ON_DEBUG(count_jnode(atom, cur, FQ_LIST, + DIRTY_LIST, 1)); + } else { + list_add_tail(&cur->capture_link, + ATOM_CLEAN_LIST(atom)); + ON_DEBUG(count_jnode(atom, cur, FQ_LIST, + CLEAN_LIST, 1)); + } + + spin_unlock_jnode(cur); + } + + if (--atom->nr_running_queues == 0) + reiser4_atom_send_event(atom); + + spin_unlock_atom(atom); +} + +/* Submit write requests for nodes on the already filled flush queue @fq. + + @fq: flush queue object which contains jnodes we can (and will) write. 
+ @return: number of submitted blocks (>=0) if success, otherwise -- an error + code (<0). */ +int reiser4_write_fq(flush_queue_t *fq, long *nr_submitted, int flags) +{ + int ret; + txn_atom *atom; + + while (1) { + atom = atom_locked_by_fq(fq); + assert("zam-924", atom); + /* do not write fq in parallel. */ + if (atom->nr_running_queues == 0 + || !(flags & WRITEOUT_SINGLE_STREAM)) + break; + reiser4_atom_wait_event(atom); + } + + atom->nr_running_queues++; + spin_unlock_atom(atom); + + ret = write_jnode_list(ATOM_FQ_LIST(fq), fq, nr_submitted, flags); + release_prepped_list(fq); + + return ret; +} + +/* Getting flush queue object for exclusive use by one thread. May require + several iterations which is indicated by -E_REPEAT return code. + + This function does not contain code for obtaining an atom lock because an + atom lock is obtained by different ways in different parts of reiser4, + usually it is current atom, but we need a possibility for getting fq for the + atom of given jnode. 
*/ +static int fq_by_atom_gfp(txn_atom *atom, flush_queue_t **new_fq, gfp_t gfp) +{ + flush_queue_t *fq; + + assert_spin_locked(&(atom->alock)); + + fq = list_entry(atom->flush_queues.next, flush_queue_t, alink); + while (&atom->flush_queues != &fq->alink) { + spin_lock(&(fq->guard)); + + if (fq_ready(fq)) { + mark_fq_in_use(fq); + assert("vs-1246", fq->owner == NULL); + ON_DEBUG(fq->owner = current); + spin_unlock(&(fq->guard)); + + if (*new_fq) + done_fq(*new_fq); + + *new_fq = fq; + + return 0; + } + + spin_unlock(&(fq->guard)); + + fq = list_entry(fq->alink.next, flush_queue_t, alink); + } + + /* Use previously allocated fq object */ + if (*new_fq) { + mark_fq_in_use(*new_fq); + assert("vs-1248", (*new_fq)->owner == 0); + ON_DEBUG((*new_fq)->owner = current); + attach_fq(atom, *new_fq); + + return 0; + } + + spin_unlock_atom(atom); + + *new_fq = create_fq(gfp); + + if (*new_fq == NULL) + return RETERR(-ENOMEM); + + return RETERR(-E_REPEAT); +} + +int reiser4_fq_by_atom(txn_atom * atom, flush_queue_t **new_fq) +{ + return fq_by_atom_gfp(atom, new_fq, reiser4_ctx_gfp_mask_get()); +} + +/* A wrapper around reiser4_fq_by_atom for getting a flush queue + object for current atom, if success fq->atom remains locked. 
*/ +flush_queue_t *get_fq_for_current_atom(void) +{ + flush_queue_t *fq = NULL; + txn_atom *atom; + int ret; + + do { + atom = get_current_atom_locked(); + ret = reiser4_fq_by_atom(atom, &fq); + } while (ret == -E_REPEAT); + + if (ret) + return ERR_PTR(ret); + return fq; +} + +/* Releasing flush queue object after exclusive use */ +void reiser4_fq_put_nolock(flush_queue_t *fq) +{ + assert("zam-747", fq->atom != NULL); + assert("zam-902", list_empty_careful(ATOM_FQ_LIST(fq))); + mark_fq_ready(fq); + assert("vs-1245", fq->owner == current); + ON_DEBUG(fq->owner = NULL); +} + +void reiser4_fq_put(flush_queue_t *fq) +{ + txn_atom *atom; + + spin_lock(&(fq->guard)); + atom = atom_locked_by_fq_nolock(fq); + + assert("zam-746", atom != NULL); + + reiser4_fq_put_nolock(fq); + reiser4_atom_send_event(atom); + + spin_unlock(&(fq->guard)); + spin_unlock_atom(atom); +} + +/* A part of atom object initialization related to the embedded flush queue + list head */ + +void init_atom_fq_parts(txn_atom *atom) +{ + INIT_LIST_HEAD(&atom->flush_queues); +} + +#if REISER4_DEBUG + +void reiser4_check_fq(const txn_atom *atom) +{ + /* check number of nodes on all atom's flush queues */ + flush_queue_t *fq; + int count; + struct list_head *pos; + + count = 0; + list_for_each_entry(fq, &atom->flush_queues, alink) { + spin_lock(&(fq->guard)); + /* calculate number of jnodes on fq' list of prepped jnodes */ + list_for_each(pos, ATOM_FQ_LIST(fq)) + count++; + spin_unlock(&(fq->guard)); + } + if (count != atom->fq) + warning("", "fq counter %d, real %d\n", atom->fq, count); + +} + +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/forward.h b/fs/reiser4/forward.h new file mode 100644 index 000000000000..9170c2bbab38 --- /dev/null +++ b/fs/reiser4/forward.h @@ -0,0 +1,259 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + 
reiser4/README */ + +/* Forward declarations. Thank you Kernighan. */ + +#if !defined(__REISER4_FORWARD_H__) +#define __REISER4_FORWARD_H__ + +#include +#include + +typedef struct zlock zlock; +typedef struct lock_stack lock_stack; +typedef struct lock_handle lock_handle; +typedef struct znode znode; +typedef struct flow flow_t; +typedef struct coord coord_t; +typedef struct tree_access_pointer tap_t; +typedef struct reiser4_object_create_data reiser4_object_create_data; +typedef union reiser4_plugin reiser4_plugin; +typedef __u16 reiser4_plugin_id; +typedef __u64 reiser4_plugin_groups; +typedef struct item_plugin item_plugin; +typedef struct jnode_plugin jnode_plugin; +typedef struct reiser4_item_data reiser4_item_data; +typedef union reiser4_key reiser4_key; +typedef struct reiser4_tree reiser4_tree; +typedef struct carry_cut_data carry_cut_data; +typedef struct carry_kill_data carry_kill_data; +typedef struct carry_tree_op carry_tree_op; +typedef struct carry_tree_node carry_tree_node; +typedef struct carry_plugin_info carry_plugin_info; +typedef struct reiser4_journal reiser4_journal; +typedef struct txn_atom txn_atom; +typedef struct txn_handle txn_handle; +typedef struct txn_mgr txn_mgr; +typedef struct reiser4_dir_entry_desc reiser4_dir_entry_desc; +typedef struct reiser4_context reiser4_context; +typedef struct carry_level carry_level; +typedef struct blocknr_set_entry blocknr_set_entry; +typedef struct blocknr_list_entry blocknr_list_entry; +/* super_block->s_fs_info points to this */ +typedef struct reiser4_super_info_data reiser4_super_info_data; +/* next two objects are fields of reiser4_super_info_data */ +typedef struct reiser4_oid_allocator reiser4_oid_allocator; +typedef struct reiser4_space_allocator reiser4_space_allocator; + +typedef struct flush_scan flush_scan; +typedef struct flush_position flush_pos_t; + +typedef unsigned short pos_in_node_t; +#define MAX_POS_IN_NODE 65535 + +typedef struct jnode jnode; +typedef struct reiser4_blocknr_hint 
reiser4_blocknr_hint; + +typedef struct uf_coord uf_coord_t; +typedef struct hint hint_t; + +typedef struct ktxnmgrd_context ktxnmgrd_context; + +struct inode; +struct page; +struct file; +struct dentry; +struct super_block; + +/* return values of coord_by_key(). cbk == coord_by_key */ +typedef enum { + CBK_COORD_FOUND = 0, + CBK_COORD_NOTFOUND = -ENOENT, +} lookup_result; + +/* results of lookup with directory file */ +typedef enum { + FILE_NAME_FOUND = 0, + FILE_NAME_NOTFOUND = -ENOENT, + FILE_IO_ERROR = -EIO, /* FIXME: it seems silly to have special OOM, + IO_ERROR return codes for each search. */ + FILE_OOM = -ENOMEM /* FIXME: it seems silly to have special OOM, + IO_ERROR return codes for each search. */ +} file_lookup_result; + +/* behaviors of lookup. If coord we are looking for is actually in a tree, + both coincide. */ +typedef enum { + /* search exactly for the coord with key given */ + FIND_EXACT, + /* search for coord with the maximal key not greater than one + given */ + FIND_MAX_NOT_MORE_THAN /*LEFT_SLANT_BIAS */ +} lookup_bias; + +typedef enum { + /* number of leaf level of the tree + The fake root has (tree_level=0). */ + LEAF_LEVEL = 1, + + /* number of level one above leaf level of the tree. + + It is supposed that internal tree used by reiser4 to store file + system data and meta data will have height 2 initially (when + created by mkfs). + */ + TWIG_LEVEL = 2, +} tree_level; + +/* The "real" maximum ztree height is the 0-origin size of any per-level + array, since the zero'th level is not used. */ +#define REAL_MAX_ZTREE_HEIGHT (REISER4_MAX_ZTREE_HEIGHT-LEAF_LEVEL) + +/* enumeration of possible mutual position of item and coord. This enum is + return type of ->is_in_item() item plugin method which see. 
*/ +typedef enum { + /* coord is on the left of an item */ + IP_ON_THE_LEFT, + /* coord is inside item */ + IP_INSIDE, + /* coord is inside item, but to the right of the rightmost unit of + this item */ + IP_RIGHT_EDGE, + /* coord is on the right of an item */ + IP_ON_THE_RIGHT +} interposition; + +/* type of lock to acquire on znode before returning it to caller */ +typedef enum { + ZNODE_NO_LOCK = 0, + ZNODE_READ_LOCK = 1, + ZNODE_WRITE_LOCK = 2, +} znode_lock_mode; + +/* type of lock request */ +typedef enum { + ZNODE_LOCK_LOPRI = 0, + ZNODE_LOCK_HIPRI = (1 << 0), + + /* By setting the ZNODE_LOCK_NONBLOCK flag in a lock request the call to + longterm_lock_znode will not sleep waiting for the lock to become + available. If the lock is unavailable, reiser4_znode_lock will + immediately return the value -E_REPEAT. */ + ZNODE_LOCK_NONBLOCK = (1 << 1), + /* An option for longterm_lock_znode which prevents atom fusion */ + ZNODE_LOCK_DONT_FUSE = (1 << 2) +} znode_lock_request; + +typedef enum { READ_OP = 0, WRITE_OP = 1 } rw_op; + +/* used to specify direction of shift. These must be -1 and 1 */ +typedef enum { + SHIFT_LEFT = 1, + SHIFT_RIGHT = -1 +} shift_direction; + +typedef enum { + LEFT_SIDE, + RIGHT_SIDE +} sideof; + +#define reiser4_round_up(value, order) \ + ((typeof(value))(((long) (value) + (order) - 1U) & \ + ~((order) - 1))) + +/* values returned by squalloc_right_neighbor and its auxiliary functions */ +typedef enum { + /* unit of internal item is moved */ + SUBTREE_MOVED = 0, + /* nothing else can be squeezed into left neighbor */ + SQUEEZE_TARGET_FULL = 1, + /* all content of node is squeezed into its left neighbor */ + SQUEEZE_SOURCE_EMPTY = 2, + /* one more item is copied (this is only returned by + allocate_and_copy_extent to squalloc_twig)) */ + SQUEEZE_CONTINUE = 3 +} squeeze_result; + +/* Do not change items ids. 
If you do - there will be format change */ +typedef enum { + STATIC_STAT_DATA_ID = 0x0, + SIMPLE_DIR_ENTRY_ID = 0x1, + COMPOUND_DIR_ID = 0x2, + NODE_POINTER_ID = 0x3, + EXTENT_POINTER_ID = 0x5, + FORMATTING_ID = 0x6, + CTAIL_ID = 0x7, + BLACK_BOX_ID = 0x8, + LAST_ITEM_ID = 0x9 +} item_id; + +/* Flags passed to jnode_flush() to allow it to distinguish default settings + based on whether commit() was called or VM memory pressure was applied. */ +typedef enum { + /* submit flush queue to disk at jnode_flush completion */ + JNODE_FLUSH_WRITE_BLOCKS = 1, + + /* flush is called for commit */ + JNODE_FLUSH_COMMIT = 2, + /* not implemented */ + JNODE_FLUSH_MEMORY_FORMATTED = 4, + + /* not implemented */ + JNODE_FLUSH_MEMORY_UNFORMATTED = 8, +} jnode_flush_flags; + +/* Flags to insert/paste carry operations. Currently they only used in + flushing code, but in future, they can be used to optimize for repetitive + accesses. */ +typedef enum { + /* carry is not allowed to shift data to the left when trying to find + free space */ + COPI_DONT_SHIFT_LEFT = (1 << 0), + /* carry is not allowed to shift data to the right when trying to find + free space */ + COPI_DONT_SHIFT_RIGHT = (1 << 1), + /* carry is not allowed to allocate new node(s) when trying to find + free space */ + COPI_DONT_ALLOCATE = (1 << 2), + /* try to load left neighbor if its not in a cache */ + COPI_LOAD_LEFT = (1 << 3), + /* try to load right neighbor if its not in a cache */ + COPI_LOAD_RIGHT = (1 << 4), + /* shift insertion point to the left neighbor */ + COPI_GO_LEFT = (1 << 5), + /* shift insertion point to the right neighbor */ + COPI_GO_RIGHT = (1 << 6), + /* try to step back into original node if insertion into new node + fails after shifting data there. 
*/ + COPI_STEP_BACK = (1 << 7), + /* use all possible space in the node */ + COPI_SWEEP = (1 << 8) +} cop_insert_flag; + +typedef enum { + SAFE_UNLINK, /* safe-link for unlink */ + SAFE_TRUNCATE /* safe-link for truncate */ +} reiser4_safe_link_t; + +/* this is to show on which list of atom jnode is */ +typedef enum { + NOT_CAPTURED, + DIRTY_LIST, + CLEAN_LIST, + FQ_LIST, + WB_LIST, + OVRWR_LIST +} atom_list; + +/* __REISER4_FORWARD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/fsdata.c b/fs/reiser4/fsdata.c new file mode 100644 index 000000000000..ad3f4101b7ca --- /dev/null +++ b/fs/reiser4/fsdata.c @@ -0,0 +1,801 @@ +/* Copyright 2001, 2002, 2003, 2004, 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "fsdata.h" +#include "inode.h" + +#include + +/* cache or dir_cursors */ +static struct kmem_cache *d_cursor_cache; + +/* list of unused cursors */ +static LIST_HEAD(cursor_cache); + +/* number of cursors in list of ununsed cursors */ +static unsigned long d_cursor_unused = 0; + +/* spinlock protecting manipulations with dir_cursor's hash table and lists */ +DEFINE_SPINLOCK(d_c_lock); + +static reiser4_file_fsdata *create_fsdata(struct file *file); +static int file_is_stateless(struct file *file); +static void free_fsdata(reiser4_file_fsdata *fsdata); +static void kill_cursor(dir_cursor *); + +static unsigned long d_cursor_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + dir_cursor *scan; + unsigned long freed = 0; + + spin_lock(&d_c_lock); + while (!list_empty(&cursor_cache) && sc->nr_to_scan) { + scan = list_entry(cursor_cache.next, dir_cursor, alist); + assert("nikita-3567", scan->ref == 0); + kill_cursor(scan); + freed++; + sc->nr_to_scan--; + } + spin_unlock(&d_c_lock); + return freed; +} + +static unsigned long d_cursor_shrink_count (struct shrinker *shrink, + 
struct shrink_control *sc) +{ + return d_cursor_unused; +} + +/* + * actually, d_cursors are "priceless", because there is no way to + * recover information stored in them. On the other hand, we don't + * want to consume all kernel memory by them. As a compromise, just + * assign higher "seeks" value to d_cursor cache, so that it will be + * shrunk only if system is really tight on memory. + */ +static struct shrinker d_cursor_shrinker = { + .count_objects = d_cursor_shrink_count, + .scan_objects = d_cursor_shrink_scan, + .seeks = DEFAULT_SEEKS << 3 +}; + +/** + * reiser4_init_d_cursor - create d_cursor cache + * + * Initializes slab cache of d_cursors. It is part of reiser4 module + * initialization. + */ +int reiser4_init_d_cursor(void) +{ + d_cursor_cache = kmem_cache_create("d_cursor", sizeof(dir_cursor), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (d_cursor_cache == NULL) + return RETERR(-ENOMEM); + + register_shrinker(&d_cursor_shrinker); + return 0; +} + +/** + * reiser4_done_d_cursor - delete d_cursor cache and d_cursor shrinker + * + * This is called on reiser4 module unloading or system shutdown. 
+ */ +void reiser4_done_d_cursor(void) +{ + unregister_shrinker(&d_cursor_shrinker); + + destroy_reiser4_cache(&d_cursor_cache); +} + +#define D_CURSOR_TABLE_SIZE (256) + +static inline unsigned long +d_cursor_hash(d_cursor_hash_table * table, const struct d_cursor_key *key) +{ + assert("nikita-3555", IS_POW(D_CURSOR_TABLE_SIZE)); + return (key->oid + key->cid) & (D_CURSOR_TABLE_SIZE - 1); +} + +static inline int d_cursor_eq(const struct d_cursor_key *k1, + const struct d_cursor_key *k2) +{ + return k1->cid == k2->cid && k1->oid == k2->oid; +} + +/* + * define functions to manipulate reiser4 super block's hash table of + * dir_cursors + */ +#define KMALLOC(size) kmalloc((size), reiser4_ctx_gfp_mask_get()) +#define KFREE(ptr, size) kfree(ptr) +TYPE_SAFE_HASH_DEFINE(d_cursor, + dir_cursor, + struct d_cursor_key, + key, hash, d_cursor_hash, d_cursor_eq); +#undef KFREE +#undef KMALLOC + +/** + * reiser4_init_super_d_info - initialize per-super-block d_cursor resources + * @super: super block to initialize + * + * Initializes per-super-block d_cursor's hash table and radix tree. It is part + * of mount. + */ +int reiser4_init_super_d_info(struct super_block *super) +{ + struct d_cursor_info *p; + + p = &get_super_private(super)->d_info; + + INIT_RADIX_TREE(&p->tree, reiser4_ctx_gfp_mask_get()); + return d_cursor_hash_init(&p->table, D_CURSOR_TABLE_SIZE); +} + +/** + * reiser4_done_super_d_info - release per-super-block d_cursor resources + * @super: super block being umounted + * + * It is called on umount. Kills all directory cursors attached to suoer block. 
+ */ +void reiser4_done_super_d_info(struct super_block *super) +{ + struct d_cursor_info *d_info; + dir_cursor *cursor, *next; + + d_info = &get_super_private(super)->d_info; + for_all_in_htable(&d_info->table, d_cursor, cursor, next) + kill_cursor(cursor); + + BUG_ON(d_info->tree.rnode != NULL); + d_cursor_hash_done(&d_info->table); +} + +/** + * kill_cursor - free dir_cursor and reiser4_file_fsdata attached to it + * @cursor: cursor to free + * + * Removes reiser4_file_fsdata attached to @cursor from readdir list of + * reiser4_inode, frees that reiser4_file_fsdata. Removes @cursor from from + * indices, hash table, list of unused cursors and frees it. + */ +static void kill_cursor(dir_cursor *cursor) +{ + unsigned long index; + + assert("nikita-3566", cursor->ref == 0); + assert("nikita-3572", cursor->fsdata != NULL); + + index = (unsigned long)cursor->key.oid; + list_del_init(&cursor->fsdata->dir.linkage); + free_fsdata(cursor->fsdata); + cursor->fsdata = NULL; + + if (list_empty_careful(&cursor->list)) + /* this is last cursor for a file. Kill radix-tree entry */ + radix_tree_delete(&cursor->info->tree, index); + else { + void **slot; + + /* + * there are other cursors for the same oid. + */ + + /* + * if radix tree point to the cursor being removed, re-target + * radix tree slot to the next cursor in the (non-empty as was + * checked above) element of the circular list of all cursors + * for this oid. 
+ */ + slot = radix_tree_lookup_slot(&cursor->info->tree, index); + assert("nikita-3571", *slot != NULL); + if (*slot == cursor) + *slot = list_entry(cursor->list.next, dir_cursor, list); + /* remove cursor from circular list */ + list_del_init(&cursor->list); + } + /* remove cursor from the list of unused cursors */ + list_del_init(&cursor->alist); + /* remove cursor from the hash table */ + d_cursor_hash_remove(&cursor->info->table, cursor); + /* and free it */ + kmem_cache_free(d_cursor_cache, cursor); + --d_cursor_unused; +} + +/* possible actions that can be performed on all cursors for the given file */ +enum cursor_action { + /* + * load all detached state: this is called when stat-data is loaded + * from the disk to recover information about all pending readdirs + */ + CURSOR_LOAD, + /* + * detach all state from inode, leaving it in the cache. This is called + * when inode is removed form the memory by memory pressure + */ + CURSOR_DISPOSE, + /* + * detach cursors from the inode, and free them. This is called when + * inode is destroyed + */ + CURSOR_KILL +}; + +/* + * return d_cursor data for the file system @inode is in. + */ +static inline struct d_cursor_info *d_info(struct inode *inode) +{ + return &get_super_private(inode->i_sb)->d_info; +} + +/* + * lookup d_cursor in the per-super-block radix tree. + */ +static inline dir_cursor *lookup(struct d_cursor_info *info, + unsigned long index) +{ + return (dir_cursor *) radix_tree_lookup(&info->tree, index); +} + +/* + * attach @cursor to the radix tree. There may be multiple cursors for the + * same oid, they are chained into circular list. + */ +static void bind_cursor(dir_cursor * cursor, unsigned long index) +{ + dir_cursor *head; + + head = lookup(cursor->info, index); + if (head == NULL) { + /* this is the first cursor for this index */ + INIT_LIST_HEAD(&cursor->list); + radix_tree_insert(&cursor->info->tree, index, cursor); + } else { + /* some cursor already exists. 
Chain ours */ + list_add(&cursor->list, &head->list); + } +} + +/* + * detach fsdata (if detachable) from file descriptor, and put cursor on the + * "unused" list. Called when file descriptor is not longer in active use. + */ +static void clean_fsdata(struct file *file) +{ + dir_cursor *cursor; + reiser4_file_fsdata *fsdata; + + assert("nikita-3570", file_is_stateless(file)); + + fsdata = (reiser4_file_fsdata *) file->private_data; + if (fsdata != NULL) { + cursor = fsdata->cursor; + if (cursor != NULL) { + spin_lock(&d_c_lock); + --cursor->ref; + if (cursor->ref == 0) { + list_add_tail(&cursor->alist, &cursor_cache); + ++d_cursor_unused; + } + spin_unlock(&d_c_lock); + file->private_data = NULL; + } + } +} + +/* + * global counter used to generate "client ids". These ids are encoded into + * high bits of fpos. + */ +static __u32 cid_counter = 0; +#define CID_SHIFT (20) +#define CID_MASK (0xfffffull) + +static void free_file_fsdata_nolock(struct file *); + +/** + * insert_cursor - allocate file_fsdata, insert cursor to tree and hash table + * @cursor: + * @file: + * @inode: + * + * Allocates reiser4_file_fsdata, attaches it to @cursor, inserts cursor to + * reiser4 super block's hash table and radix tree. + add detachable readdir + * state to the @f + */ +static int insert_cursor(dir_cursor *cursor, struct file *file, loff_t *fpos, + struct inode *inode) +{ + int result; + reiser4_file_fsdata *fsdata; + + memset(cursor, 0, sizeof *cursor); + + /* this is either first call to readdir, or rewind. Anyway, create new + * cursor. */ + fsdata = create_fsdata(NULL); + if (fsdata != NULL) { + result = radix_tree_preload(reiser4_ctx_gfp_mask_get()); + if (result == 0) { + struct d_cursor_info *info; + oid_t oid; + + info = d_info(inode); + oid = get_inode_oid(inode); + /* cid occupies higher 12 bits of f->f_pos. 
Don't + * allow it to become negative: this confuses + * nfsd_readdir() */ + cursor->key.cid = (++cid_counter) & 0x7ff; + cursor->key.oid = oid; + cursor->fsdata = fsdata; + cursor->info = info; + cursor->ref = 1; + + spin_lock_inode(inode); + /* install cursor as @f's private_data, discarding old + * one if necessary */ +#if REISER4_DEBUG + if (file->private_data) + warning("", "file has fsdata already"); +#endif + clean_fsdata(file); + free_file_fsdata_nolock(file); + file->private_data = fsdata; + fsdata->cursor = cursor; + spin_unlock_inode(inode); + spin_lock(&d_c_lock); + /* insert cursor into hash table */ + d_cursor_hash_insert(&info->table, cursor); + /* and chain it into radix-tree */ + bind_cursor(cursor, (unsigned long)oid); + spin_unlock(&d_c_lock); + radix_tree_preload_end(); + *fpos = ((__u64) cursor->key.cid) << CID_SHIFT; + } + } else + result = RETERR(-ENOMEM); + return result; +} + +/** + * process_cursors - do action on each cursor attached to inode + * @inode: + * @act: action to do + * + * Finds all cursors of @inode in reiser4's super block radix tree of cursors + * and performs action specified by @act on each of cursors. 
+ */ +static void process_cursors(struct inode *inode, enum cursor_action act) +{ + oid_t oid; + dir_cursor *start; + struct list_head *head; + reiser4_context *ctx; + struct d_cursor_info *info; + + /* this can be called by + * + * kswapd->...->prune_icache->..reiser4_destroy_inode + * + * without reiser4_context + */ + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + warning("vs-23", "failed to init context"); + return; + } + + assert("nikita-3558", inode != NULL); + + info = d_info(inode); + oid = get_inode_oid(inode); + spin_lock_inode(inode); + head = get_readdir_list(inode); + spin_lock(&d_c_lock); + /* find any cursor for this oid: reference to it is hanging of radix + * tree */ + start = lookup(info, (unsigned long)oid); + if (start != NULL) { + dir_cursor *scan; + reiser4_file_fsdata *fsdata; + + /* process circular list of cursors for this oid */ + scan = start; + do { + dir_cursor *next; + + next = list_entry(scan->list.next, dir_cursor, list); + fsdata = scan->fsdata; + assert("nikita-3557", fsdata != NULL); + if (scan->key.oid == oid) { + switch (act) { + case CURSOR_DISPOSE: + list_del_init(&fsdata->dir.linkage); + break; + case CURSOR_LOAD: + list_add(&fsdata->dir.linkage, head); + break; + case CURSOR_KILL: + kill_cursor(scan); + break; + } + } + if (scan == next) + /* last cursor was just killed */ + break; + scan = next; + } while (scan != start); + } + spin_unlock(&d_c_lock); + /* check that we killed 'em all */ + assert("nikita-3568", + ergo(act == CURSOR_KILL, + list_empty_careful(get_readdir_list(inode)))); + assert("nikita-3569", + ergo(act == CURSOR_KILL, lookup(info, oid) == NULL)); + spin_unlock_inode(inode); + reiser4_exit_context(ctx); +} + +/** + * reiser4_dispose_cursors - removes cursors from inode's list + * @inode: inode to dispose cursors of + * + * For each of cursors corresponding to @inode - removes reiser4_file_fsdata + * attached to cursor from inode's readdir list. 
This is called when inode is + * removed from the memory by memory pressure. + */ +void reiser4_dispose_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_DISPOSE); +} + +/** + * reiser4_load_cursors - attach cursors to inode + * @inode: inode to load cursors to + * + * For each of cursors corresponding to @inode - attaches reiser4_file_fsdata + * attached to cursor to inode's readdir list. This is done when inode is + * loaded into memory. + */ +void reiser4_load_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_LOAD); +} + +/** + * reiser4_kill_cursors - kill all inode cursors + * @inode: inode to kill cursors of + * + * Frees all cursors for this inode. This is called when inode is destroyed. + */ +void reiser4_kill_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_KILL); +} + +/** + * file_is_stateless - + * @file: + * + * true, if file descriptor @f is created by NFS server by "demand" to serve + * one file system operation. This means that there may be "detached state" + * for underlying inode. + */ +static int file_is_stateless(struct file *file) +{ + return reiser4_get_dentry_fsdata(file->f_path.dentry)->stateless; +} + +/** + * reiser4_get_dir_fpos - + * @dir: + * @fpos: effective value of dir->f_pos + * + * Calculates ->fpos from user-supplied cookie. Normally it is dir->f_pos, but + * in the case of stateless directory operation (readdir-over-nfs), client id + * was encoded in the high bits of cookie and should me masked off. + */ +loff_t reiser4_get_dir_fpos(struct file *dir, loff_t fpos) +{ + if (file_is_stateless(dir)) + return fpos & CID_MASK; + else + return fpos; +} + +/** + * reiser4_attach_fsdata - try to attach fsdata + * @file: + * @fpos: effective value of @file->f_pos + * @inode: + * + * Finds or creates cursor for readdir-over-nfs. 
+ */ +int reiser4_attach_fsdata(struct file *file, loff_t *fpos, struct inode *inode) +{ + loff_t pos; + int result; + dir_cursor *cursor; + + /* + * we are serialized by inode->i_mutex + */ + if (!file_is_stateless(file)) + return 0; + + pos = *fpos; + result = 0; + if (pos == 0) { + /* + * first call to readdir (or rewind to the beginning of + * directory) + */ + cursor = kmem_cache_alloc(d_cursor_cache, + reiser4_ctx_gfp_mask_get()); + if (cursor != NULL) + result = insert_cursor(cursor, file, fpos, inode); + else + result = RETERR(-ENOMEM); + } else { + /* try to find existing cursor */ + struct d_cursor_key key; + + key.cid = pos >> CID_SHIFT; + key.oid = get_inode_oid(inode); + spin_lock(&d_c_lock); + cursor = d_cursor_hash_find(&d_info(inode)->table, &key); + if (cursor != NULL) { + /* cursor was found */ + if (cursor->ref == 0) { + /* move it from unused list */ + list_del_init(&cursor->alist); + --d_cursor_unused; + } + ++cursor->ref; + } + spin_unlock(&d_c_lock); + if (cursor != NULL) { + spin_lock_inode(inode); + assert("nikita-3556", cursor->fsdata->back == NULL); + clean_fsdata(file); + free_file_fsdata_nolock(file); + file->private_data = cursor->fsdata; + spin_unlock_inode(inode); + } + } + return result; +} + +/** + * reiser4_detach_fsdata - ??? + * @file: + * + * detach fsdata, if necessary + */ +void reiser4_detach_fsdata(struct file *file) +{ + struct inode *inode; + + if (!file_is_stateless(file)) + return; + + inode = file_inode(file); + spin_lock_inode(inode); + clean_fsdata(file); + spin_unlock_inode(inode); +} + +/* slab for reiser4_dentry_fsdata */ +static struct kmem_cache *dentry_fsdata_cache; + +/** + * reiser4_init_dentry_fsdata - create cache of dentry_fsdata + * + * Initializes slab cache of structures attached to denty->d_fsdata. It is + * part of reiser4 module initialization. 
+ */ +int reiser4_init_dentry_fsdata(void) +{ + dentry_fsdata_cache = kmem_cache_create("dentry_fsdata", + sizeof(struct reiser4_dentry_fsdata), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + if (dentry_fsdata_cache == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_dentry_fsdata - delete cache of dentry_fsdata + * + * This is called on reiser4 module unloading or system shutdown. + */ +void reiser4_done_dentry_fsdata(void) +{ + destroy_reiser4_cache(&dentry_fsdata_cache); +} + +/** + * reiser4_get_dentry_fsdata - get fs-specific dentry data + * @dentry: queried dentry + * + * Allocates if necessary and returns per-dentry data that we attach to each + * dentry. + */ +struct reiser4_dentry_fsdata *reiser4_get_dentry_fsdata(struct dentry *dentry) +{ + assert("nikita-1365", dentry != NULL); + + if (dentry->d_fsdata == NULL) { + dentry->d_fsdata = kmem_cache_alloc(dentry_fsdata_cache, + reiser4_ctx_gfp_mask_get()); + if (dentry->d_fsdata == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + memset(dentry->d_fsdata, 0, + sizeof(struct reiser4_dentry_fsdata)); + } + return dentry->d_fsdata; +} + +/** + * reiser4_free_dentry_fsdata - detach and free dentry_fsdata + * @dentry: dentry to free fsdata of + * + * Detaches and frees fs-specific dentry data + */ +void reiser4_free_dentry_fsdata(struct dentry *dentry) +{ + if (dentry->d_fsdata != NULL) { + kmem_cache_free(dentry_fsdata_cache, dentry->d_fsdata); + dentry->d_fsdata = NULL; + } +} + +/* slab for reiser4_file_fsdata */ +static struct kmem_cache *file_fsdata_cache; + +/** + * reiser4_init_file_fsdata - create cache of reiser4_file_fsdata + * + * Initializes slab cache of structures attached to file->private_data. It is + * part of reiser4 module initialization. 
+ */ +int reiser4_init_file_fsdata(void) +{ + file_fsdata_cache = kmem_cache_create("file_fsdata", + sizeof(reiser4_file_fsdata), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (file_fsdata_cache == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_file_fsdata - delete cache of reiser4_file_fsdata + * + * This is called on reiser4 module unloading or system shutdown. + */ +void reiser4_done_file_fsdata(void) +{ + destroy_reiser4_cache(&file_fsdata_cache); +} + +/** + * create_fsdata - allocate and initialize reiser4_file_fsdata + * @file: what to create file_fsdata for, may be NULL + * + * Allocates and initializes reiser4_file_fsdata structure. + */ +static reiser4_file_fsdata *create_fsdata(struct file *file) +{ + reiser4_file_fsdata *fsdata; + + fsdata = kmem_cache_alloc(file_fsdata_cache, + reiser4_ctx_gfp_mask_get()); + if (fsdata != NULL) { + memset(fsdata, 0, sizeof *fsdata); + fsdata->back = file; + INIT_LIST_HEAD(&fsdata->dir.linkage); + } + return fsdata; +} + +/** + * free_fsdata - free reiser4_file_fsdata + * @fsdata: object to free + * + * Dual to create_fsdata(). Free reiser4_file_fsdata. + */ +static void free_fsdata(reiser4_file_fsdata *fsdata) +{ + BUG_ON(fsdata == NULL); + kmem_cache_free(file_fsdata_cache, fsdata); +} + +/** + * reiser4_get_file_fsdata - get fs-specific file data + * @file: queried file + * + * Returns fs-specific data of @file. If it is NULL, allocates it and attaches + * to @file. 
+ */ +reiser4_file_fsdata *reiser4_get_file_fsdata(struct file *file) +{ + assert("nikita-1603", file != NULL); + + if (file->private_data == NULL) { + reiser4_file_fsdata *fsdata; + struct inode *inode; + + fsdata = create_fsdata(file); + if (fsdata == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + inode = file_inode(file); + spin_lock_inode(inode); + if (file->private_data == NULL) { + file->private_data = fsdata; + fsdata = NULL; + } + spin_unlock_inode(inode); + if (fsdata != NULL) + /* other thread initialized ->fsdata */ + kmem_cache_free(file_fsdata_cache, fsdata); + } + assert("nikita-2665", file->private_data != NULL); + return file->private_data; +} + +/** + * free_file_fsdata_nolock - detach and free reiser4_file_fsdata + * @file: + * + * Detaches reiser4_file_fsdata from @file, removes reiser4_file_fsdata from + * readdir list, frees if it is not linked to d_cursor object. + */ +static void free_file_fsdata_nolock(struct file *file) +{ + reiser4_file_fsdata *fsdata; + + assert("", spin_inode_is_locked(file_inode(file))); + fsdata = file->private_data; + if (fsdata != NULL) { + list_del_init(&fsdata->dir.linkage); + if (fsdata->cursor == NULL) + free_fsdata(fsdata); + } + file->private_data = NULL; +} + +/** + * reiser4_free_file_fsdata - detach from struct file and free reiser4_file_fsdata + * @file: + * + * Spinlocks inode and calls free_file_fsdata_nolock to do the work. 
+ */ +void reiser4_free_file_fsdata(struct file *file) +{ + spin_lock_inode(file_inode(file)); + free_file_fsdata_nolock(file); + spin_unlock_inode(file_inode(file)); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/fsdata.h b/fs/reiser4/fsdata.h new file mode 100644 index 000000000000..fa6634e87997 --- /dev/null +++ b/fs/reiser4/fsdata.h @@ -0,0 +1,203 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#if !defined(__REISER4_FSDATA_H__) +#define __REISER4_FSDATA_H__ + +#include "debug.h" +#include "kassign.h" +#include "seal.h" +#include "type_safe_hash.h" +#include "plugin/file/file.h" +#include "readahead.h" + +/* + * comment about reiser4_dentry_fsdata + * + * + */ + +/* + * locking: fields of per file descriptor readdir_pos and ->f_pos are + * protected by ->i_mutex on inode. Under this lock following invariant + * holds: + * + * file descriptor is "looking" at the entry_no-th directory entry from + * the beginning of directory. This entry has key dir_entry_key and is + * pos-th entry with duplicate-key sequence. + * + */ + +/* logical position within directory */ +struct dir_pos { + /* key of directory entry (actually, part of a key sufficient to + identify directory entry) */ + de_id dir_entry_key; + /* ordinal number of directory entry among all entries with the same + key. (Starting from 0.) */ + unsigned pos; +}; + +struct readdir_pos { + /* f_pos corresponding to this readdir position */ + __u64 fpos; + /* logical position within directory */ + struct dir_pos position; + /* logical number of directory entry within + directory */ + __u64 entry_no; +}; + +/* + * this is used to speed up lookups for directory entry: on initial call to + * ->lookup() seal and coord of directory entry (if found, that is) are stored + * in struct dentry and reused later to avoid tree traversals. 
+ */ +struct de_location { + /* seal covering directory entry */ + seal_t entry_seal; + /* coord of directory entry */ + coord_t entry_coord; + /* ordinal number of directory entry among all entries with the same + key. (Starting from 0.) */ + int pos; +}; + +/** + * reiser4_dentry_fsdata - reiser4-specific data attached to dentries + * + * This is allocated dynamically and released in d_op->d_release() + * + * Currently it only contains cached location (hint) of directory entry, but + * it is expected that other information will be accumulated here. + */ +struct reiser4_dentry_fsdata { + /* + * here will go fields filled by ->lookup() to speedup next + * create/unlink, like blocknr of znode with stat-data, or key of + * stat-data. + */ + struct de_location dec; + int stateless; /* created through reiser4_decode_fh, needs + * special treatment in readdir. */ +}; + +extern int reiser4_init_dentry_fsdata(void); +extern void reiser4_done_dentry_fsdata(void); +extern struct reiser4_dentry_fsdata *reiser4_get_dentry_fsdata(struct dentry *); +extern void reiser4_free_dentry_fsdata(struct dentry *dentry); + +/** + * reiser4_file_fsdata - reiser4-specific data attached to file->private_data + * + * This is allocated dynamically and released in inode->i_fop->release + */ +typedef struct reiser4_file_fsdata { + /* + * pointer back to the struct file which this reiser4_file_fsdata is + * part of + */ + struct file *back; + /* detached cursor for stateless readdir. */ + struct dir_cursor *cursor; + /* + * We need both directory and regular file parts here, because there + * are file system objects that are files and directories. + */ + struct { + /* + * position in directory. It is updated each time directory is + * modified + */ + struct readdir_pos readdir; + /* head of this list is reiser4_inode->lists.readdir_list */ + struct list_head linkage; + } dir; + /* hints to speed up operations with regular files: read and write. 
*/ + struct { + hint_t hint; + } reg; +} reiser4_file_fsdata; + +extern int reiser4_init_file_fsdata(void); +extern void reiser4_done_file_fsdata(void); +extern reiser4_file_fsdata *reiser4_get_file_fsdata(struct file *); +extern void reiser4_free_file_fsdata(struct file *); + +/* + * d_cursor is reiser4_file_fsdata not attached to struct file. d_cursors are + * used to address problem reiser4 has with readdir accesses via NFS. See + * plugin/file_ops_readdir.c for more details. + */ +struct d_cursor_key{ + __u16 cid; + __u64 oid; +}; + +/* + * define structures d_cursor_hash_table d_cursor_hash_link which are used to + * maintain hash table of dir_cursor-s in reiser4's super block + */ +typedef struct dir_cursor dir_cursor; +TYPE_SAFE_HASH_DECLARE(d_cursor, dir_cursor); + +struct dir_cursor { + int ref; + reiser4_file_fsdata *fsdata; + + /* link to reiser4 super block hash table of cursors */ + d_cursor_hash_link hash; + + /* + * this is to link cursors to reiser4 super block's radix tree of + * cursors if there are more than one cursor of the same objectid + */ + struct list_head list; + struct d_cursor_key key; + struct d_cursor_info *info; + /* list of unused cursors */ + struct list_head alist; +}; + +extern int reiser4_init_d_cursor(void); +extern void reiser4_done_d_cursor(void); + +extern int reiser4_init_super_d_info(struct super_block *); +extern void reiser4_done_super_d_info(struct super_block *); + +extern loff_t reiser4_get_dir_fpos(struct file *, loff_t); +extern int reiser4_attach_fsdata(struct file *, loff_t *, struct inode *); +extern void reiser4_detach_fsdata(struct file *); + +/* these are needed for "stateless" readdir. 
See plugin/file_ops_readdir.c for + more details */ +void reiser4_dispose_cursors(struct inode *inode); +void reiser4_load_cursors(struct inode *inode); +void reiser4_kill_cursors(struct inode *inode); +void reiser4_adjust_dir_file(struct inode *dir, const struct dentry *de, + int offset, int adj); + +/* + * this structure is embedded to reise4_super_info_data. It maintains d_cursors + * (detached readdir state). See plugin/file_ops_readdir.c for more details. + */ +struct d_cursor_info { + d_cursor_hash_table table; + struct radix_tree_root tree; +}; + +/* spinlock protecting readdir cursors */ +extern spinlock_t d_c_lock; + +/* __REISER4_FSDATA_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/init_super.c b/fs/reiser4/init_super.c new file mode 100644 index 000000000000..e9945960ceaa --- /dev/null +++ b/fs/reiser4/init_super.c @@ -0,0 +1,806 @@ +/* Copyright by Hans Reiser, 2003 */ + +#include "super.h" +#include "inode.h" +#include "plugin/plugin_set.h" + +#include + +/** + * init_fs_info - allocate reiser4 specific super block + * @super: super block of filesystem + * + * Allocates and initialize reiser4_super_info_data, attaches it to + * super->s_fs_info, initializes structures maintaining d_cursor-s. 
+ */ +int reiser4_init_fs_info(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = kzalloc(sizeof(reiser4_super_info_data), + reiser4_ctx_gfp_mask_get()); + if (!sbinfo) + return RETERR(-ENOMEM); + + super->s_fs_info = sbinfo; + super->s_op = NULL; + + ON_DEBUG(INIT_LIST_HEAD(&sbinfo->all_jnodes)); + ON_DEBUG(spin_lock_init(&sbinfo->all_guard)); + + mutex_init(&sbinfo->delete_mutex); + spin_lock_init(&(sbinfo->guard)); + + /* initialize per-super-block d_cursor resources */ + reiser4_init_super_d_info(super); + + return 0; +} + +/** + * Release reiser4 specific super block + * + * release per-super-block d_cursor resources + * free reiser4_super_info_data. + */ +void reiser4_done_fs_info(struct super_block *super) +{ + assert("zam-990", super->s_fs_info != NULL); + + reiser4_done_super_d_info(super); + kfree(super->s_fs_info); + super->s_fs_info = NULL; +} + +/* type of option parseable by parse_option() */ +typedef enum { + /* value of option is arbitrary string */ + OPT_STRING, + + /* + * option specifies bit in a bitmask. When option is set - bit in + * sbinfo->fs_flags is set. Examples are bsdgroups, 32bittimes, mtflush, + * dont_load_bitmap, atomic_write. + */ + OPT_BIT, + + /* + * value of option should conform to sprintf() format. Examples are + * tmgr.atom_max_size=N, tmgr.atom_max_age=N + */ + OPT_FORMAT, + + /* + * option can take one of predefined values. Example is onerror=panic or + * onerror=remount-ro + */ + OPT_ONEOF, + + /* + * option take one of txmod plugin labels. + * Example is "txmod=journal" or "txmod=wa" + */ + OPT_TXMOD, +} opt_type_t; + +#if 0 +struct opt_bitmask_bit { + const char *bit_name; + int bit_nr; +}; +#endif + +#define MAX_ONEOF_LIST 10 + +/* description of option parseable by parse_option() */ +struct opt_desc { + /* option name. + + parsed portion of string has a form "name=value". 
+ */ + const char *name; + /* type of option */ + opt_type_t type; + union { + /* where to store value of string option (type == OPT_STRING) */ + char **string; + /* description of bits for bit option (type == OPT_BIT) */ + struct { + int nr; + void *addr; + } bit; + /* description of format and targets for format option (type + == OPT_FORMAT) */ + struct { + const char *format; + int nr_args; + void *arg1; + void *arg2; + void *arg3; + void *arg4; + } f; + struct { + int *result; + const char *list[MAX_ONEOF_LIST]; + } oneof; + struct { + reiser4_txmod_id *result; + } txmod; + struct { + void *addr; + int nr_bits; + /* struct opt_bitmask_bit *bits; */ + } bitmask; + } u; +}; + +/** + * parse_option - parse one option + * @opt_strin: starting point of parsing + * @opt: option description + * + * foo=bar, + * ^ ^ ^ + * | | +-- replaced to '\0' + * | +-- val_start + * +-- opt_string + * Figures out option type and handles option correspondingly. + */ +static int parse_option(char *opt_string, struct opt_desc *opt) +{ + char *val_start; + int result; + const char *err_msg; + + /* NOTE-NIKITA think about using lib/cmdline.c functions here. 
*/ + + val_start = strchr(opt_string, '='); + if (val_start != NULL) { + *val_start = '\0'; + ++val_start; + } + + err_msg = NULL; + result = 0; + switch (opt->type) { + case OPT_STRING: + if (val_start == NULL) { + err_msg = "String arg missing"; + result = RETERR(-EINVAL); + } else + *opt->u.string = val_start; + break; + case OPT_BIT: + if (val_start != NULL) + err_msg = "Value ignored"; + else + set_bit(opt->u.bit.nr, opt->u.bit.addr); + break; + case OPT_FORMAT: + if (val_start == NULL) { + err_msg = "Formatted arg missing"; + result = RETERR(-EINVAL); + break; + } + if (sscanf(val_start, opt->u.f.format, + opt->u.f.arg1, opt->u.f.arg2, opt->u.f.arg3, + opt->u.f.arg4) != opt->u.f.nr_args) { + err_msg = "Wrong conversion"; + result = RETERR(-EINVAL); + } + break; + case OPT_ONEOF: + { + int i = 0; + + if (val_start == NULL) { + err_msg = "Value is missing"; + result = RETERR(-EINVAL); + break; + } + err_msg = "Wrong option value"; + result = RETERR(-EINVAL); + while (opt->u.oneof.list[i]) { + if (!strcmp(opt->u.oneof.list[i], val_start)) { + result = 0; + err_msg = NULL; + *opt->u.oneof.result = i; + break; + } + i++; + } + break; + } + break; + case OPT_TXMOD: + { + reiser4_txmod_id i = 0; + + if (val_start == NULL) { + err_msg = "Value is missing"; + result = RETERR(-EINVAL); + break; + } + err_msg = "Wrong option value"; + result = RETERR(-EINVAL); + while (i < LAST_TXMOD_ID) { + if (!strcmp(txmod_plugins[i].h.label, + val_start)) { + result = 0; + err_msg = NULL; + *opt->u.txmod.result = i; + break; + } + i++; + } + break; + } + default: + wrong_return_value("nikita-2100", "opt -> type"); + break; + } + if (err_msg != NULL) { + warning("nikita-2496", "%s when parsing option \"%s%s%s\"", + err_msg, opt->name, val_start ? "=" : "", + val_start ? 
: ""); + } + return result; +} + +/** + * parse_options - parse reiser4 mount options + * @opt_string: starting point + * @opts: array of option description + * @nr_opts: number of elements in @opts + * + * Parses comma separated list of reiser4 mount options. + */ +static int parse_options(char *opt_string, struct opt_desc *opts, int nr_opts) +{ + int result; + + result = 0; + while ((result == 0) && opt_string && *opt_string) { + int j; + char *next; + + next = strchr(opt_string, ','); + if (next != NULL) { + *next = '\0'; + ++next; + } + for (j = 0; j < nr_opts; ++j) { + if (!strncmp(opt_string, opts[j].name, + strlen(opts[j].name))) { + result = parse_option(opt_string, &opts[j]); + break; + } + } + if (j == nr_opts) { + warning("nikita-2307", "Unrecognized option: \"%s\"", + opt_string); + /* traditionally, -EINVAL is returned on wrong mount + option */ + result = RETERR(-EINVAL); + } + opt_string = next; + } + return result; +} + +#define NUM_OPT(label, fmt, addr) \ + { \ + .name = (label), \ + .type = OPT_FORMAT, \ + .u = { \ + .f = { \ + .format = (fmt), \ + .nr_args = 1, \ + .arg1 = (addr), \ + .arg2 = NULL, \ + .arg3 = NULL, \ + .arg4 = NULL \ + } \ + } \ + } + +#define SB_FIELD_OPT(field, fmt) NUM_OPT(#field, fmt, &sbinfo->field) + +#define BIT_OPT(label, bitnr) \ + { \ + .name = label, \ + .type = OPT_BIT, \ + .u = { \ + .bit = { \ + .nr = bitnr, \ + .addr = &sbinfo->fs_flags \ + } \ + } \ + } + +#define MAX_NR_OPTIONS (30) + +#if REISER4_DEBUG +# define OPT_ARRAY_CHECK(opt, array) \ + if ((opt) > (array) + MAX_NR_OPTIONS) { \ + warning("zam-1046", "opt array is overloaded"); break; \ + } +#else +# define OPT_ARRAY_CHECK(opt, array) noop +#endif + +#define PUSH_OPT(opt, array, ...) 
\ +do { \ + struct opt_desc o = __VA_ARGS__; \ + OPT_ARRAY_CHECK(opt, array); \ + *(opt) ++ = o; \ +} while (0) + +static noinline void push_sb_field_opts(struct opt_desc **p, + struct opt_desc *opts, + reiser4_super_info_data *sbinfo) +{ +#define PUSH_SB_FIELD_OPT(field, format) \ + PUSH_OPT(*p, opts, SB_FIELD_OPT(field, format)) + /* + * tmgr.atom_max_size=N + * Atoms containing more than N blocks will be forced to commit. N is + * decimal. + */ + PUSH_SB_FIELD_OPT(tmgr.atom_max_size, "%u"); + /* + * tmgr.atom_max_age=N + * Atoms older than N seconds will be forced to commit. N is decimal. + */ + PUSH_SB_FIELD_OPT(tmgr.atom_max_age, "%u"); + /* + * tmgr.atom_min_size=N + * In committing an atom to free dirty pages, force the atom less than + * N in size to fuse with another one. + */ + PUSH_SB_FIELD_OPT(tmgr.atom_min_size, "%u"); + /* + * tmgr.atom_max_flushers=N + * limit of concurrent flushers for one atom. 0 means no limit. + */ + PUSH_SB_FIELD_OPT(tmgr.atom_max_flushers, "%u"); + /* + * tree.cbk_cache_slots=N + * Number of slots in the cbk cache. + */ + PUSH_SB_FIELD_OPT(tree.cbk_cache.nr_slots, "%u"); + /* + * If flush finds more than FLUSH_RELOCATE_THRESHOLD adjacent dirty + * leaf-level blocks it will force them to be relocated. + */ + PUSH_SB_FIELD_OPT(flush.relocate_threshold, "%u"); + /* + * If flush finds can find a block allocation closer than at most + * FLUSH_RELOCATE_DISTANCE from the preceder it will relocate to that + * position. + */ + PUSH_SB_FIELD_OPT(flush.relocate_distance, "%u"); + /* + * If we have written this much or more blocks before encountering busy + * jnode in flush list - abort flushing hoping that next time we get + * called this jnode will be clean already, and we will save some + * seeks. + */ + PUSH_SB_FIELD_OPT(flush.written_threshold, "%u"); + /* The maximum number of nodes to scan left on a level during flush. 
*/ + PUSH_SB_FIELD_OPT(flush.scan_maxnodes, "%u"); + /* preferred IO size */ + PUSH_SB_FIELD_OPT(optimal_io_size, "%u"); + /* carry flags used for insertion of new nodes */ + PUSH_SB_FIELD_OPT(tree.carry.new_node_flags, "%u"); + /* carry flags used for insertion of new extents */ + PUSH_SB_FIELD_OPT(tree.carry.new_extent_flags, "%u"); + /* carry flags used for paste operations */ + PUSH_SB_FIELD_OPT(tree.carry.paste_flags, "%u"); + /* carry flags used for insert operations */ + PUSH_SB_FIELD_OPT(tree.carry.insert_flags, "%u"); + +#ifdef CONFIG_REISER4_BADBLOCKS + /* + * Alternative master superblock location in case if it's original + * location is not writeable/accessable. This is offset in BYTES. + */ + PUSH_SB_FIELD_OPT(altsuper, "%lu"); +#endif +} + +/** + * reiser4_init_super_data - initialize reiser4 private super block + * @super: super block to initialize + * @opt_string: list of reiser4 mount options + * + * Sets various reiser4 parameters to default values. Parses mount options and + * overwrites default settings. 
+ */ +int reiser4_init_super_data(struct super_block *super, char *opt_string) +{ + int result; + struct opt_desc *opts, *p; + reiser4_super_info_data *sbinfo = get_super_private(super); + + /* initialize super, export, dentry operations */ + sbinfo->ops.super = reiser4_super_operations; + sbinfo->ops.export = reiser4_export_operations; + sbinfo->ops.dentry = reiser4_dentry_operations; + super->s_op = &sbinfo->ops.super; + super->s_export_op = &sbinfo->ops.export; + + /* initialize transaction manager parameters to default values */ + sbinfo->tmgr.atom_max_size = totalram_pages / 4; + sbinfo->tmgr.atom_max_age = REISER4_ATOM_MAX_AGE / HZ; + sbinfo->tmgr.atom_min_size = 256; + sbinfo->tmgr.atom_max_flushers = ATOM_MAX_FLUSHERS; + + /* initialize cbk cache parameter */ + sbinfo->tree.cbk_cache.nr_slots = CBK_CACHE_SLOTS; + + /* initialize flush parameters */ + sbinfo->flush.relocate_threshold = FLUSH_RELOCATE_THRESHOLD; + sbinfo->flush.relocate_distance = FLUSH_RELOCATE_DISTANCE; + sbinfo->flush.written_threshold = FLUSH_WRITTEN_THRESHOLD; + sbinfo->flush.scan_maxnodes = FLUSH_SCAN_MAXNODES; + + sbinfo->optimal_io_size = REISER4_OPTIMAL_IO_SIZE; + + /* preliminary tree initializations */ + sbinfo->tree.super = super; + sbinfo->tree.carry.new_node_flags = REISER4_NEW_NODE_FLAGS; + sbinfo->tree.carry.new_extent_flags = REISER4_NEW_EXTENT_FLAGS; + sbinfo->tree.carry.paste_flags = REISER4_PASTE_FLAGS; + sbinfo->tree.carry.insert_flags = REISER4_INSERT_FLAGS; + rwlock_init(&(sbinfo->tree.tree_lock)); + spin_lock_init(&(sbinfo->tree.epoch_lock)); + + /* initialize default readahead params */ + sbinfo->ra_params.max = totalram_pages / 4; + sbinfo->ra_params.flags = 0; + + /* allocate memory for structure describing reiser4 mount options */ + opts = kmalloc(sizeof(struct opt_desc) * MAX_NR_OPTIONS, + reiser4_ctx_gfp_mask_get()); + if (opts == NULL) + return RETERR(-ENOMEM); + + /* initialize structure describing reiser4 mount options */ + p = opts; + + push_sb_field_opts(&p, 
opts, sbinfo); + /* turn on BSD-style gid assignment */ + +#define PUSH_BIT_OPT(name, bit) \ + PUSH_OPT(p, opts, BIT_OPT(name, bit)) + + PUSH_BIT_OPT("bsdgroups", REISER4_BSD_GID); + /* turn on 32 bit times */ + PUSH_BIT_OPT("32bittimes", REISER4_32_BIT_TIMES); + /* + * Don't load all bitmap blocks at mount time, it is useful for + * machines with tiny RAM and large disks. + */ + PUSH_BIT_OPT("dont_load_bitmap", REISER4_DONT_LOAD_BITMAP); + /* disable transaction commits during write() */ + PUSH_BIT_OPT("atomic_write", REISER4_ATOMIC_WRITE); + /* enable issuing of discard requests */ + PUSH_BIT_OPT("discard", REISER4_DISCARD); + /* disable hole punching at flush time */ + PUSH_BIT_OPT("dont_punch_holes", REISER4_DONT_PUNCH_HOLES); + + PUSH_OPT(p, opts, + { + /* + * tree traversal readahead parameters: + * -o readahead:MAXNUM:FLAGS + * MAXNUM - max number fo nodes to request readahead for: -1UL + * will set it to max_sane_readahead() + * FLAGS - combination of bits: RA_ADJCENT_ONLY, RA_ALL_LEVELS, + * CONTINUE_ON_PRESENT + */ + .name = "readahead", + .type = OPT_FORMAT, + .u = { + .f = { + .format = "%u:%u", + .nr_args = 2, + .arg1 = &sbinfo->ra_params.max, + .arg2 = &sbinfo->ra_params.flags, + .arg3 = NULL, + .arg4 = NULL + } + } + } + ); + + /* What to do in case of fs error */ + PUSH_OPT(p, opts, + { + .name = "onerror", + .type = OPT_ONEOF, + .u = { + .oneof = { + .result = &sbinfo->onerror, + .list = { + "remount-ro", "panic", NULL + }, + } + } + } + ); + + /* + * What trancaction model (journal, cow, etc) + * is used to commit transactions + */ + PUSH_OPT(p, opts, + { + .name = "txmod", + .type = OPT_TXMOD, + .u = { + .txmod = { + .result = &sbinfo->txmod + } + } + } + ); + + /* modify default settings to values set by mount options */ + result = parse_options(opt_string, opts, p - opts); + kfree(opts); + if (result != 0) + return result; + + /* correct settings to sanity values */ + sbinfo->tmgr.atom_max_age *= HZ; + if (sbinfo->tmgr.atom_max_age <= 0) + /* 
overflow */ + sbinfo->tmgr.atom_max_age = REISER4_ATOM_MAX_AGE; + + /* round optimal io size up to 512 bytes */ + sbinfo->optimal_io_size >>= VFS_BLKSIZE_BITS; + sbinfo->optimal_io_size <<= VFS_BLKSIZE_BITS; + if (sbinfo->optimal_io_size == 0) { + warning("nikita-2497", "optimal_io_size is too small"); + return RETERR(-EINVAL); + } + return result; +} + +/** + * reiser4_init_read_super - read reiser4 master super block + * @super: super block to fill + * @silent: if 0 - print warnings + * + * Reads reiser4 master super block either from predefined location or from + * location specified by altsuper mount option, initializes disk format plugin. + */ +int reiser4_init_read_super(struct super_block *super, int silent) +{ + struct buffer_head *super_bh; + struct reiser4_master_sb *master_sb; + reiser4_super_info_data *sbinfo = get_super_private(super); + unsigned long blocksize; + + read_super_block: +#ifdef CONFIG_REISER4_BADBLOCKS + if (sbinfo->altsuper) + /* + * read reiser4 master super block at position specified by + * mount option + */ + super_bh = sb_bread(super, + (sector_t)(sbinfo->altsuper / super->s_blocksize)); + else +#endif + /* read reiser4 master super block at 16-th 4096 block */ + super_bh = sb_bread(super, + (sector_t)(REISER4_MAGIC_OFFSET / super->s_blocksize)); + if (!super_bh) + return RETERR(-EIO); + + master_sb = (struct reiser4_master_sb *)super_bh->b_data; + /* check reiser4 magic string */ + if (!strncmp(master_sb->magic, REISER4_SUPER_MAGIC_STRING, + sizeof(REISER4_SUPER_MAGIC_STRING))) { + /* reiser4 master super block contains filesystem blocksize */ + blocksize = le16_to_cpu(get_unaligned(&master_sb->blocksize)); + + if (blocksize != PAGE_SIZE) { + /* + * currenly reiser4's blocksize must be equal to + * pagesize + */ + if (!silent) + warning("nikita-2609", + "%s: wrong block size %ld\n", super->s_id, + blocksize); + brelse(super_bh); + return RETERR(-EINVAL); + } + if (blocksize != super->s_blocksize) { + /* + * filesystem uses 
different blocksize. Reread master + * super block with correct blocksize + */ + brelse(super_bh); + if (!sb_set_blocksize(super, (int)blocksize)) + return RETERR(-EINVAL); + goto read_super_block; + } + + sbinfo->df_plug = + disk_format_plugin_by_unsafe_id( + le16_to_cpu(get_unaligned(&master_sb->disk_plugin_id))); + if (sbinfo->df_plug == NULL) { + if (!silent) + warning("nikita-26091", + "%s: unknown disk format plugin %d\n", + super->s_id, + le16_to_cpu(get_unaligned(&master_sb->disk_plugin_id))); + brelse(super_bh); + return RETERR(-EINVAL); + } + sbinfo->diskmap_block = le64_to_cpu(get_unaligned(&master_sb->diskmap)); + brelse(super_bh); + return 0; + } + + /* there is no reiser4 on the device */ + if (!silent) + warning("nikita-2608", + "%s: wrong master super block magic", super->s_id); + brelse(super_bh); + return RETERR(-EINVAL); +} + +static struct { + reiser4_plugin_type type; + reiser4_plugin_id id; +} default_plugins[PSET_LAST] = { + [PSET_FILE] = { + .type = REISER4_FILE_PLUGIN_TYPE, + .id = UNIX_FILE_PLUGIN_ID + }, + [PSET_DIR] = { + .type = REISER4_DIR_PLUGIN_TYPE, + .id = HASHED_DIR_PLUGIN_ID + }, + [PSET_HASH] = { + .type = REISER4_HASH_PLUGIN_TYPE, + .id = R5_HASH_ID + }, + [PSET_FIBRATION] = { + .type = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_DOT_O + }, + [PSET_PERM] = { + .type = REISER4_PERM_PLUGIN_TYPE, + .id = NULL_PERM_ID + }, + [PSET_FORMATTING] = { + .type = REISER4_FORMATTING_PLUGIN_TYPE, + .id = SMALL_FILE_FORMATTING_ID + }, + [PSET_SD] = { + .type = REISER4_ITEM_PLUGIN_TYPE, + .id = STATIC_STAT_DATA_ID + }, + [PSET_DIR_ITEM] = { + .type = REISER4_ITEM_PLUGIN_TYPE, + .id = COMPOUND_DIR_ID + }, + [PSET_CIPHER] = { + .type = REISER4_CIPHER_PLUGIN_TYPE, + .id = NONE_CIPHER_ID + }, + [PSET_DIGEST] = { + .type = REISER4_DIGEST_PLUGIN_TYPE, + .id = SHA256_32_DIGEST_ID + }, + [PSET_COMPRESSION] = { + .type = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = LZO1_COMPRESSION_ID + }, + [PSET_COMPRESSION_MODE] = { + .type = 
REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = CONVX_COMPRESSION_MODE_ID + }, + [PSET_CLUSTER] = { + .type = REISER4_CLUSTER_PLUGIN_TYPE, + .id = CLUSTER_64K_ID + }, + [PSET_CREATE] = { + .type = REISER4_FILE_PLUGIN_TYPE, + .id = UNIX_FILE_PLUGIN_ID + } +}; + +/* access to default plugin table */ +reiser4_plugin *get_default_plugin(pset_member memb) +{ + return plugin_by_id(default_plugins[memb].type, + default_plugins[memb].id); +} + +/** + * reiser4_init_root_inode - obtain inode of root directory + * @super: super block of filesystem + * + * Obtains inode of root directory (reading it from disk), initializes plugin + * set it was not initialized. + */ +int reiser4_init_root_inode(struct super_block *super) +{ + reiser4_super_info_data *sbinfo = get_super_private(super); + struct inode *inode; + int result = 0; + + inode = reiser4_iget(super, sbinfo->df_plug->root_dir_key(super), 0); + if (IS_ERR(inode)) + return RETERR(PTR_ERR(inode)); + + super->s_root = d_make_root(inode); + if (!super->s_root) { + return RETERR(-ENOMEM); + } + + super->s_root->d_op = &sbinfo->ops.dentry; + + if (!is_inode_loaded(inode)) { + pset_member memb; + plugin_set *pset; + + pset = reiser4_inode_data(inode)->pset; + for (memb = 0; memb < PSET_LAST; ++memb) { + + if (aset_get(pset, memb) != NULL) + continue; + + result = grab_plugin_pset(inode, NULL, memb); + if (result != 0) + break; + + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + } + + if (result == 0) { + if (REISER4_DEBUG) { + for (memb = 0; memb < PSET_LAST; ++memb) + assert("nikita-3500", + aset_get(pset, memb) != NULL); + } + } else + warning("nikita-3448", "Cannot set plugins of root: %i", + result); + reiser4_iget_complete(inode); + + /* As the default pset kept in the root dir may has been changed + (length is unknown), call update_sd. 
*/ + if (!reiser4_inode_get_flag(inode, REISER4_SDLEN_KNOWN)) { + result = reiser4_grab_space( + inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + + if (result == 0) + result = reiser4_update_sd(inode); + + all_grabbed2free(); + } + } + + super->s_maxbytes = MAX_LFS_FILESIZE; + return result; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/inode.c b/fs/reiser4/inode.c new file mode 100644 index 000000000000..cc4a401da2ba --- /dev/null +++ b/fs/reiser4/inode.c @@ -0,0 +1,711 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Inode specific operations. */ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "kassign.h" +#include "coord.h" +#include "seal.h" +#include "dscale.h" +#include "plugin/item/item.h" +#include "plugin/security/perm.h" +#include "plugin/plugin.h" +#include "plugin/object.h" +#include "znode.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "reiser4.h" + +#include /* for struct super_block, address_space */ + +/* return reiser4 internal tree which inode belongs to */ +/* Audited by: green(2002.06.17) */ +reiser4_tree *reiser4_tree_by_inode(const struct inode *inode/* inode queried*/) +{ + assert("nikita-256", inode != NULL); + assert("nikita-257", inode->i_sb != NULL); + return reiser4_get_tree(inode->i_sb); +} + +/* return reiser4-specific inode flags */ +static inline unsigned long *inode_flags(const struct inode *const inode) +{ + assert("nikita-2842", inode != NULL); + return &reiser4_inode_data(inode)->flags; +} + +/* set reiser4-specific flag @f in @inode */ +void reiser4_inode_set_flag(struct inode *inode, reiser4_file_plugin_flags f) +{ + assert("nikita-2248", inode != NULL); + set_bit((int)f, inode_flags(inode)); +} + +/* clear reiser4-specific flag @f in @inode */ +void 
reiser4_inode_clr_flag(struct inode *inode, reiser4_file_plugin_flags f) +{ + assert("nikita-2250", inode != NULL); + clear_bit((int)f, inode_flags(inode)); +} + +/* true if reiser4-specific flag @f is set in @inode */ +int reiser4_inode_get_flag(const struct inode *inode, + reiser4_file_plugin_flags f) +{ + assert("nikita-2251", inode != NULL); + return test_bit((int)f, inode_flags(inode)); +} + +/* convert oid to inode number */ +ino_t oid_to_ino(oid_t oid) +{ + return (ino_t) oid; +} + +/* convert oid to user visible inode number */ +ino_t oid_to_uino(oid_t oid) +{ + /* reiser4 object is uniquely identified by oid which is 64 bit + quantity. Kernel in-memory inode is indexed (in the hash table) by + 32 bit i_ino field, but this is not a problem, because there is a + way to further distinguish inodes with identical inode numbers + (find_actor supplied to iget()). + + But user space expects unique 32 bit inode number. Obviously this + is impossible. Work-around is to somehow hash oid into user visible + inode number. + */ + oid_t max_ino = (ino_t) ~0; + + if (REISER4_INO_IS_OID || (oid <= max_ino)) + return oid; + else + /* this is remotely similar to algorithm used to find next pid + to use for process: after wrap-around start from some + offset rather than from 0. Idea is that there are some long + living objects with which we don't want to collide. + */ + return REISER4_UINO_SHIFT + ((oid - max_ino) & (max_ino >> 1)); +} + +/* check that "inode" is on reiser4 file-system */ +int is_reiser4_inode(const struct inode *inode/* inode queried */) +{ + return inode != NULL && is_reiser4_super(inode->i_sb); +} + +/* Maximal length of a name that can be stored in directory @inode. + + This is used in check during file creation and lookup. 
*/ +int reiser4_max_filename_len(const struct inode *inode/* inode queried */) +{ + assert("nikita-287", is_reiser4_inode(inode)); + assert("nikita-1710", inode_dir_item_plugin(inode)); + if (inode_dir_item_plugin(inode)->s.dir.max_name_len) + return inode_dir_item_plugin(inode)->s.dir.max_name_len(inode); + else + return 255; +} + +#if REISER4_USE_COLLISION_LIMIT +/* Maximal number of hash collisions for this directory. */ +int max_hash_collisions(const struct inode *dir/* inode queried */) +{ + assert("nikita-1711", dir != NULL); + return reiser4_inode_data(dir)->plugin.max_collisions; +} +#endif /* REISER4_USE_COLLISION_LIMIT */ + +/* Install file, inode, and address_space operation on @inode, depending on + its mode. */ +int setup_inode_ops(struct inode *inode /* inode to intialize */ , + reiser4_object_create_data * data /* parameters to create + * object */ ) +{ + reiser4_super_info_data *sinfo; + file_plugin *fplug; + dir_plugin *dplug; + + fplug = inode_file_plugin(inode); + dplug = inode_dir_plugin(inode); + + sinfo = get_super_private(inode->i_sb); + + switch (inode->i_mode & S_IFMT) { + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + { + dev_t rdev; /* to keep gcc happy */ + + assert("vs-46", fplug != NULL); + /* ugly hack with rdev */ + if (data == NULL) { + rdev = inode->i_rdev; + inode->i_rdev = 0; + } else + rdev = data->rdev; + inode->i_blocks = 0; + assert("vs-42", fplug->h.id == SPECIAL_FILE_PLUGIN_ID); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + /* initialize inode->i_fop and inode->i_rdev for block + and char devices */ + init_special_inode(inode, inode->i_mode, rdev); + /* all address space operations are null */ + inode->i_mapping->a_ops = + file_plugins[fplug->h.id].as_ops; + break; + } + case S_IFLNK: + assert("vs-46", fplug != NULL); + assert("vs-42", fplug->h.id == SYMLINK_FILE_PLUGIN_ID); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + inode->i_fop = NULL; + /* all address space operations are null */ + 
inode->i_mapping->a_ops = file_plugins[fplug->h.id].as_ops; + break; + case S_IFDIR: + assert("vs-46", dplug != NULL); + assert("vs-43", (dplug->h.id == HASHED_DIR_PLUGIN_ID || + dplug->h.id == SEEKABLE_HASHED_DIR_PLUGIN_ID)); + inode->i_op = dir_plugins[dplug->h.id].inode_ops; + inode->i_fop = dir_plugins[dplug->h.id].file_ops; + inode->i_mapping->a_ops = dir_plugins[dplug->h.id].as_ops; + break; + case S_IFREG: + assert("vs-46", fplug != NULL); + assert("vs-43", (fplug->h.id == UNIX_FILE_PLUGIN_ID || + fplug->h.id == CRYPTCOMPRESS_FILE_PLUGIN_ID)); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + inode->i_fop = file_plugins[fplug->h.id].file_ops; + inode->i_mapping->a_ops = file_plugins[fplug->h.id].as_ops; + break; + default: + warning("nikita-291", "wrong file mode: %o for %llu", + inode->i_mode, + (unsigned long long)get_inode_oid(inode)); + reiser4_make_bad_inode(inode); + return RETERR(-EINVAL); + } + return 0; +} + +/* Initialize inode from disk data. Called with inode locked. + Return inode locked. 
*/ +static int init_inode(struct inode *inode /* inode to intialise */ , + coord_t *coord/* coord of stat data */) +{ + int result; + item_plugin *iplug; + void *body; + int length; + reiser4_inode *state; + + assert("nikita-292", coord != NULL); + assert("nikita-293", inode != NULL); + + coord_clear_iplug(coord); + result = zload(coord->node); + if (result) + return result; + iplug = item_plugin_by_coord(coord); + body = item_body_by_coord(coord); + length = item_length_by_coord(coord); + + assert("nikita-295", iplug != NULL); + assert("nikita-296", body != NULL); + assert("nikita-297", length > 0); + + /* inode is under I_LOCK now */ + + state = reiser4_inode_data(inode); + /* call stat-data plugin method to load sd content into inode */ + result = iplug->s.sd.init_inode(inode, body, length); + set_plugin(&state->pset, PSET_SD, item_plugin_to_plugin(iplug)); + if (result == 0) { + result = setup_inode_ops(inode, NULL); + if (result == 0 && inode->i_sb->s_root && + inode->i_sb->s_root->d_inode) + result = finish_pset(inode); + } + zrelse(coord->node); + return result; +} + +/* read `inode' from the disk. This is what was previously in + reiserfs_read_inode2(). + + Must be called with inode locked. Return inode still locked. +*/ +static int read_inode(struct inode *inode /* inode to read from disk */ , + const reiser4_key * key /* key of stat data */ , + int silent) +{ + int result; + lock_handle lh; + reiser4_inode *info; + coord_t coord; + + assert("nikita-298", inode != NULL); + assert("nikita-1945", !is_inode_loaded(inode)); + + info = reiser4_inode_data(inode); + assert("nikita-300", info->locality_id != 0); + + coord_init_zero(&coord); + init_lh(&lh); + /* locate stat-data in a tree and return znode locked */ + result = lookup_sd(inode, ZNODE_READ_LOCK, &coord, &lh, key, silent); + assert("nikita-301", !is_inode_loaded(inode)); + if (result == 0) { + /* use stat-data plugin to load sd into inode. 
*/ + result = init_inode(inode, &coord); + if (result == 0) { + /* initialize stat-data seal */ + spin_lock_inode(inode); + reiser4_seal_init(&info->sd_seal, &coord, key); + info->sd_coord = coord; + spin_unlock_inode(inode); + + /* call file plugin's method to initialize plugin + * specific part of inode */ + if (inode_file_plugin(inode)->init_inode_data) + inode_file_plugin(inode)->init_inode_data(inode, + NULL, + 0); + /* load detached directory cursors for stateless + * directory readers (NFS). */ + reiser4_load_cursors(inode); + + /* Check the opened inode for consistency. */ + result = + get_super_private(inode->i_sb)->df_plug-> + check_open(inode); + } + } + /* lookup_sd() doesn't release coord because we want znode + stay read-locked while stat-data fields are accessed in + init_inode() */ + done_lh(&lh); + + if (result != 0) + reiser4_make_bad_inode(inode); + return result; +} + +/* initialise new reiser4 inode being inserted into hash table. */ +static int init_locked_inode(struct inode *inode /* new inode */ , + void *opaque /* key of stat data passed to + * the iget5_locked as cookie */) +{ + reiser4_key *key; + + assert("nikita-1995", inode != NULL); + assert("nikita-1996", opaque != NULL); + key = opaque; + set_inode_oid(inode, get_key_objectid(key)); + reiser4_inode_data(inode)->locality_id = get_key_locality(key); + return 0; +} + +/* reiser4_inode_find_actor() - "find actor" supplied by reiser4 to + iget5_locked(). + + This function is called by iget5_locked() to distinguish reiser4 inodes + having the same inode numbers. Such inodes can only exist due to some error + condition. One of them should be bad. Inodes with identical inode numbers + (objectids) are distinguished by their packing locality. + +*/ +static int reiser4_inode_find_actor(struct inode *inode /* inode from hash table + * to check */ , + void *opaque /* "cookie" passed to + * iget5_locked(). 
This + * is stat-data key */) +{ + reiser4_key *key; + + key = opaque; + return + /* oid is unique, so first term is enough, actually. */ + get_inode_oid(inode) == get_key_objectid(key) && + /* + * also, locality should be checked, but locality is stored in + * the reiser4-specific part of the inode, and actor can be + * called against arbitrary inode that happened to be in this + * hash chain. Hence we first have to check that this is + * reiser4 inode at least. is_reiser4_inode() is probably too + * early to call, as inode may have ->i_op not yet + * initialised. + */ + is_reiser4_super(inode->i_sb) && + /* + * usually objectid is unique, but pseudo files use counter to + * generate objectid. All pseudo files are placed into special + * (otherwise unused) locality. + */ + reiser4_inode_data(inode)->locality_id == get_key_locality(key); +} + +/* hook for kmem_cache_create */ +void loading_init_once(reiser4_inode * info) +{ + mutex_init(&info->loading); +} + +/* for reiser4_alloc_inode */ +void loading_alloc(reiser4_inode * info) +{ + assert("vs-1717", !mutex_is_locked(&info->loading)); +} + +/* for reiser4_destroy */ +void loading_destroy(reiser4_inode * info) +{ + assert("vs-1717a", !mutex_is_locked(&info->loading)); +} + +static void loading_begin(reiser4_inode * info) +{ + mutex_lock(&info->loading); +} + +static void loading_end(reiser4_inode * info) +{ + mutex_unlock(&info->loading); +} + +/** + * reiser4_iget - obtain inode via iget5_locked, read from disk if necessary + * @super: super block of filesystem + * @key: key of inode's stat-data + * @silent: + * + * This is our helper function a la iget(). This is be called by + * lookup_common() and reiser4_read_super(). Return inode locked or error + * encountered. 
+ */ +struct inode *reiser4_iget(struct super_block *super, const reiser4_key *key, + int silent) +{ + struct inode *inode; + int result; + reiser4_inode *info; + + assert("nikita-302", super != NULL); + assert("nikita-303", key != NULL); + + result = 0; + + /* call iget(). Our ->read_inode() is dummy, so this will either + find inode in cache or return uninitialised inode */ + inode = iget5_locked(super, + (unsigned long)get_key_objectid(key), + reiser4_inode_find_actor, + init_locked_inode, (reiser4_key *) key); + if (inode == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + if (is_bad_inode(inode)) { + warning("nikita-304", "Bad inode found"); + reiser4_print_key("key", key); + iput(inode); + return ERR_PTR(RETERR(-EIO)); + } + + info = reiser4_inode_data(inode); + + /* Reiser4 inode state bit REISER4_LOADED is used to distinguish fully + loaded and initialized inode from just allocated inode. If + REISER4_LOADED bit is not set, reiser4_iget() completes loading under + info->loading. The place in reiser4 which uses not initialized inode + is the reiser4 repacker, see repacker-related functions in + plugin/item/extent.c */ + if (!is_inode_loaded(inode)) { + loading_begin(info); + if (!is_inode_loaded(inode)) { + /* locking: iget5_locked returns locked inode */ + assert("nikita-1941", !is_inode_loaded(inode)); + assert("nikita-1949", + reiser4_inode_find_actor(inode, + (reiser4_key *) key)); + /* now, inode has objectid as ->i_ino and locality in + reiser4-specific part. 
This is enough for + read_inode() to read stat data from the disk */ + result = read_inode(inode, key, silent); + } else + loading_end(info); + } + + if (inode->i_state & I_NEW) + unlock_new_inode(inode); + + if (is_bad_inode(inode)) { + assert("vs-1717", result != 0); + loading_end(info); + iput(inode); + inode = ERR_PTR(result); + } else if (REISER4_DEBUG) { + reiser4_key found_key; + + assert("vs-1717", result == 0); + build_sd_key(inode, &found_key); + if (!keyeq(&found_key, key)) { + warning("nikita-305", "Wrong key in sd"); + reiser4_print_key("sought for", key); + reiser4_print_key("found", &found_key); + } + if (inode->i_nlink == 0) { + warning("nikita-3559", "Unlinked inode found: %llu\n", + (unsigned long long)get_inode_oid(inode)); + } + } + return inode; +} + +/* reiser4_iget() may return not fully initialized inode, this function should + * be called after one completes reiser4 inode initializing. */ +void reiser4_iget_complete(struct inode *inode) +{ + assert("zam-988", is_reiser4_inode(inode)); + + if (!is_inode_loaded(inode)) { + reiser4_inode_set_flag(inode, REISER4_LOADED); + loading_end(reiser4_inode_data(inode)); + } +} + +void reiser4_make_bad_inode(struct inode *inode) +{ + assert("nikita-1934", inode != NULL); + + /* clear LOADED bit */ + reiser4_inode_clr_flag(inode, REISER4_LOADED); + make_bad_inode(inode); + return; +} + +file_plugin *inode_file_plugin(const struct inode *inode) +{ + assert("nikita-1997", inode != NULL); + return reiser4_inode_data(inode)->pset->file; +} + +dir_plugin *inode_dir_plugin(const struct inode *inode) +{ + assert("nikita-1998", inode != NULL); + return reiser4_inode_data(inode)->pset->dir; +} + +formatting_plugin *inode_formatting_plugin(const struct inode *inode) +{ + assert("nikita-2000", inode != NULL); + return reiser4_inode_data(inode)->pset->formatting; +} + +hash_plugin *inode_hash_plugin(const struct inode *inode) +{ + assert("nikita-2001", inode != NULL); + return reiser4_inode_data(inode)->pset->hash; 
+} + +fibration_plugin *inode_fibration_plugin(const struct inode *inode) +{ + assert("nikita-2001", inode != NULL); + return reiser4_inode_data(inode)->pset->fibration; +} + +cipher_plugin *inode_cipher_plugin(const struct inode *inode) +{ + assert("edward-36", inode != NULL); + return reiser4_inode_data(inode)->pset->cipher; +} + +compression_plugin *inode_compression_plugin(const struct inode *inode) +{ + assert("edward-37", inode != NULL); + return reiser4_inode_data(inode)->pset->compression; +} + +compression_mode_plugin *inode_compression_mode_plugin(const struct inode * + inode) +{ + assert("edward-1330", inode != NULL); + return reiser4_inode_data(inode)->pset->compression_mode; +} + +cluster_plugin *inode_cluster_plugin(const struct inode *inode) +{ + assert("edward-1328", inode != NULL); + return reiser4_inode_data(inode)->pset->cluster; +} + +file_plugin *inode_create_plugin(const struct inode *inode) +{ + assert("edward-1329", inode != NULL); + return reiser4_inode_data(inode)->pset->create; +} + +digest_plugin *inode_digest_plugin(const struct inode *inode) +{ + assert("edward-86", inode != NULL); + return reiser4_inode_data(inode)->pset->digest; +} + +item_plugin *inode_sd_plugin(const struct inode *inode) +{ + assert("vs-534", inode != NULL); + return reiser4_inode_data(inode)->pset->sd; +} + +item_plugin *inode_dir_item_plugin(const struct inode *inode) +{ + assert("vs-534", inode != NULL); + return reiser4_inode_data(inode)->pset->dir_item; +} + +file_plugin *child_create_plugin(const struct inode *inode) +{ + assert("edward-1329", inode != NULL); + return reiser4_inode_data(inode)->hset->create; +} + +void inode_set_extension(struct inode *inode, sd_ext_bits ext) +{ + reiser4_inode *state; + + assert("nikita-2716", inode != NULL); + assert("nikita-2717", ext < LAST_SD_EXTENSION); + assert("nikita-3491", spin_inode_is_locked(inode)); + + state = reiser4_inode_data(inode); + state->extmask |= 1 << ext; + /* force re-calculation of stat-data length 
on next call to + update_sd(). */ + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); +} + +void inode_clr_extension(struct inode *inode, sd_ext_bits ext) +{ + reiser4_inode *state; + + assert("vpf-1926", inode != NULL); + assert("vpf-1927", ext < LAST_SD_EXTENSION); + assert("vpf-1928", spin_inode_is_locked(inode)); + + state = reiser4_inode_data(inode); + state->extmask &= ~(1 << ext); + /* force re-calculation of stat-data length on next call to + update_sd(). */ + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); +} + +void inode_check_scale_nolock(struct inode *inode, __u64 old, __u64 new) +{ + assert("edward-1287", inode != NULL); + if (!dscale_fit(old, new)) + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + return; +} + +void inode_check_scale(struct inode *inode, __u64 old, __u64 new) +{ + assert("nikita-2875", inode != NULL); + spin_lock_inode(inode); + inode_check_scale_nolock(inode, old, new); + spin_unlock_inode(inode); +} + +/* + * initialize ->ordering field of inode. This field defines how file stat-data + * and body is ordered within a tree with respect to other objects within the + * same parent directory. 
+ */ +void +init_inode_ordering(struct inode *inode, + reiser4_object_create_data * crd, int create) +{ + reiser4_key key; + + if (create) { + struct inode *parent; + + parent = crd->parent; + assert("nikita-3224", inode_dir_plugin(parent) != NULL); + inode_dir_plugin(parent)->build_entry_key(parent, + &crd->dentry->d_name, + &key); + } else { + coord_t *coord; + + coord = &reiser4_inode_data(inode)->sd_coord; + coord_clear_iplug(coord); + /* safe to use ->sd_coord, because node is under long term + * lock */ + WITH_DATA(coord->node, item_key_by_coord(coord, &key)); + } + + set_inode_ordering(inode, get_key_ordering(&key)); +} + +znode *inode_get_vroot(struct inode *inode) +{ + reiser4_block_nr blk; + znode *result; + + spin_lock_inode(inode); + blk = reiser4_inode_data(inode)->vroot; + spin_unlock_inode(inode); + if (!disk_addr_eq(&UBER_TREE_ADDR, &blk)) + result = zlook(reiser4_tree_by_inode(inode), &blk); + else + result = NULL; + return result; +} + +void inode_set_vroot(struct inode *inode, znode *vroot) +{ + spin_lock_inode(inode); + reiser4_inode_data(inode)->vroot = *znode_get_block(vroot); + spin_unlock_inode(inode); +} + +#if REISER4_DEBUG + +void reiser4_inode_invariant(const struct inode *inode) +{ + assert("nikita-3077", spin_inode_is_locked(inode)); +} + +int inode_has_no_jnodes(reiser4_inode * r4_inode) +{ + return jnode_tree_by_reiser4_inode(r4_inode)->rnode == NULL && + r4_inode->nr_jnodes == 0; +} + +#endif + +/* true if directory is empty (only contains dot and dotdot) */ +/* FIXME: shouldn't it be dir plugin method? */ +int is_dir_empty(const struct inode *dir) +{ + assert("nikita-1976", dir != NULL); + + /* rely on our method to maintain directory i_size being equal to the + number of entries. */ + return dir->i_size <= 2 ? 0 : RETERR(-ENOTEMPTY); +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/inode.h b/fs/reiser4/inode.h new file mode 100644 index 000000000000..c6a627312268 --- /dev/null +++ b/fs/reiser4/inode.h @@ -0,0 +1,506 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Inode functions. */ + +#if !defined(__REISER4_INODE_H__) +#define __REISER4_INODE_H__ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "seal.h" +#include "plugin/plugin.h" +#include "plugin/file/cryptcompress.h" +#include "plugin/file/file.h" +#include "plugin/dir/dir.h" +#include "plugin/plugin_set.h" +#include "plugin/security/perm.h" +#include "vfs_ops.h" +#include "jnode.h" +#include "fsdata.h" + +#include /* for __u?? , ino_t */ +#include /* for struct super_block, struct + * rw_semaphore, etc */ +#include +#include + +/* reiser4-specific inode flags. They are "transient" and are not + supposed to be stored on disk. Used to trace "state" of + inode +*/ +typedef enum { + /* this is light-weight inode, inheriting some state from its + parent */ + REISER4_LIGHT_WEIGHT = 0, + /* stat data wasn't yet created */ + REISER4_NO_SD = 1, + /* internal immutable flag. Currently is only used + to avoid race condition during file creation. + See comment in create_object(). */ + REISER4_IMMUTABLE = 2, + /* inode was read from storage */ + REISER4_LOADED = 3, + /* this bit is set for symlinks. inode->i_private points to target + name of symlink. */ + REISER4_GENERIC_PTR_USED = 4, + /* set if size of stat-data item for this inode is known. If this is + * set we can avoid recalculating size of stat-data on each update. 
*/ + REISER4_SDLEN_KNOWN = 5, + /* reiser4_inode->crypt points to the crypto stat */ + REISER4_CRYPTO_STAT_LOADED = 6, + /* cryptcompress_inode_data points to the secret key */ + REISER4_SECRET_KEY_INSTALLED = 7, + /* File (possibly) has pages corresponding to the tail items, that + * were created by ->readpage. It is set by mmap_unix_file() and + * sendfile_unix_file(). This bit is inspected by write_unix_file and + * kill-hook of tail items. It is never cleared once set. This bit is + * modified and inspected under i_mutex. */ + REISER4_HAS_MMAP = 8, + REISER4_PART_MIXED = 9, + REISER4_PART_IN_CONV = 10, + /* This flag indicates that file plugin conversion is in progress */ + REISER4_FILE_CONV_IN_PROGRESS = 11 +} reiser4_file_plugin_flags; + +/* state associated with each inode. + reiser4 inode. + + NOTE-NIKITA In 2.5 kernels it is not necessary that all file-system inodes + be of the same size. File-system allocates inodes by itself through + s_op->allocate_inode() method. So, it is possible to adjust size of inode + at the time of its creation. + + Invariants involving parts of this data-type: + + [inode->eflushed] + +*/ + +typedef struct reiser4_inode reiser4_inode; +/* return pointer to reiser4-specific part of inode */ +static inline reiser4_inode *reiser4_inode_data(const struct inode *inode + /* inode queried */ ); + +#if BITS_PER_LONG == 64 + +#define REISER4_INO_IS_OID (1) +typedef struct {; +} oid_hi_t; + +/* BITS_PER_LONG == 64 */ +#else + +#define REISER4_INO_IS_OID (0) +typedef __u32 oid_hi_t; + +/* BITS_PER_LONG == 64 */ +#endif + +struct reiser4_inode { + /* spin lock protecting fields of this structure. 
*/ + spinlock_t guard; + /* main plugin set that control the file + (see comments in plugin/plugin_set.c) */ + plugin_set *pset; + /* plugin set for inheritance + (see comments in plugin/plugin_set.c) */ + plugin_set *hset; + /* high 32 bits of object id */ + oid_hi_t oid_hi; + /* seal for stat-data */ + seal_t sd_seal; + /* locality id for this file */ + oid_t locality_id; +#if REISER4_LARGE_KEY + __u64 ordering; +#endif + /* coord of stat-data in sealed node */ + coord_t sd_coord; + /* bit-mask of stat-data extentions used by this file */ + __u64 extmask; + /* bitmask of non-default plugins for this inode */ + __u16 plugin_mask; + /* bitmask of set heir plugins for this inode. */ + __u16 heir_mask; + union { + struct list_head readdir_list; + struct list_head not_used; + } lists; + /* per-inode flags. Filled by values of reiser4_file_plugin_flags */ + unsigned long flags; + union { + /* fields specific to unix_file plugin */ + struct unix_file_info unix_file_info; + /* fields specific to cryptcompress file plugin */ + struct cryptcompress_info cryptcompress_info; + } file_plugin_data; + + /* this semaphore is to serialize readers and writers of @pset->file + * when file plugin conversion is enabled + */ + struct rw_semaphore conv_sem; + + /* tree of jnodes. Phantom jnodes (ones not attched to any atom) are + tagged in that tree by EFLUSH_TAG_ANONYMOUS */ + struct radix_tree_root jnodes_tree; +#if REISER4_DEBUG + /* number of unformatted node jnodes of this file in jnode hash table */ + unsigned long nr_jnodes; +#endif + + /* block number of virtual root for this object. 
See comment above + * fs/reiser4/search.c:handle_vroot() */ + reiser4_block_nr vroot; + struct mutex loading; +}; + +void loading_init_once(reiser4_inode *); +void loading_alloc(reiser4_inode *); +void loading_destroy(reiser4_inode *); + +struct reiser4_inode_object { + /* private part */ + reiser4_inode p; + /* generic fields not specific to reiser4, but used by VFS */ + struct inode vfs_inode; +}; + +/* return pointer to the reiser4 specific portion of @inode */ +static inline reiser4_inode *reiser4_inode_data(const struct inode *inode + /* inode queried */ ) +{ + assert("nikita-254", inode != NULL); + return &container_of(inode, struct reiser4_inode_object, vfs_inode)->p; +} + +static inline struct inode *inode_by_reiser4_inode(const reiser4_inode * + r4_inode /* inode queried */ + ) +{ + return &container_of(r4_inode, struct reiser4_inode_object, + p)->vfs_inode; +} + +/* + * reiser4 inodes are identified by 64bit object-id (oid_t), but in struct + * inode ->i_ino field is of type ino_t (long) that can be either 32 or 64 + * bits. + * + * If ->i_ino is 32 bits we store remaining 32 bits in reiser4 specific part + * of inode, otherwise whole oid is stored in i_ino. + * + * Wrappers below ([sg]et_inode_oid()) are used to hide this difference. 
+ */ + +#define OID_HI_SHIFT (sizeof(ino_t) * 8) + +#if REISER4_INO_IS_OID + +static inline oid_t get_inode_oid(const struct inode *inode) +{ + return inode->i_ino; +} + +static inline void set_inode_oid(struct inode *inode, oid_t oid) +{ + inode->i_ino = oid; +} + +/* REISER4_INO_IS_OID */ +#else + +static inline oid_t get_inode_oid(const struct inode *inode) +{ + return + ((__u64) reiser4_inode_data(inode)->oid_hi << OID_HI_SHIFT) | + inode->i_ino; +} + +static inline void set_inode_oid(struct inode *inode, oid_t oid) +{ + assert("nikita-2519", inode != NULL); + inode->i_ino = (ino_t) (oid); + reiser4_inode_data(inode)->oid_hi = (oid) >> OID_HI_SHIFT; + assert("nikita-2521", get_inode_oid(inode) == (oid)); +} + +/* REISER4_INO_IS_OID */ +#endif + +static inline oid_t get_inode_locality(const struct inode *inode) +{ + return reiser4_inode_data(inode)->locality_id; +} + +#if REISER4_LARGE_KEY +static inline __u64 get_inode_ordering(const struct inode *inode) +{ + return reiser4_inode_data(inode)->ordering; +} + +static inline void set_inode_ordering(const struct inode *inode, __u64 ordering) +{ + reiser4_inode_data(inode)->ordering = ordering; +} + +#else + +#define get_inode_ordering(inode) (0) +#define set_inode_ordering(inode, val) noop + +#endif + +/* return inode in which @uf_info is embedded */ +static inline struct inode * +unix_file_info_to_inode(const struct unix_file_info *uf_info) +{ + return &container_of(uf_info, struct reiser4_inode_object, + p.file_plugin_data.unix_file_info)->vfs_inode; +} + +extern ino_t oid_to_ino(oid_t oid) __attribute__ ((const)); +extern ino_t oid_to_uino(oid_t oid) __attribute__ ((const)); + +extern reiser4_tree *reiser4_tree_by_inode(const struct inode *inode); + +#if REISER4_DEBUG +extern void reiser4_inode_invariant(const struct inode *inode); +extern int inode_has_no_jnodes(reiser4_inode *); +#else +#define reiser4_inode_invariant(inode) noop +#endif + +static inline int spin_inode_is_locked(const struct inode *inode) +{ + 
assert_spin_locked(&reiser4_inode_data(inode)->guard); + return 1; +} + +/** + * spin_lock_inode - lock reiser4_inode' embedded spinlock + * @inode: inode to lock + * + * In debug mode it checks that lower priority locks are not held and + * increments reiser4_context's lock counters on which lock ordering checking + * is based. + */ +static inline void spin_lock_inode(struct inode *inode) +{ + assert("", LOCK_CNT_NIL(spin_locked)); + /* check lock ordering */ + assert_spin_not_locked(&d_c_lock); + + spin_lock(&reiser4_inode_data(inode)->guard); + + LOCK_CNT_INC(spin_locked_inode); + LOCK_CNT_INC(spin_locked); + + reiser4_inode_invariant(inode); +} + +/** + * spin_unlock_inode - unlock reiser4_inode' embedded spinlock + * @inode: inode to unlock + * + * In debug mode it checks that spinlock is held and decrements + * reiser4_context's lock counters on which lock ordering checking is based. + */ +static inline void spin_unlock_inode(struct inode *inode) +{ + assert_spin_locked(&reiser4_inode_data(inode)->guard); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_inode)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + reiser4_inode_invariant(inode); + + LOCK_CNT_DEC(spin_locked_inode); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&reiser4_inode_data(inode)->guard); +} + +extern znode *inode_get_vroot(struct inode *inode); +extern void inode_set_vroot(struct inode *inode, znode * vroot); + +extern int reiser4_max_filename_len(const struct inode *inode); +extern int max_hash_collisions(const struct inode *dir); +extern void reiser4_unlock_inode(struct inode *inode); +extern int is_reiser4_inode(const struct inode *inode); +extern int setup_inode_ops(struct inode *inode, reiser4_object_create_data *); +extern struct inode *reiser4_iget(struct super_block *super, + const reiser4_key * key, int silent); +extern void reiser4_iget_complete(struct inode *inode); +extern void reiser4_inode_set_flag(struct inode *inode, + reiser4_file_plugin_flags f); +extern void 
reiser4_inode_clr_flag(struct inode *inode, + reiser4_file_plugin_flags f); +extern int reiser4_inode_get_flag(const struct inode *inode, + reiser4_file_plugin_flags f); + +/* has inode been initialized? */ +static inline int +is_inode_loaded(const struct inode *inode/* inode queried */) +{ + assert("nikita-1120", inode != NULL); + return reiser4_inode_get_flag(inode, REISER4_LOADED); +} + +extern file_plugin *inode_file_plugin(const struct inode *inode); +extern dir_plugin *inode_dir_plugin(const struct inode *inode); +extern formatting_plugin *inode_formatting_plugin(const struct inode *inode); +extern hash_plugin *inode_hash_plugin(const struct inode *inode); +extern fibration_plugin *inode_fibration_plugin(const struct inode *inode); +extern cipher_plugin *inode_cipher_plugin(const struct inode *inode); +extern digest_plugin *inode_digest_plugin(const struct inode *inode); +extern compression_plugin *inode_compression_plugin(const struct inode *inode); +extern compression_mode_plugin *inode_compression_mode_plugin(const struct inode + *inode); +extern cluster_plugin *inode_cluster_plugin(const struct inode *inode); +extern file_plugin *inode_create_plugin(const struct inode *inode); +extern item_plugin *inode_sd_plugin(const struct inode *inode); +extern item_plugin *inode_dir_item_plugin(const struct inode *inode); +extern file_plugin *child_create_plugin(const struct inode *inode); + +extern void reiser4_make_bad_inode(struct inode *inode); + +extern void inode_set_extension(struct inode *inode, sd_ext_bits ext); +extern void inode_clr_extension(struct inode *inode, sd_ext_bits ext); +extern void inode_check_scale(struct inode *inode, __u64 old, __u64 new); +extern void inode_check_scale_nolock(struct inode *inode, __u64 old, __u64 new); + +#define INODE_SET_SIZE(i, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->i_size, __v); \ + i_size_write(__i, __v); \ +}) + +/* + * update 
field @field in inode @i to contain value @value. + */ +#define INODE_SET_FIELD(i, field, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->field, __v); \ + __i->field = __v; \ +}) + +#define INODE_INC_FIELD(i, field) \ +({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->field, __i->field + 1); \ + ++ __i->field; \ +}) + +#define INODE_DEC_FIELD(i, field) \ +({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->field, __i->field - 1); \ + -- __i->field; \ +}) + +/* + * Update field i_nlink in inode @i using library function @op. + */ +#define INODE_SET_NLINK(i, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->i_nlink, __v); \ + set_nlink(__i, __v); \ +}) + +#define INODE_INC_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, __i->i_nlink + 1); \ + inc_nlink(__i); \ +}) + +#define INODE_DROP_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, __i->i_nlink - 1); \ + drop_nlink(__i); \ +}) + +#define INODE_CLEAR_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, 0); \ + clear_nlink(__i); \ +}) + + +static inline void inode_add_blocks(struct inode *inode, __u64 blocks) +{ + inode_add_bytes(inode, blocks << inode->i_blkbits); +} + +static inline void inode_sub_blocks(struct inode *inode, __u64 blocks) +{ + inode_sub_bytes(inode, blocks << inode->i_blkbits); +} + + +/* See comment before reiser4_readdir_common() for description. 
*/ +static inline struct list_head *get_readdir_list(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->lists.readdir_list; +} + +extern void init_inode_ordering(struct inode *inode, + reiser4_object_create_data * crd, int create); + +static inline struct radix_tree_root *jnode_tree_by_inode(struct inode *inode) +{ + return &reiser4_inode_data(inode)->jnodes_tree; +} + +static inline struct radix_tree_root *jnode_tree_by_reiser4_inode(reiser4_inode + *r4_inode) +{ + return &r4_inode->jnodes_tree; +} + +#if REISER4_DEBUG +extern void print_inode(const char *prefix, const struct inode *i); +#endif + +int is_dir_empty(const struct inode *); + +/* __REISER4_INODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/ioctl.h b/fs/reiser4/ioctl.h new file mode 100644 index 000000000000..b24c6c3dd3d2 --- /dev/null +++ b/fs/reiser4/ioctl.h @@ -0,0 +1,41 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#if !defined(__REISER4_IOCTL_H__) +#define __REISER4_IOCTL_H__ + +#include + +/* + * ioctl(2) command used to "unpack" reiser4 file, that is, convert it into + * extents and fix in this state. This is used by applications that rely on + * + * . files being block aligned, and + * + * . files never migrating on disk + * + * for example, boot loaders (LILO) need this. + * + * This ioctl should be used as + * + * result = ioctl(fd, REISER4_IOC_UNPACK); + * + * File behind fd descriptor will be converted to the extents (if necessary), + * and its stat-data will be updated so that it will never be converted back + * into tails again. + */ +#define REISER4_IOC_UNPACK _IOW(0xCD, 1, long) + +/* __REISER4_IOCTL_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/jnode.c b/fs/reiser4/jnode.c new file mode 100644 index 000000000000..0f1594cd643e --- /dev/null +++ b/fs/reiser4/jnode.c @@ -0,0 +1,1905 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Jnode manipulation functions. */ +/* Jnode is entity used to track blocks with data and meta-data in reiser4. + + In particular, jnodes are used to track transactional information + associated with each block. Each znode contains jnode as ->zjnode field. + + Jnode stands for either Josh or Journal node. +*/ + +/* + * Taxonomy. + * + * Jnode represents block containing data or meta-data. There are jnodes + * for: + * + * unformatted blocks (jnodes proper). There are plans, however to + * have a handle per extent unit rather than per each unformatted + * block, because there are so many of them. + * + * For bitmaps. Each bitmap is actually represented by two jnodes--one + * for working and another for "commit" data, together forming bnode. + * + * For io-heads. These are used by log writer. + * + * For formatted nodes (znode). See comment at the top of znode.c for + * details specific to the formatted nodes (znodes). + * + * Node data. + * + * Jnode provides access to the data of node it represents. Data are + * stored in a page. Page is kept in a page cache. This means, that jnodes + * are highly interconnected with page cache and VM internals. + * + * jnode has a pointer to page (->pg) containing its data. Pointer to data + * themselves is cached in ->data field to avoid frequent calls to + * page_address(). + * + * jnode and page are attached to each other by jnode_attach_page(). This + * function places pointer to jnode in set_page_private(), sets PG_private + * flag and increments page counter. + * + * Opposite operation is performed by page_clear_jnode(). 
+ * + * jnode->pg is protected by jnode spin lock, and page->private is + * protected by page lock. See comment at the top of page_cache.c for + * more. + * + * page can be detached from jnode for two reasons: + * + * . jnode is removed from a tree (file is truncated, of formatted + * node is removed by balancing). + * + * . during memory pressure, VM calls ->releasepage() method + * (reiser4_releasepage()) to evict page from memory. + * + * (there, of course, is also umount, but this is special case we are not + * concerned with here). + * + * To protect jnode page from eviction, one calls jload() function that + * "pins" page in memory (loading it if necessary), increments + * jnode->d_count, and kmap()s page. Page is unpinned through call to + * jrelse(). + * + * Jnode life cycle. + * + * jnode is created, placed in hash table, and, optionally, in per-inode + * radix tree. Page can be attached to jnode, pinned, released, etc. + * + * When jnode is captured into atom its reference counter is + * increased. While being part of an atom, jnode can be "early + * flushed". This means that as part of flush procedure, jnode is placed + * into "relocate set", and its page is submitted to the disk. After io + * completes, page can be detached, then loaded again, re-dirtied, etc. + * + * Thread acquired reference to jnode by calling jref() and releases it by + * jput(). When last reference is removed, jnode is still retained in + * memory (cached) if it has page attached, _unless_ it is scheduled for + * destruction (has JNODE_HEARD_BANSHEE bit set). + * + * Tree read-write lock was used as "existential" lock for jnodes. That is, + * jnode->x_count could be changed from 0 to 1 only under tree write lock, + * that is, tree lock protected unreferenced jnodes stored in the hash + * table, from recycling. + * + * This resulted in high contention on tree lock, because jref()/jput() is + * frequent operation. 
To ameliorate this problem, RCU is used: when jput() + * is just about to release last reference on jnode it sets JNODE_RIP bit + * on it, and then proceed with jnode destruction (removing jnode from hash + * table, cbk_cache, detaching page, etc.). All places that change jnode + * reference counter from 0 to 1 (jlookup(), zlook(), zget(), and + * cbk_cache_scan_slots()) check for JNODE_RIP bit (this is done by + * jnode_rip_check() function), and pretend that nothing was found in hash + * table if bit is set. + * + * jput defers actual return of jnode into slab cache to some later time + * (by call_rcu()), this guarantees that other threads can safely continue + * working with JNODE_RIP-ped jnode. + * + */ + +#include "reiser4.h" +#include "debug.h" +#include "dformat.h" +#include "jnode.h" +#include "plugin/plugin_header.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +/*#include "jnode.h"*/ +#include "znode.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "inode.h" +#include "page_cache.h" + +#include /* UML needs this for PAGE_OFFSET */ +#include +#include +#include +#include +#include /* for struct address_space */ +#include /* for inode_wb_list_lock */ + +static struct kmem_cache *_jnode_slab = NULL; + +static void jnode_set_type(jnode * node, jnode_type type); +static int jdelete(jnode * node); +static int jnode_try_drop(jnode * node); + +#if REISER4_DEBUG +static int jnode_invariant(jnode * node, int tlocked, int jlocked); +#endif + +/* true if valid page is attached to jnode */ +static inline int jnode_is_parsed(jnode * node) +{ + return JF_ISSET(node, JNODE_PARSED); +} + +/* hash table support */ + +/* compare two jnode keys for equality. 
Used by hash-table macros */ +static inline int jnode_key_eq(const struct jnode_key *k1, + const struct jnode_key *k2) +{ + assert("nikita-2350", k1 != NULL); + assert("nikita-2351", k2 != NULL); + + return (k1->index == k2->index && k1->objectid == k2->objectid); +} + +/* Hash jnode by its key (inode plus offset). Used by hash-table macros */ +static inline __u32 jnode_key_hashfn(j_hash_table * table, + const struct jnode_key *key) +{ + assert("nikita-2352", key != NULL); + assert("nikita-3346", IS_POW(table->_buckets)); + + /* yes, this is remarkable simply (where not stupid) hash function. */ + return (key->objectid + key->index) & (table->_buckets - 1); +} + +/* The hash table definition */ +#define KMALLOC(size) reiser4_vmalloc(size) +#define KFREE(ptr, size) vfree(ptr) +TYPE_SAFE_HASH_DEFINE(j, jnode, struct jnode_key, key.j, link.j, + jnode_key_hashfn, jnode_key_eq); +#undef KFREE +#undef KMALLOC + +/* call this to initialise jnode hash table */ +int jnodes_tree_init(reiser4_tree * tree/* tree to initialise jnodes for */) +{ + assert("nikita-2359", tree != NULL); + return j_hash_init(&tree->jhash_table, 16384); +} + +/* call this to destroy jnode hash table. This is called during umount. */ +int jnodes_tree_done(reiser4_tree * tree/* tree to destroy jnodes for */) +{ + j_hash_table *jtable; + jnode *node; + jnode *next; + + assert("nikita-2360", tree != NULL); + + /* + * Scan hash table and free all jnodes. + */ + jtable = &tree->jhash_table; + if (jtable->_table) { + for_all_in_htable(jtable, j, node, next) { + assert("nikita-2361", !atomic_read(&node->x_count)); + jdrop(node); + } + + j_hash_done(&tree->jhash_table); + } + return 0; +} + +/** + * init_jnodes - create jnode cache + * + * Initializes slab cache jnodes. It is part of reiser4 module initialization. 
+ */ +int init_jnodes(void) +{ + assert("umka-168", _jnode_slab == NULL); + + _jnode_slab = kmem_cache_create("jnode", sizeof(jnode), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (_jnode_slab == NULL) + return RETERR(-ENOMEM); + + return 0; +} + +/** + * done_znodes - delete znode cache + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_jnodes(void) +{ + destroy_reiser4_cache(&_jnode_slab); +} + +/* Initialize a jnode. */ +void jnode_init(jnode * node, reiser4_tree * tree, jnode_type type) +{ + memset(node, 0, sizeof(jnode)); + ON_DEBUG(node->magic = JMAGIC); + jnode_set_type(node, type); + atomic_set(&node->d_count, 0); + atomic_set(&node->x_count, 0); + spin_lock_init(&node->guard); + spin_lock_init(&node->load); + node->atom = NULL; + node->tree = tree; + INIT_LIST_HEAD(&node->capture_link); + + ASSIGN_NODE_LIST(node, NOT_CAPTURED); + +#if REISER4_DEBUG + { + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(tree->super); + spin_lock_irq(&sbinfo->all_guard); + list_add(&node->jnodes, &sbinfo->all_jnodes); + spin_unlock_irq(&sbinfo->all_guard); + } +#endif +} + +#if REISER4_DEBUG +/* + * Remove jnode from ->all_jnodes list. 
+ */ +static void jnode_done(jnode * node, reiser4_tree * tree) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(tree->super); + + spin_lock_irq(&sbinfo->all_guard); + assert("nikita-2422", !list_empty(&node->jnodes)); + list_del_init(&node->jnodes); + spin_unlock_irq(&sbinfo->all_guard); +} +#endif + +/* return already existing jnode of page */ +jnode *jnode_by_page(struct page *pg) +{ + assert("nikita-2400", PageLocked(pg)); + assert("nikita-2068", PagePrivate(pg)); + assert("nikita-2067", jprivate(pg) != NULL); + return jprivate(pg); +} + +/* exported functions to allocate/free jnode objects outside this file */ +jnode *jalloc(void) +{ + jnode *jal = kmem_cache_alloc(_jnode_slab, reiser4_ctx_gfp_mask_get()); + return jal; +} + +/* return jnode back to the slab allocator */ +inline void jfree(jnode * node) +{ + assert("nikita-2663", (list_empty_careful(&node->capture_link) && + NODE_LIST(node) == NOT_CAPTURED)); + assert("nikita-3222", list_empty(&node->jnodes)); + assert("nikita-3221", jnode_page(node) == NULL); + + /* not yet phash_jnode_destroy(node); */ + + kmem_cache_free(_jnode_slab, node); +} + +/* + * This function is supplied as RCU callback. It actually frees jnode when + * last reference to it is gone. + */ +static void jnode_free_actor(struct rcu_head *head) +{ + jnode *node; + jnode_type jtype; + + node = container_of(head, jnode, rcu); + jtype = jnode_get_type(node); + + ON_DEBUG(jnode_done(node, jnode_get_tree(node))); + + switch (jtype) { + case JNODE_IO_HEAD: + case JNODE_BITMAP: + case JNODE_UNFORMATTED_BLOCK: + jfree(node); + break; + case JNODE_FORMATTED_BLOCK: + zfree(JZNODE(node)); + break; + case JNODE_INODE: + default: + wrong_return_value("nikita-3197", "Wrong jnode type"); + } +} + +/* + * Free a jnode. Post a callback to be executed later through RCU when all + * references to @node are released. 
+ */ +static inline void jnode_free(jnode * node, jnode_type jtype) +{ + if (jtype != JNODE_INODE) { + /*assert("nikita-3219", list_empty(&node->rcu.list)); */ + call_rcu(&node->rcu, jnode_free_actor); + } else + jnode_list_remove(node); +} + +/* allocate new unformatted jnode */ +static jnode *jnew_unformatted(void) +{ + jnode *jal; + + jal = jalloc(); + if (jal == NULL) + return NULL; + + jnode_init(jal, current_tree, JNODE_UNFORMATTED_BLOCK); + jal->key.j.mapping = NULL; + jal->key.j.index = (unsigned long)-1; + jal->key.j.objectid = 0; + return jal; +} + +/* look for jnode with given mapping and offset within hash table */ +jnode *jlookup(reiser4_tree * tree, oid_t objectid, unsigned long index) +{ + struct jnode_key jkey; + jnode *node; + + jkey.objectid = objectid; + jkey.index = index; + + /* + * hash table is _not_ protected by any lock during lookups. All we + * have to do is to disable preemption to keep RCU happy. + */ + + rcu_read_lock(); + node = j_hash_find(&tree->jhash_table, &jkey); + if (node != NULL) { + /* protect @node from recycling */ + jref(node); + assert("nikita-2955", jnode_invariant(node, 0, 0)); + node = jnode_rip_check(tree, node); + } + rcu_read_unlock(); + return node; +} + +/* per inode radix tree of jnodes is protected by tree's read write spin lock */ +static jnode *jfind_nolock(struct address_space *mapping, unsigned long index) +{ + assert("vs-1694", mapping->host != NULL); + + return radix_tree_lookup(jnode_tree_by_inode(mapping->host), index); +} + +jnode *jfind(struct address_space *mapping, unsigned long index) +{ + reiser4_tree *tree; + jnode *node; + + assert("vs-1694", mapping->host != NULL); + tree = reiser4_tree_by_inode(mapping->host); + + read_lock_tree(tree); + node = jfind_nolock(mapping, index); + if (node != NULL) + jref(node); + read_unlock_tree(tree); + return node; +} + +static void inode_attach_jnode(jnode * node) +{ + struct inode *inode; + reiser4_inode *info; + struct radix_tree_root *rtree; + + 
assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + assert("zam-1043", node->key.j.mapping != NULL); + inode = node->key.j.mapping->host; + info = reiser4_inode_data(inode); + rtree = jnode_tree_by_reiser4_inode(info); + if (rtree->rnode == NULL) { + /* prevent inode from being pruned when it has jnodes attached + to it */ + spin_lock_irq(&inode->i_data.tree_lock); + inode->i_data.nrpages++; + spin_unlock_irq(&inode->i_data.tree_lock); + } + assert("zam-1049", equi(rtree->rnode != NULL, info->nr_jnodes != 0)); + check_me("zam-1045", + !radix_tree_insert(rtree, node->key.j.index, node)); + ON_DEBUG(info->nr_jnodes++); +} + +static void inode_detach_jnode(jnode * node) +{ + struct inode *inode; + reiser4_inode *info; + struct radix_tree_root *rtree; + + assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + assert("zam-1044", node->key.j.mapping != NULL); + inode = node->key.j.mapping->host; + info = reiser4_inode_data(inode); + rtree = jnode_tree_by_reiser4_inode(info); + + assert("zam-1051", info->nr_jnodes != 0); + assert("zam-1052", rtree->rnode != NULL); + ON_DEBUG(info->nr_jnodes--); + + /* delete jnode from inode's radix tree of jnodes */ + check_me("zam-1046", radix_tree_delete(rtree, node->key.j.index)); + if (rtree->rnode == NULL) { + /* inode can be pruned now */ + spin_lock_irq(&inode->i_data.tree_lock); + inode->i_data.nrpages--; + spin_unlock_irq(&inode->i_data.tree_lock); + } +} + +/* put jnode into hash table (where they can be found by flush who does not know + mapping) and to inode's tree of jnodes (where they can be found (hopefully + faster) in places where mapping is known). 
Currently it is used by + fs/reiser4/plugin/item/extent_file_ops.c:index_extent_jnode when new jnode is + created */ +static void +hash_unformatted_jnode(jnode * node, struct address_space *mapping, + unsigned long index) +{ + j_hash_table *jtable; + + assert("vs-1446", jnode_is_unformatted(node)); + assert("vs-1442", node->key.j.mapping == 0); + assert("vs-1443", node->key.j.objectid == 0); + assert("vs-1444", node->key.j.index == (unsigned long)-1); + assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + + node->key.j.mapping = mapping; + node->key.j.objectid = get_inode_oid(mapping->host); + node->key.j.index = index; + + jtable = &jnode_get_tree(node)->jhash_table; + + /* race with some other thread inserting jnode into the hash table is + * impossible, because we keep the page lock. */ + /* + * following assertion no longer holds because of RCU: it is possible + * jnode is in the hash table, but with JNODE_RIP bit set. + */ + /* assert("nikita-3211", j_hash_find(jtable, &node->key.j) == NULL); */ + j_hash_insert_rcu(jtable, node); + inode_attach_jnode(node); +} + +static void unhash_unformatted_node_nolock(jnode * node) +{ + assert("vs-1683", node->key.j.mapping != NULL); + assert("vs-1684", + node->key.j.objectid == + get_inode_oid(node->key.j.mapping->host)); + + /* remove jnode from hash-table */ + j_hash_remove_rcu(&node->tree->jhash_table, node); + inode_detach_jnode(node); + node->key.j.mapping = NULL; + node->key.j.index = (unsigned long)-1; + node->key.j.objectid = 0; + +} + +/* remove jnode from hash table and from inode's tree of jnodes. This is used in + reiser4_invalidatepage and in kill_hook_extent -> truncate_inode_jnodes -> + reiser4_uncapture_jnode */ +void unhash_unformatted_jnode(jnode * node) +{ + assert("vs-1445", jnode_is_unformatted(node)); + + write_lock_tree(node->tree); + unhash_unformatted_node_nolock(node); + write_unlock_tree(node->tree); +} + +/* + * search hash table for a jnode with given oid and index. 
If not found, + * allocate new jnode, insert it, and also insert into radix tree for the + * given inode/mapping. + */ +static jnode *find_get_jnode(reiser4_tree * tree, + struct address_space *mapping, + oid_t oid, unsigned long index) +{ + jnode *result; + jnode *shadow; + int preload; + + result = jnew_unformatted(); + + if (unlikely(result == NULL)) + return ERR_PTR(RETERR(-ENOMEM)); + + preload = radix_tree_preload(reiser4_ctx_gfp_mask_get()); + if (preload != 0) + return ERR_PTR(preload); + + write_lock_tree(tree); + shadow = jfind_nolock(mapping, index); + if (likely(shadow == NULL)) { + /* add new jnode to hash table and inode's radix tree of + * jnodes */ + jref(result); + hash_unformatted_jnode(result, mapping, index); + } else { + /* jnode is found in inode's radix tree of jnodes */ + jref(shadow); + jnode_free(result, JNODE_UNFORMATTED_BLOCK); + assert("vs-1498", shadow->key.j.mapping == mapping); + result = shadow; + } + write_unlock_tree(tree); + + assert("nikita-2955", + ergo(result != NULL, jnode_invariant(result, 0, 0))); + radix_tree_preload_end(); + return result; +} + +/* jget() (a la zget() but for unformatted nodes). Returns (and possibly + creates) jnode corresponding to page @pg. jnode is attached to page and + inserted into jnode hash-table. */ +static jnode *do_jget(reiser4_tree * tree, struct page *pg) +{ + /* + * There are two ways to create jnode: starting with pre-existing page + * and without page. + * + * When page already exists, jnode is created + * (jnode_of_page()->do_jget()) under page lock. This is done in + * ->writepage(), or when capturing anonymous page dirtied through + * mmap. + * + * Jnode without page is created by index_extent_jnode(). 
+ * + */ + + jnode *result; + oid_t oid = get_inode_oid(pg->mapping->host); + + assert("umka-176", pg != NULL); + assert("nikita-2394", PageLocked(pg)); + + result = jprivate(pg); + if (likely(result != NULL)) + return jref(result); + + tree = reiser4_tree_by_page(pg); + + /* check hash-table first */ + result = jfind(pg->mapping, pg->index); + if (unlikely(result != NULL)) { + spin_lock_jnode(result); + jnode_attach_page(result, pg); + spin_unlock_jnode(result); + result->key.j.mapping = pg->mapping; + return result; + } + + /* since page is locked, jnode should be allocated with GFP_NOFS flag */ + reiser4_ctx_gfp_mask_force(GFP_NOFS); + result = find_get_jnode(tree, pg->mapping, oid, pg->index); + if (unlikely(IS_ERR(result))) + return result; + /* attach jnode to page */ + spin_lock_jnode(result); + jnode_attach_page(result, pg); + spin_unlock_jnode(result); + return result; +} + +/* + * return jnode for @pg, creating it if necessary. + */ +jnode *jnode_of_page(struct page *pg) +{ + jnode *result; + + assert("nikita-2394", PageLocked(pg)); + + result = do_jget(reiser4_tree_by_page(pg), pg); + + if (REISER4_DEBUG && !IS_ERR(result)) { + assert("nikita-3210", result == jprivate(pg)); + assert("nikita-2046", jnode_page(jprivate(pg)) == pg); + if (jnode_is_unformatted(jprivate(pg))) { + assert("nikita-2364", + jprivate(pg)->key.j.index == pg->index); + assert("nikita-2367", + jprivate(pg)->key.j.mapping == pg->mapping); + assert("nikita-2365", + jprivate(pg)->key.j.objectid == + get_inode_oid(pg->mapping->host)); + assert("vs-1200", + jprivate(pg)->key.j.objectid == + pg->mapping->host->i_ino); + assert("nikita-2356", + jnode_is_unformatted(jnode_by_page(pg))); + } + assert("nikita-2956", jnode_invariant(jprivate(pg), 0, 0)); + } + return result; +} + +/* attach page to jnode: set ->pg pointer in jnode, and ->private one in the + * page.*/ +void jnode_attach_page(jnode * node, struct page *pg) +{ + assert("nikita-2060", node != NULL); + assert("nikita-2061", pg != 
NULL); + + assert("nikita-2050", jprivate(pg) == 0ul); + assert("nikita-2393", !PagePrivate(pg)); + assert("vs-1741", node->pg == NULL); + + assert("nikita-2396", PageLocked(pg)); + assert_spin_locked(&(node->guard)); + + get_page(pg); + set_page_private(pg, (unsigned long)node); + node->pg = pg; + SetPagePrivate(pg); +} + +/* Dual to jnode_attach_page: break a binding between page and jnode */ +void page_clear_jnode(struct page *page, jnode * node) +{ + assert("nikita-2425", PageLocked(page)); + assert_spin_locked(&(node->guard)); + assert("nikita-2428", PagePrivate(page)); + + assert("nikita-3551", !PageWriteback(page)); + + JF_CLR(node, JNODE_PARSED); + set_page_private(page, 0ul); + ClearPagePrivate(page); + node->pg = NULL; + put_page(page); +} + +#if 0 +/* it is only used in one place to handle error */ +void +page_detach_jnode(struct page *page, struct address_space *mapping, + unsigned long index) +{ + assert("nikita-2395", page != NULL); + + lock_page(page); + if ((page->mapping == mapping) && (page->index == index) + && PagePrivate(page)) { + jnode *node; + + node = jprivate(page); + spin_lock_jnode(node); + page_clear_jnode(page, node); + spin_unlock_jnode(node); + } + unlock_page(page); +} +#endif /* 0 */ + +/* return @node page locked. + + Locking ordering requires that one first takes page lock and afterwards + spin lock on node attached to this page. Sometimes it is necessary to go in + the opposite direction. This is done through standard trylock-and-release + loop. +*/ +static struct page *jnode_lock_page(jnode * node) +{ + struct page *page; + + assert("nikita-2052", node != NULL); + assert("nikita-2401", LOCK_CNT_NIL(spin_locked_jnode)); + + while (1) { + + spin_lock_jnode(node); + page = jnode_page(node); + if (page == NULL) + break; + + /* no need to get_page( page ) here, because page cannot + be evicted from memory without detaching it from jnode and + this requires spin lock on jnode that we already hold. 
+ */ + if (trylock_page(page)) { + /* We won a lock on jnode page, proceed. */ + break; + } + + /* Page is locked by someone else. */ + get_page(page); + spin_unlock_jnode(node); + wait_on_page_locked(page); + /* it is possible that page was detached from jnode and + returned to the free pool, or re-assigned while we were + waiting on locked bit. This will be rechecked on the next + loop iteration. + */ + put_page(page); + + /* try again */ + } + return page; +} + +/* + * is JNODE_PARSED bit is not set, call ->parse() method of jnode, to verify + * validness of jnode content. + */ +static inline int jparse(jnode * node) +{ + int result; + + assert("nikita-2466", node != NULL); + + spin_lock_jnode(node); + if (likely(!jnode_is_parsed(node))) { + result = jnode_ops(node)->parse(node); + if (likely(result == 0)) + JF_SET(node, JNODE_PARSED); + } else + result = 0; + spin_unlock_jnode(node); + return result; +} + +/* Lock a page attached to jnode, create and attach page to jnode if it had no + * one. */ +static struct page *jnode_get_page_locked(jnode * node, gfp_t gfp_flags) +{ + struct page *page; + + spin_lock_jnode(node); + page = jnode_page(node); + + if (page == NULL) { + spin_unlock_jnode(node); + page = find_or_create_page(jnode_get_mapping(node), + jnode_get_index(node), gfp_flags); + if (page == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + } else { + if (trylock_page(page)) { + spin_unlock_jnode(node); + return page; + } + get_page(page); + spin_unlock_jnode(node); + lock_page(page); + assert("nikita-3134", page->mapping == jnode_get_mapping(node)); + } + + spin_lock_jnode(node); + if (!jnode_page(node)) + jnode_attach_page(node, page); + spin_unlock_jnode(node); + + put_page(page); + assert("zam-894", jnode_page(node) == page); + return page; +} + +/* Start read operation for jnode's page if page is not up-to-date. 
*/ +static int jnode_start_read(jnode * node, struct page *page) +{ + assert("zam-893", PageLocked(page)); + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + return reiser4_page_io(page, node, READ, reiser4_ctx_gfp_mask_get()); +} + +#if REISER4_DEBUG +static void check_jload(jnode * node, struct page *page) +{ + if (jnode_is_znode(node)) { + znode *z = JZNODE(node); + + if (znode_is_any_locked(z)) { + assert("nikita-3253", + z->nr_items == + node_plugin_by_node(z)->num_of_items(z)); + kunmap(page); + } + assert("nikita-3565", znode_invariant(z)); + } +} +#else +#define check_jload(node, page) noop +#endif + +/* prefetch jnode to speed up next call to jload. Call this when you are going + * to call jload() shortly. This will bring appropriate portion of jnode into + * CPU cache. */ +void jload_prefetch(jnode * node) +{ + prefetchw(&node->x_count); +} + +/* load jnode's data into memory */ +int jload_gfp(jnode * node /* node to load */ , + gfp_t gfp_flags /* allocation flags */ , + int do_kmap/* true if page should be kmapped */) +{ + struct page *page; + int result = 0; + int parsed; + + assert("nikita-3010", reiser4_schedulable()); + + prefetchw(&node->pg); + + /* taking d-reference implies taking x-reference. */ + jref(node); + + /* + * acquiring d-reference to @jnode and check for JNODE_PARSED bit + * should be atomic, otherwise there is a race against + * reiser4_releasepage(). 
+ */ + spin_lock(&(node->load)); + add_d_ref(node); + parsed = jnode_is_parsed(node); + spin_unlock(&(node->load)); + + if (unlikely(!parsed)) { + page = jnode_get_page_locked(node, gfp_flags); + if (unlikely(IS_ERR(page))) { + result = PTR_ERR(page); + goto failed; + } + + result = jnode_start_read(node, page); + if (unlikely(result != 0)) + goto failed; + + wait_on_page_locked(page); + if (unlikely(!PageUptodate(page))) { + result = RETERR(-EIO); + goto failed; + } + + if (do_kmap) + node->data = kmap(page); + + result = jparse(node); + if (unlikely(result != 0)) { + if (do_kmap) + kunmap(page); + goto failed; + } + check_jload(node, page); + } else { + page = jnode_page(node); + check_jload(node, page); + if (do_kmap) + node->data = kmap(page); + } + + if (!is_writeout_mode()) + /* We do not mark pages active if jload is called as a part of + * jnode_flush() or reiser4_write_logs(). Both jnode_flush() + * and write_logs() add no value to cached data, there is no + * sense to mark pages as active when they go to disk, it just + * confuses vm scanning routines because clean page could be + * moved out from inactive list as a result of this + * mark_page_accessed() call. */ + mark_page_accessed(page); + + return 0; + +failed: + jrelse_tail(node); + return result; + +} + +/* start asynchronous reading for given jnode's page. */ +int jstartio(jnode * node) +{ + struct page *page; + + page = jnode_get_page_locked(node, reiser4_ctx_gfp_mask_get()); + if (IS_ERR(page)) + return PTR_ERR(page); + + return jnode_start_read(node, page); +} + +/* Initialize a node by calling appropriate plugin instead of reading + * node from disk as in jload(). 
*/ +int jinit_new(jnode * node, gfp_t gfp_flags) +{ + struct page *page; + int result; + + jref(node); + add_d_ref(node); + + page = jnode_get_page_locked(node, gfp_flags); + if (IS_ERR(page)) { + result = PTR_ERR(page); + goto failed; + } + + SetPageUptodate(page); + unlock_page(page); + + node->data = kmap(page); + + if (!jnode_is_parsed(node)) { + jnode_plugin *jplug = jnode_ops(node); + spin_lock_jnode(node); + result = jplug->init(node); + spin_unlock_jnode(node); + if (result) { + kunmap(page); + goto failed; + } + JF_SET(node, JNODE_PARSED); + } + + return 0; + +failed: + jrelse(node); + return result; +} + +/* release a reference to jnode acquired by jload(), decrement ->d_count */ +void jrelse_tail(jnode * node/* jnode to release references to */) +{ + assert("nikita-489", atomic_read(&node->d_count) > 0); + atomic_dec(&node->d_count); + /* release reference acquired in jload_gfp() or jinit_new() */ + jput(node); + if (jnode_is_unformatted(node) || jnode_is_znode(node)) + LOCK_CNT_DEC(d_refs); +} + +/* drop reference to node data. When last reference is dropped, data are + unloaded. */ +void jrelse(jnode * node/* jnode to release references to */) +{ + struct page *page; + + assert("nikita-487", node != NULL); + assert_spin_not_locked(&(node->guard)); + + page = jnode_page(node); + if (likely(page != NULL)) { + /* + * it is safe not to lock jnode here, because at this point + * @node->d_count is greater than zero (if jrelse() is used + * correctly, that is). JNODE_PARSED may be not set yet, if, + * for example, we got here as a result of error handling path + * in jload(). Anyway, page cannot be detached by + * reiser4_releasepage(). truncate will invalidate page + * regardless, but this should not be a problem. 
+ */ + kunmap(page); + } + jrelse_tail(node); +} + +/* called from jput() to wait for io completion */ +static void jnode_finish_io(jnode * node) +{ + struct page *page; + + assert("nikita-2922", node != NULL); + + spin_lock_jnode(node); + page = jnode_page(node); + if (page != NULL) { + get_page(page); + spin_unlock_jnode(node); + wait_on_page_writeback(page); + put_page(page); + } else + spin_unlock_jnode(node); +} + +/* + * This is called by jput() when last reference to jnode is released. This is + * separate function, because we want fast path of jput() to be inline and, + * therefore, small. + */ +void jput_final(jnode * node) +{ + int r_i_p; + + /* A fast check for keeping node in cache. We always keep node in cache + * if its page is present and node was not marked for deletion */ + if (jnode_page(node) != NULL && !JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + rcu_read_unlock(); + return; + } + r_i_p = !JF_TEST_AND_SET(node, JNODE_RIP); + /* + * if r_i_p is true, we were first to set JNODE_RIP on this node. In + * this case it is safe to access node after unlock. + */ + rcu_read_unlock(); + if (r_i_p) { + jnode_finish_io(node); + if (JF_ISSET(node, JNODE_HEARD_BANSHEE)) + /* node is removed from the tree. */ + jdelete(node); + else + jnode_try_drop(node); + } + /* if !r_i_p some other thread is already killing it */ +} + +int jwait_io(jnode * node, int rw) +{ + struct page *page; + int result; + + assert("zam-448", jnode_page(node) != NULL); + + page = jnode_page(node); + + result = 0; + if (rw == READ) { + wait_on_page_locked(page); + } else { + assert("nikita-2227", rw == WRITE); + wait_on_page_writeback(page); + } + if (PageError(page)) + result = RETERR(-EIO); + + return result; +} + +/* + * jnode types and plugins. + * + * jnode by itself is a "base type". There are several different jnode + * flavors, called "jnode types" (see jnode_type for a list). Sometimes code + * has to do different things based on jnode type. 
In the standard reiser4 way + * this is done by having jnode plugin (see fs/reiser4/plugin.h:jnode_plugin). + * + * Functions below deal with jnode types and define methods of jnode plugin. + * + */ + +/* set jnode type. This is done during jnode initialization. */ +static void jnode_set_type(jnode * node, jnode_type type) +{ + static unsigned long type_to_mask[] = { + [JNODE_UNFORMATTED_BLOCK] = 1, + [JNODE_FORMATTED_BLOCK] = 0, + [JNODE_BITMAP] = 2, + [JNODE_IO_HEAD] = 6, + [JNODE_INODE] = 4 + }; + + assert("zam-647", type < LAST_JNODE_TYPE); + assert("nikita-2815", !jnode_is_loaded(node)); + assert("nikita-3386", node->state == 0); + + node->state |= (type_to_mask[type] << JNODE_TYPE_1); +} + +/* ->init() method of jnode plugin for jnodes that don't require plugin + * specific initialization. */ +static int init_noinit(jnode * node UNUSED_ARG) +{ + return 0; +} + +/* ->parse() method of jnode plugin for jnodes that don't require plugin + * specific pasring. */ +static int parse_noparse(jnode * node UNUSED_ARG) +{ + return 0; +} + +/* ->mapping() method for unformatted jnode */ +struct address_space *mapping_jnode(const jnode * node) +{ + struct address_space *map; + + assert("nikita-2713", node != NULL); + + /* mapping is stored in jnode */ + + map = node->key.j.mapping; + assert("nikita-2714", map != NULL); + assert("nikita-2897", is_reiser4_inode(map->host)); + assert("nikita-2715", get_inode_oid(map->host) == node->key.j.objectid); + return map; +} + +/* ->index() method for unformatted jnodes */ +unsigned long index_jnode(const jnode * node) +{ + /* index is stored in jnode */ + return node->key.j.index; +} + +/* ->remove() method for unformatted jnodes */ +static inline void remove_jnode(jnode * node, reiser4_tree * tree) +{ + /* remove jnode from hash table and radix tree */ + if (node->key.j.mapping) + unhash_unformatted_node_nolock(node); +} + +/* ->mapping() method for znodes */ +static struct address_space *mapping_znode(const jnode * node) +{ + /* all 
znodes belong to fake inode */ + return reiser4_get_super_fake(jnode_get_tree(node)->super)->i_mapping; +} + +/* ->index() method for znodes */ +static unsigned long index_znode(const jnode * node) +{ + unsigned long addr; + assert("nikita-3317", (1 << znode_shift_order) < sizeof(znode)); + + /* index of znode is just its address (shifted) */ + addr = (unsigned long)node; + return (addr - PAGE_OFFSET) >> znode_shift_order; +} + +/* ->mapping() method for bitmap jnode */ +static struct address_space *mapping_bitmap(const jnode * node) +{ + /* all bitmap blocks belong to special bitmap inode */ + return get_super_private(jnode_get_tree(node)->super)->bitmap-> + i_mapping; +} + +/* ->index() method for jnodes that are indexed by address */ +static unsigned long index_is_address(const jnode * node) +{ + unsigned long ind; + + ind = (unsigned long)node; + return ind - PAGE_OFFSET; +} + +/* resolve race with jput */ +jnode *jnode_rip_sync(reiser4_tree *tree, jnode *node) +{ + /* + * This is used as part of RCU-based jnode handling. + * + * jlookup(), zlook(), zget(), and cbk_cache_scan_slots() have to work + * with unreferenced jnodes (ones with ->x_count == 0). Hash table is + * not protected during this, so concurrent thread may execute + * zget-set-HEARD_BANSHEE-zput, or somehow else cause jnode to be + * freed in jput_final(). To avoid such races, jput_final() sets + * JNODE_RIP on jnode (under tree lock). All places that work with + * unreferenced jnodes call this function. It checks for JNODE_RIP bit + * (first without taking tree lock), and if this bit is set, released + * reference acquired by the current thread and returns NULL. + * + * As a result, if jnode is being concurrently freed, NULL is returned + * and caller should pretend that jnode wasn't found in the first + * place. + * + * Otherwise it's safe to release "rcu-read-lock" and continue with + * jnode. 
+ */ + if (unlikely(JF_ISSET(node, JNODE_RIP))) { + read_lock_tree(tree); + if (JF_ISSET(node, JNODE_RIP)) { + dec_x_ref(node); + node = NULL; + } + read_unlock_tree(tree); + } + return node; +} + +reiser4_key *jnode_build_key(const jnode * node, reiser4_key * key) +{ + struct inode *inode; + item_plugin *iplug; + loff_t off; + + assert("nikita-3092", node != NULL); + assert("nikita-3093", key != NULL); + assert("nikita-3094", jnode_is_unformatted(node)); + + off = ((loff_t) index_jnode(node)) << PAGE_SHIFT; + inode = mapping_jnode(node)->host; + + if (node->parent_item_id != 0) + iplug = item_plugin_by_id(node->parent_item_id); + else + iplug = NULL; + + if (iplug != NULL && iplug->f.key_by_offset) + iplug->f.key_by_offset(inode, off, key); + else { + file_plugin *fplug; + + fplug = inode_file_plugin(inode); + assert("zam-1007", fplug != NULL); + assert("zam-1008", fplug->key_by_inode != NULL); + + fplug->key_by_inode(inode, off, key); + } + + return key; +} + +/* ->parse() method for formatted nodes */ +static int parse_znode(jnode * node) +{ + return zparse(JZNODE(node)); +} + +/* ->delete() method for formatted nodes */ +static void delete_znode(jnode * node, reiser4_tree * tree) +{ + znode *z; + + assert_rw_write_locked(&(tree->tree_lock)); + assert("vs-898", JF_ISSET(node, JNODE_HEARD_BANSHEE)); + + z = JZNODE(node); + assert("vs-899", z->c_count == 0); + + /* delete znode from sibling list. */ + sibling_list_remove(z); + + znode_remove(z, tree); +} + +/* ->remove() method for formatted nodes */ +static int remove_znode(jnode * node, reiser4_tree * tree) +{ + znode *z; + + assert_rw_write_locked(&(tree->tree_lock)); + z = JZNODE(node); + + if (z->c_count == 0) { + /* detach znode from sibling list. */ + sibling_list_drop(z); + /* this is called with tree spin-lock held, so call + znode_remove() directly (rather than znode_lock_remove()). 
*/ + znode_remove(z, tree); + return 0; + } + return RETERR(-EBUSY); +} + +/* ->init() method for formatted nodes */ +int init_znode(jnode * node) +{ + znode *z; + + z = JZNODE(node); + /* call node plugin to do actual initialization */ + z->nr_items = 0; + return z->nplug->init(z); +} + +/* ->clone() method for formatted nodes */ +static jnode *clone_formatted(jnode * node) +{ + znode *clone; + + assert("vs-1430", jnode_is_znode(node)); + clone = zalloc(reiser4_ctx_gfp_mask_get()); + if (clone == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + zinit(clone, NULL, current_tree); + jnode_set_block(ZJNODE(clone), jnode_get_block(node)); + /* ZJNODE(clone)->key.z is not initialized */ + clone->level = JZNODE(node)->level; + + return ZJNODE(clone); +} + +/* jplug->clone for unformatted nodes */ +static jnode *clone_unformatted(jnode * node) +{ + jnode *clone; + + assert("vs-1431", jnode_is_unformatted(node)); + clone = jalloc(); + if (clone == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + jnode_init(clone, current_tree, JNODE_UNFORMATTED_BLOCK); + jnode_set_block(clone, jnode_get_block(node)); + + return clone; + +} + +/* + * Setup jnode plugin methods for various jnode types. 
+ */ +jnode_plugin jnode_plugins[LAST_JNODE_TYPE] = { + [JNODE_UNFORMATTED_BLOCK] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_UNFORMATTED_BLOCK, + .pops = NULL, + .label = "unformatted", + .desc = "unformatted node", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_jnode, + .index = index_jnode, + .clone = clone_unformatted + }, + [JNODE_FORMATTED_BLOCK] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_FORMATTED_BLOCK, + .pops = NULL, + .label = "formatted", + .desc = "formatted tree node", + .linkage = {NULL, NULL} + }, + .init = init_znode, + .parse = parse_znode, + .mapping = mapping_znode, + .index = index_znode, + .clone = clone_formatted + }, + [JNODE_BITMAP] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_BITMAP, + .pops = NULL, + .label = "bitmap", + .desc = "bitmap node", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_bitmap, + .index = index_is_address, + .clone = NULL + }, + [JNODE_IO_HEAD] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_IO_HEAD, + .pops = NULL, + .label = "io head", + .desc = "io head", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_bitmap, + .index = index_is_address, + .clone = NULL + }, + [JNODE_INODE] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_INODE, + .pops = NULL, + .label = "inode", + .desc = "inode's builtin jnode", + .linkage = {NULL, NULL} + }, + .init = NULL, + .parse = NULL, + .mapping = NULL, + .index = NULL, + .clone = NULL + } +}; + +/* + * jnode destruction. + * + * Thread may use a jnode after it acquired a reference to it. References are + * counted in ->x_count field. Reference protects jnode from being + * recycled. This is different from protecting jnode data (that are stored in + * jnode page) from being evicted from memory. 
Data are protected by jload() + * and released by jrelse(). + * + * If thread already possesses a reference to the jnode it can acquire another + * one through jref(). Initial reference is obtained (usually) by locating + * jnode in some indexing structure that depends on jnode type: formatted + * nodes are kept in global hash table, where they are indexed by block + * number, and also in the cbk cache. Unformatted jnodes are also kept in hash + * table, which is indexed by oid and offset within file, and in per-inode + * radix tree. + * + * Reference to jnode is released by jput(). If last reference is released, + * jput_final() is called. This function determines whether jnode has to be + * deleted (this happens when corresponding node is removed from the file + * system, jnode is marked with JNODE_HEARD_BANSHEE bit in this case), or it + * should be just "removed" (deleted from memory). + * + * Jnode destruction is signally delicate dance because of locking and RCU. + */ + +/* + * Returns true if jnode cannot be removed right now. This check is called + * under tree lock. If it returns true, jnode is irrevocably committed to be + * deleted/removed. + */ +static inline int jnode_is_busy(const jnode * node, jnode_type jtype) +{ + /* if other thread managed to acquire a reference to this jnode, don't + * free it. */ + if (atomic_read(&node->x_count) > 0) + return 1; + /* also, don't free znode that has children in memory */ + if (jtype == JNODE_FORMATTED_BLOCK && JZNODE(node)->c_count > 0) + return 1; + return 0; +} + +/* + * this is called as part of removing jnode. Based on jnode type, call + * corresponding function that removes jnode from indices and returns it back + * to the appropriate slab (through RCU). 
+ */ +static inline void +jnode_remove(jnode * node, jnode_type jtype, reiser4_tree * tree) +{ + switch (jtype) { + case JNODE_UNFORMATTED_BLOCK: + remove_jnode(node, tree); + break; + case JNODE_IO_HEAD: + case JNODE_BITMAP: + break; + case JNODE_INODE: + break; + case JNODE_FORMATTED_BLOCK: + remove_znode(node, tree); + break; + default: + wrong_return_value("nikita-3196", "Wrong jnode type"); + } +} + +/* + * this is called as part of deleting jnode. Based on jnode type, call + * corresponding function that removes jnode from indices and returns it back + * to the appropriate slab (through RCU). + * + * This differs from jnode_remove() only for formatted nodes---for them + * sibling list handling is different for removal and deletion. + */ +static inline void +jnode_delete(jnode * node, jnode_type jtype, reiser4_tree * tree UNUSED_ARG) +{ + switch (jtype) { + case JNODE_UNFORMATTED_BLOCK: + remove_jnode(node, tree); + break; + case JNODE_IO_HEAD: + case JNODE_BITMAP: + break; + case JNODE_FORMATTED_BLOCK: + delete_znode(node, tree); + break; + case JNODE_INODE: + default: + wrong_return_value("nikita-3195", "Wrong jnode type"); + } +} + +#if REISER4_DEBUG +/* + * remove jnode from the debugging list of all jnodes hanging off super-block. + */ +void jnode_list_remove(jnode * node) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(jnode_get_tree(node)->super); + + spin_lock_irq(&sbinfo->all_guard); + assert("nikita-2422", !list_empty(&node->jnodes)); + list_del_init(&node->jnodes); + spin_unlock_irq(&sbinfo->all_guard); +} +#endif + +/* + * this is called by jput_final() to remove jnode when last reference to it is + * released. 
+ */ +static int jnode_try_drop(jnode * node) +{ + int result; + reiser4_tree *tree; + jnode_type jtype; + + assert("nikita-2491", node != NULL); + assert("nikita-2583", JF_ISSET(node, JNODE_RIP)); + + tree = jnode_get_tree(node); + jtype = jnode_get_type(node); + + spin_lock_jnode(node); + write_lock_tree(tree); + /* + * if jnode has a page---leave it alone. Memory pressure will + * eventually kill page and jnode. + */ + if (jnode_page(node) != NULL) { + write_unlock_tree(tree); + spin_unlock_jnode(node); + JF_CLR(node, JNODE_RIP); + return RETERR(-EBUSY); + } + + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (result == 0) { + assert("nikita-2582", !JF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("jmacd-511/b", atomic_read(&node->d_count) == 0); + + spin_unlock_jnode(node); + /* no page and no references---despatch him. */ + jnode_remove(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. */ + write_unlock_tree(tree); + spin_unlock_jnode(node); + JF_CLR(node, JNODE_RIP); + } + return result; +} + +/* jdelete() -- Delete jnode from the tree and file system */ +static int jdelete(jnode * node/* jnode to finish with */) +{ + struct page *page; + int result; + reiser4_tree *tree; + jnode_type jtype; + + assert("nikita-467", node != NULL); + assert("nikita-2531", JF_ISSET(node, JNODE_RIP)); + + jtype = jnode_get_type(node); + + page = jnode_lock_page(node); + assert_spin_locked(&(node->guard)); + + tree = jnode_get_tree(node); + + write_lock_tree(tree); + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (likely(!result)) { + assert("nikita-2123", JF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("jmacd-511", atomic_read(&node->d_count) == 0); + + /* detach page */ + if (page != NULL) { + /* + * FIXME this is racy against jnode_extent_write(). 
+ */ + page_clear_jnode(page, node); + } + spin_unlock_jnode(node); + /* goodbye */ + jnode_delete(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + /* @node is no longer valid pointer */ + if (page != NULL) + reiser4_drop_page(page); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. */ + JF_CLR(node, JNODE_RIP); + write_unlock_tree(tree); + spin_unlock_jnode(node); + if (page != NULL) + unlock_page(page); + } + return result; +} + +/* drop jnode on the floor. + + Return value: + + -EBUSY: failed to drop jnode, because there are still references to it + + 0: successfully dropped jnode + +*/ +static int jdrop_in_tree(jnode * node, reiser4_tree * tree) +{ + struct page *page; + jnode_type jtype; + int result; + + assert("zam-602", node != NULL); + assert_rw_not_read_locked(&(tree->tree_lock)); + assert_rw_not_write_locked(&(tree->tree_lock)); + assert("nikita-2403", !JF_ISSET(node, JNODE_HEARD_BANSHEE)); + + jtype = jnode_get_type(node); + + page = jnode_lock_page(node); + assert_spin_locked(&(node->guard)); + + write_lock_tree(tree); + + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (!result) { + assert("nikita-2488", page == jnode_page(node)); + assert("nikita-2533", atomic_read(&node->d_count) == 0); + if (page != NULL) { + assert("nikita-2126", !PageDirty(page)); + assert("nikita-2127", PageUptodate(page)); + assert("nikita-2181", PageLocked(page)); + page_clear_jnode(page, node); + } + spin_unlock_jnode(node); + jnode_remove(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + if (page != NULL) + reiser4_drop_page(page); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. */ + JF_CLR(node, JNODE_RIP); + write_unlock_tree(tree); + spin_unlock_jnode(node); + if (page != NULL) + unlock_page(page); + } + return result; +} + +/* This function frees jnode "if possible". 
In particular, [dcx]_count has to + be 0 (where applicable). */ +void jdrop(jnode * node) +{ + jdrop_in_tree(node, jnode_get_tree(node)); +} + +/* IO head jnode implementation; The io heads are simple j-nodes with limited + functionality (these j-nodes are not in any hash table) just for reading + from and writing to disk. */ + +jnode *reiser4_alloc_io_head(const reiser4_block_nr * block) +{ + jnode *jal = jalloc(); + + if (jal != NULL) { + jnode_init(jal, current_tree, JNODE_IO_HEAD); + jnode_set_block(jal, block); + } + + jref(jal); + + return jal; +} + +void reiser4_drop_io_head(jnode * node) +{ + assert("zam-648", jnode_get_type(node) == JNODE_IO_HEAD); + + jput(node); + jdrop(node); +} + +/* protect keep jnode data from reiser4_releasepage() */ +void pin_jnode_data(jnode * node) +{ + assert("zam-671", jnode_page(node) != NULL); + get_page(jnode_page(node)); +} + +/* make jnode data free-able again */ +void unpin_jnode_data(jnode * node) +{ + assert("zam-672", jnode_page(node) != NULL); + put_page(jnode_page(node)); +} + +struct address_space *jnode_get_mapping(const jnode * node) +{ + return jnode_ops(node)->mapping(node); +} + +#if REISER4_DEBUG +/* debugging aid: jnode invariant */ +int jnode_invariant_f(const jnode * node, char const **msg) +{ +#define _ergo(ant, con) \ + ((*msg) = "{" #ant "} ergo {" #con "}", ergo((ant), (con))) +#define _check(exp) ((*msg) = #exp, (exp)) + + return _check(node != NULL) && + /* [jnode-queued] */ + /* only relocated node can be queued, except that when znode + * is being deleted, its JNODE_RELOC bit is cleared */ + _ergo(JF_ISSET(node, JNODE_FLUSH_QUEUED), + JF_ISSET(node, JNODE_RELOC) || + JF_ISSET(node, JNODE_HEARD_BANSHEE)) && + _check(node->jnodes.prev != NULL) && + _check(node->jnodes.next != NULL) && + /* [jnode-dirty] invariant */ + /* dirty inode is part of atom */ + _ergo(JF_ISSET(node, JNODE_DIRTY), node->atom != NULL) && + /* [jnode-oid] invariant */ + /* for unformatted node ->objectid and ->mapping fields are 
+ * consistent */ + _ergo(jnode_is_unformatted(node) && node->key.j.mapping != NULL, + node->key.j.objectid == + get_inode_oid(node->key.j.mapping->host)) && + /* [jnode-atom-valid] invariant */ + /* node atom has valid state */ + _ergo(node->atom != NULL, node->atom->stage != ASTAGE_INVALID) && + /* [jnode-page-binding] invariant */ + /* if node points to page, it points back to node */ + _ergo(node->pg != NULL, jprivate(node->pg) == node) && + /* [jnode-refs] invariant */ + /* only referenced jnode can be loaded */ + _check(atomic_read(&node->x_count) >= atomic_read(&node->d_count)); + +} + +static const char *jnode_type_name(jnode_type type) +{ + switch (type) { + case JNODE_UNFORMATTED_BLOCK: + return "unformatted"; + case JNODE_FORMATTED_BLOCK: + return "formatted"; + case JNODE_BITMAP: + return "bitmap"; + case JNODE_IO_HEAD: + return "io head"; + case JNODE_INODE: + return "inode"; + case LAST_JNODE_TYPE: + return "last"; + default:{ + static char unknown[30]; + + sprintf(unknown, "unknown %i", type); + return unknown; + } + } +} + +#define jnode_state_name(node, flag) \ + (JF_ISSET((node), (flag)) ? 
((#flag "|")+6) : "") + +/* debugging aid: output human readable information about @node */ +static void info_jnode(const char *prefix /* prefix to print */ , + const jnode * node/* node to print */) +{ + assert("umka-068", prefix != NULL); + + if (node == NULL) { + printk("%s: null\n", prefix); + return; + } + + printk + ("%s: %p: state: %lx: [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s], level: %i," + " block: %s, d_count: %d, x_count: %d, " + "pg: %p, atom: %p, lock: %i:%i, type: %s, ", prefix, node, + node->state, + jnode_state_name(node, JNODE_PARSED), + jnode_state_name(node, JNODE_HEARD_BANSHEE), + jnode_state_name(node, JNODE_LEFT_CONNECTED), + jnode_state_name(node, JNODE_RIGHT_CONNECTED), + jnode_state_name(node, JNODE_ORPHAN), + jnode_state_name(node, JNODE_CREATED), + jnode_state_name(node, JNODE_RELOC), + jnode_state_name(node, JNODE_OVRWR), + jnode_state_name(node, JNODE_DIRTY), + jnode_state_name(node, JNODE_IS_DYING), + jnode_state_name(node, JNODE_RIP), + jnode_state_name(node, JNODE_MISSED_IN_CAPTURE), + jnode_state_name(node, JNODE_WRITEBACK), + jnode_state_name(node, JNODE_DKSET), + jnode_state_name(node, JNODE_REPACK), + jnode_state_name(node, JNODE_CLUSTER_PAGE), + jnode_get_level(node), sprint_address(jnode_get_block(node)), + atomic_read(&node->d_count), atomic_read(&node->x_count), + jnode_page(node), node->atom, 0, 0, + jnode_type_name(jnode_get_type(node))); + if (jnode_is_unformatted(node)) { + printk("inode: %llu, index: %lu, ", + node->key.j.objectid, node->key.j.index); + } +} + +/* debugging aid: check znode invariant and panic if it doesn't hold */ +static int jnode_invariant(jnode * node, int tlocked, int jlocked) +{ + char const *failed_msg; + int result; + reiser4_tree *tree; + + tree = jnode_get_tree(node); + + assert("umka-063312", node != NULL); + assert("umka-064321", tree != NULL); + + if (!jlocked && !tlocked) + spin_lock_jnode((jnode *) node); + if (!tlocked) + read_lock_tree(jnode_get_tree(node)); + result = jnode_invariant_f(node, 
&failed_msg); + if (!result) { + info_jnode("corrupted node", node); + warning("jmacd-555", "Condition %s failed", failed_msg); + } + if (!tlocked) + read_unlock_tree(jnode_get_tree(node)); + if (!jlocked && !tlocked) + spin_unlock_jnode((jnode *) node); + return result; +} + +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/jnode.h b/fs/reiser4/jnode.h new file mode 100644 index 000000000000..9896f5f0ecaa --- /dev/null +++ b/fs/reiser4/jnode.h @@ -0,0 +1,704 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of jnode. See jnode.c for details. */ + +#ifndef __JNODE_H__ +#define __JNODE_H__ + +#include "forward.h" +#include "type_safe_hash.h" +#include "txnmgr.h" +#include "key.h" +#include "debug.h" +#include "dformat.h" +#include "page_cache.h" +#include "context.h" + +#include "plugin/plugin.h" + +#include +#include +#include +#include +#include +#include +#include + +/* declare hash table of jnodes (jnodes proper, that is, unformatted + nodes) */ +TYPE_SAFE_HASH_DECLARE(j, jnode); + +/* declare hash table of znodes */ +TYPE_SAFE_HASH_DECLARE(z, znode); + +struct jnode_key { + __u64 objectid; + unsigned long index; + struct address_space *mapping; +}; + +/* + Jnode is the "base class" of other nodes in reiser4. It is also happens to + be exactly the node we use for unformatted tree nodes. + + Jnode provides following basic functionality: + + . reference counting and indexing. + + . integration with page cache. Jnode has ->pg reference to which page can + be attached. + + . interface to transaction manager. It is jnode that is kept in transaction + manager lists, attached to atoms, etc. (NOTE-NIKITA one may argue that this + means, there should be special type of jnode for inode.) 
+ + Locking: + + Spin lock: the following fields are protected by the per-jnode spin lock: + + ->state + ->atom + ->capture_link + + Following fields are protected by the global tree lock: + + ->link + ->key.z (content of ->key.z is only changed in znode_rehash()) + ->key.j + + Atomic counters + + ->x_count + ->d_count + + ->pg, and ->data are protected by spin lock for unused jnode and are + immutable for used jnode (one for which fs/reiser4/vfs_ops.c:releasable() + is false). + + ->tree is immutable after creation + + Unclear + + ->blocknr: should be under jnode spin-lock, but current interface is based + on passing of block address. + + If you ever need to spin lock two nodes at once, do this in "natural" + memory order: lock znode with lower address first. (See lock_two_nodes().) + + Invariants involving this data-type: + + [jnode-dirty] + [jnode-refs] + [jnode-oid] + [jnode-queued] + [jnode-atom-valid] + [jnode-page-binding] +*/ + +struct jnode { +#if REISER4_DEBUG +#define JMAGIC 0x52654973 /* "ReIs" */ + int magic; +#endif + /* FIRST CACHE LINE (16 bytes): data used by jload */ + + /* jnode's state: bitwise flags from the reiser4_jnode_state enum. */ + /* 0 */ unsigned long state; + + /* lock, protecting jnode's fields. */ + /* 4 */ spinlock_t load; + + /* counter of references to jnode itself. Increased on jref(). + Decreased on jput(). + */ + /* 8 */ atomic_t x_count; + + /* counter of references to jnode's data. Pin data page(s) in + memory while this is greater than 0. Increased on jload(). + Decreased on jrelse(). + */ + /* 12 */ atomic_t d_count; + + /* SECOND CACHE LINE: data used by hash table lookups */ + + /* 16 */ union { + /* znodes are hashed by block number */ + reiser4_block_nr z; + /* unformatted nodes are hashed by mapping plus offset */ + struct jnode_key j; + } key; + + /* THIRD CACHE LINE */ + + /* 32 */ union { + /* pointers to maintain hash-table */ + z_hash_link z; + j_hash_link j; + } link; + + /* pointer to jnode page. 
*/ + /* 36 */ struct page *pg; + /* pointer to node itself. This is page_address(node->pg) when page is + attached to the jnode + */ + /* 40 */ void *data; + + /* 44 */ reiser4_tree *tree; + + /* FOURTH CACHE LINE: atom related fields */ + + /* 48 */ spinlock_t guard; + + /* atom the block is in, if any */ + /* 52 */ txn_atom *atom; + + /* capture list */ + /* 56 */ struct list_head capture_link; + + /* FIFTH CACHE LINE */ + + /* 64 */ struct rcu_head rcu; + /* crosses cache line */ + + /* SIXTH CACHE LINE */ + + /* the real blocknr (where io is going to/from) */ + /* 80 */ reiser4_block_nr blocknr; + /* Parent item type, unformatted and CRC need it for + * offset => key conversion. */ + /* NOTE: this parent_item_id looks like jnode type. */ + /* 88 */ reiser4_plugin_id parent_item_id; + /* 92 */ +#if REISER4_DEBUG + /* list of all jnodes for debugging purposes. */ + struct list_head jnodes; + /* how many times this jnode was written in one transaction */ + int written; + /* this indicates which atom's list the jnode is on */ + atom_list list; +#endif +} __attribute__ ((aligned(16))); + +/* + * jnode types. Enumeration of existing jnode types. + */ +typedef enum { + JNODE_UNFORMATTED_BLOCK, /* unformatted block */ + JNODE_FORMATTED_BLOCK, /* formatted block, znode */ + JNODE_BITMAP, /* bitmap */ + JNODE_IO_HEAD, /* jnode representing a block in the + * wandering log */ + JNODE_INODE, /* jnode embedded into inode */ + LAST_JNODE_TYPE +} jnode_type; + +/* jnode states */ +typedef enum { + /* jnode's page is loaded and data checked */ + JNODE_PARSED = 0, + /* node was deleted, not all locks on it were released. This + node is empty and is going to be removed from the tree + shortly. 
*/ + JNODE_HEARD_BANSHEE = 1, + /* left sibling pointer is valid */ + JNODE_LEFT_CONNECTED = 2, + /* right sibling pointer is valid */ + JNODE_RIGHT_CONNECTED = 3, + + /* znode was just created and doesn't yet have a pointer from + its parent */ + JNODE_ORPHAN = 4, + + /* this node was created by its transaction and has not been assigned + a block address. */ + JNODE_CREATED = 5, + + /* this node is currently relocated */ + JNODE_RELOC = 6, + /* this node is currently wandered */ + JNODE_OVRWR = 7, + + /* this znode has been modified */ + JNODE_DIRTY = 8, + + /* znode lock is being invalidated */ + JNODE_IS_DYING = 9, + + /* THIS PLACE IS INTENTIONALLY LEFT BLANK */ + + /* jnode is queued for flushing. */ + JNODE_FLUSH_QUEUED = 12, + + /* In the following bits jnode type is encoded. */ + JNODE_TYPE_1 = 13, + JNODE_TYPE_2 = 14, + JNODE_TYPE_3 = 15, + + /* jnode is being destroyed */ + JNODE_RIP = 16, + + /* znode was not captured during locking (it might so be because + ->level != LEAF_LEVEL and lock_mode == READ_LOCK) */ + JNODE_MISSED_IN_CAPTURE = 17, + + /* write is in progress */ + JNODE_WRITEBACK = 18, + + /* unused flag */ + JNODE_NEW = 19, + + /* delimiting keys are already set for this znode. */ + JNODE_DKSET = 20, + + /* when this bit is set page and jnode can not be disconnected */ + JNODE_WRITE_PREPARED = 21, + + JNODE_CLUSTER_PAGE = 22, + /* Jnode is marked for repacking, that means the reiser4 flush and the + * block allocator should process this node special way */ + JNODE_REPACK = 23, + /* node should be converted by flush in squalloc phase */ + JNODE_CONVERTIBLE = 24, + /* + * When jnode is dirtied for the first time in given transaction, + * do_jnode_make_dirty() checks whether this jnode can possible became + * member of overwrite set. If so, this bit is set, and one block is + * reserved in the ->flush_reserved space of atom. 
+ * + * This block is "used" (and JNODE_FLUSH_RESERVED bit is cleared) when + * + * (1) flush decides that we want this block to go into relocate + * set after all. + * + * (2) wandering log is allocated (by log writer) + * + * (3) extent is allocated + * + */ + JNODE_FLUSH_RESERVED = 29 +} reiser4_jnode_state; + +/* Macros for accessing the jnode state. */ + +static inline void JF_CLR(jnode * j, int f) +{ + assert("unknown-1", j->magic == JMAGIC); + clear_bit(f, &j->state); +} +static inline int JF_ISSET(const jnode * j, int f) +{ + assert("unknown-2", j->magic == JMAGIC); + return test_bit(f, &((jnode *) j)->state); +} +static inline void JF_SET(jnode * j, int f) +{ + assert("unknown-3", j->magic == JMAGIC); + set_bit(f, &j->state); +} + +static inline int JF_TEST_AND_SET(jnode * j, int f) +{ + assert("unknown-4", j->magic == JMAGIC); + return test_and_set_bit(f, &j->state); +} + +static inline void spin_lock_jnode(jnode *node) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(rw_locked_tree) && + LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_LT(spin_locked_jnode, 2))); + + spin_lock(&(node->guard)); + + LOCK_CNT_INC(spin_locked_jnode); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_unlock_jnode(jnode *node) +{ + assert_spin_locked(&(node->guard)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_jnode)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_jnode); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(node->guard)); +} + +static inline int jnode_is_in_deleteset(const jnode * node) +{ + return JF_ISSET(node, JNODE_RELOC); +} + +extern int init_jnodes(void); +extern void done_jnodes(void); + +/* Jnode routines */ +extern jnode *jalloc(void); +extern void jfree(jnode * node) NONNULL; +extern jnode *jclone(jnode *); +extern jnode *jlookup(reiser4_tree * tree, + oid_t objectid, unsigned long ind) NONNULL; 
+extern jnode *jfind(struct address_space *, unsigned long index) NONNULL; +extern jnode *jnode_by_page(struct page *pg) NONNULL; +extern jnode *jnode_of_page(struct page *pg) NONNULL; +void jnode_attach_page(jnode * node, struct page *pg); + +void unhash_unformatted_jnode(jnode *); +extern jnode *page_next_jnode(jnode * node) NONNULL; +extern void jnode_init(jnode * node, reiser4_tree * tree, jnode_type) NONNULL; +extern void jnode_make_dirty(jnode * node) NONNULL; +extern void jnode_make_clean(jnode * node) NONNULL; +extern void jnode_make_wander_nolock(jnode * node) NONNULL; +extern void jnode_make_wander(jnode *) NONNULL; +extern void znode_make_reloc(znode * , flush_queue_t *) NONNULL; +extern void unformatted_make_reloc(jnode *, flush_queue_t *) NONNULL; +extern struct address_space *jnode_get_mapping(const jnode * node) NONNULL; + +/** + * jnode_get_block + * @node: jnode to query + * + */ +static inline const reiser4_block_nr *jnode_get_block(const jnode *node) +{ + assert("nikita-528", node != NULL); + + return &node->blocknr; +} + +/** + * jnode_set_block + * @node: jnode to update + * @blocknr: new block nr + */ +static inline void jnode_set_block(jnode *node, const reiser4_block_nr *blocknr) +{ + assert("nikita-2020", node != NULL); + assert("umka-055", blocknr != NULL); + node->blocknr = *blocknr; +} + + +/* block number for IO. Usually this is the same as jnode_get_block(), unless + * jnode was emergency flushed---then block number chosen by eflush is + * used. */ +static inline const reiser4_block_nr *jnode_get_io_block(jnode * node) +{ + assert("nikita-2768", node != NULL); + assert_spin_locked(&(node->guard)); + + return jnode_get_block(node); +} + +/* Jnode flush interface. 
*/ +extern reiser4_blocknr_hint *reiser4_pos_hint(flush_pos_t *pos); +extern flush_queue_t *reiser4_pos_fq(flush_pos_t *pos); + +/* FIXME-VS: these are used in plugin/item/extent.c */ + +/* does extent_get_block have to be called */ +#define jnode_mapped(node) JF_ISSET (node, JNODE_MAPPED) +#define jnode_set_mapped(node) JF_SET (node, JNODE_MAPPED) + +/* the node should be converted during flush squalloc phase */ +#define jnode_convertible(node) JF_ISSET (node, JNODE_CONVERTIBLE) +#define jnode_set_convertible(node) JF_SET (node, JNODE_CONVERTIBLE) + +/* Macros to convert from jnode to znode, znode to jnode. These are macros + because C doesn't allow overloading of const prototypes. */ +#define ZJNODE(x) (&(x)->zjnode) +#define JZNODE(x) \ +({ \ + typeof(x) __tmp_x; \ + \ + __tmp_x = (x); \ + assert("jmacd-1300", jnode_is_znode(__tmp_x)); \ + (znode*) __tmp_x; \ +}) + +extern int jnodes_tree_init(reiser4_tree * tree); +extern int jnodes_tree_done(reiser4_tree * tree); + +#if REISER4_DEBUG + +extern int znode_is_any_locked(const znode * node); +extern void jnode_list_remove(jnode * node); + +#else + +#define jnode_list_remove(node) noop + +#endif + +int znode_is_root(const znode * node) NONNULL; + +/* bump reference counter on @node */ +static inline void add_x_ref(jnode * node/* node to increase x_count of */) +{ + assert("nikita-1911", node != NULL); + + atomic_inc(&node->x_count); + LOCK_CNT_INC(x_refs); +} + +static inline void dec_x_ref(jnode * node) +{ + assert("nikita-3215", node != NULL); + assert("nikita-3216", atomic_read(&node->x_count) > 0); + + atomic_dec(&node->x_count); + assert("nikita-3217", LOCK_CNT_GTZ(x_refs)); + LOCK_CNT_DEC(x_refs); +} + +/* jref() - increase counter of references to jnode/znode (x_count) */ +static inline jnode *jref(jnode * node) +{ + assert("jmacd-508", (node != NULL) && !IS_ERR(node)); + add_x_ref(node); + return node; +} + +/* get the page of jnode */ +static inline struct page *jnode_page(const jnode * node) +{ + return 
node->pg; +} + +/* return pointer to jnode data */ +static inline char *jdata(const jnode * node) +{ + assert("nikita-1415", node != NULL); + assert("nikita-3198", jnode_page(node) != NULL); + return node->data; +} + +static inline int jnode_is_loaded(const jnode * node) +{ + assert("zam-506", node != NULL); + return atomic_read(&node->d_count) > 0; +} + +extern void page_clear_jnode(struct page *page, jnode * node) NONNULL; + +static inline void jnode_set_reloc(jnode * node) +{ + assert("nikita-2431", node != NULL); + assert("nikita-2432", !JF_ISSET(node, JNODE_OVRWR)); + JF_SET(node, JNODE_RELOC); +} + +/* jload/jwrite/junload give a bread/bwrite/brelse functionality for jnodes */ + +extern int jload_gfp(jnode *, gfp_t, int do_kmap) NONNULL; + +static inline int jload(jnode *node) +{ + return jload_gfp(node, reiser4_ctx_gfp_mask_get(), 1); +} + +extern int jinit_new(jnode *, gfp_t) NONNULL; +extern int jstartio(jnode *) NONNULL; + +extern void jdrop(jnode *) NONNULL; +extern int jwait_io(jnode *, int rw) NONNULL; + +void jload_prefetch(jnode *); + +extern jnode *reiser4_alloc_io_head(const reiser4_block_nr * block) NONNULL; +extern void reiser4_drop_io_head(jnode * node) NONNULL; + +static inline reiser4_tree *jnode_get_tree(const jnode * node) +{ + assert("nikita-2691", node != NULL); + return node->tree; +} + +extern void pin_jnode_data(jnode *); +extern void unpin_jnode_data(jnode *); + +static inline jnode_type jnode_get_type(const jnode * node) +{ + static const unsigned long state_mask = + (1 << JNODE_TYPE_1) | (1 << JNODE_TYPE_2) | (1 << JNODE_TYPE_3); + + static jnode_type mask_to_type[] = { + /* JNODE_TYPE_3 : JNODE_TYPE_2 : JNODE_TYPE_1 */ + + /* 000 */ + [0] = JNODE_FORMATTED_BLOCK, + /* 001 */ + [1] = JNODE_UNFORMATTED_BLOCK, + /* 010 */ + [2] = JNODE_BITMAP, + /* 011 */ + [3] = LAST_JNODE_TYPE, /*invalid */ + /* 100 */ + [4] = JNODE_INODE, + /* 101 */ + [5] = LAST_JNODE_TYPE, + /* 110 */ + [6] = JNODE_IO_HEAD, + /* 111 */ + [7] = LAST_JNODE_TYPE, /* 
invalid */ + }; + + return mask_to_type[(node->state & state_mask) >> JNODE_TYPE_1]; +} + +/* returns true if node is a znode */ +static inline int jnode_is_znode(const jnode * node) +{ + return jnode_get_type(node) == JNODE_FORMATTED_BLOCK; +} + +static inline int jnode_is_flushprepped(jnode * node) +{ + assert("jmacd-78212", node != NULL); + assert_spin_locked(&(node->guard)); + return !JF_ISSET(node, JNODE_DIRTY) || JF_ISSET(node, JNODE_RELOC) || + JF_ISSET(node, JNODE_OVRWR); +} + +/* Return true if @node has already been processed by the squeeze and allocate + process. This implies the block address has been finalized for the + duration of this atom (or it is clean and will remain in place). If this + returns true you may use the block number as a hint. */ +static inline int jnode_check_flushprepped(jnode * node) +{ + int result; + + /* It must be clean or relocated or wandered. New allocations are set + * to relocate. */ + spin_lock_jnode(node); + result = jnode_is_flushprepped(node); + spin_unlock_jnode(node); + return result; +} + +/* returns true if node is unformatted */ +static inline int jnode_is_unformatted(const jnode * node) +{ + assert("jmacd-0123", node != NULL); + return jnode_get_type(node) == JNODE_UNFORMATTED_BLOCK; +} + +/* returns true if node represents a cluster cache page */ +static inline int jnode_is_cluster_page(const jnode * node) +{ + assert("edward-50", node != NULL); + return (JF_ISSET(node, JNODE_CLUSTER_PAGE)); +} + +/* returns true is node is builtin inode's jnode */ +static inline int jnode_is_inode(const jnode * node) +{ + assert("vs-1240", node != NULL); + return jnode_get_type(node) == JNODE_INODE; +} + +static inline jnode_plugin *jnode_ops_of(const jnode_type type) +{ + assert("nikita-2367", type < LAST_JNODE_TYPE); + return jnode_plugin_by_id((reiser4_plugin_id) type); +} + +static inline jnode_plugin *jnode_ops(const jnode * node) +{ + assert("nikita-2366", node != NULL); + + return jnode_ops_of(jnode_get_type(node)); +} 
+ +/* Get the index of a block. */ +static inline unsigned long jnode_get_index(jnode * node) +{ + return jnode_ops(node)->index(node); +} + +/* return true if "node" is the root */ +static inline int jnode_is_root(const jnode * node) +{ + return jnode_is_znode(node) && znode_is_root(JZNODE(node)); +} + +extern struct address_space *mapping_jnode(const jnode * node); +extern unsigned long index_jnode(const jnode * node); + +static inline void jput(jnode * node); +extern void jput_final(jnode * node); + +/* bump data counter on @node */ +static inline void add_d_ref(jnode * node/* node to increase d_count of */) +{ + assert("nikita-1962", node != NULL); + + atomic_inc(&node->d_count); + if (jnode_is_unformatted(node) || jnode_is_znode(node)) + LOCK_CNT_INC(d_refs); +} + +/* jput() - decrement x_count reference counter on znode. + + Count may drop to 0, jnode stays in cache until memory pressure causes the + eviction of its page. The c_count variable also ensures that children are + pressured out of memory before the parent. The jnode remains hashed as + long as the VM allows its page to stay in memory. +*/ +static inline void jput(jnode * node) +{ + assert("jmacd-509", node != NULL); + assert("jmacd-510", atomic_read(&node->x_count) > 0); + assert("zam-926", reiser4_schedulable()); + LOCK_CNT_DEC(x_refs); + + rcu_read_lock(); + /* + * we don't need any kind of lock here--jput_final() uses RCU. 
+ */ + if (unlikely(atomic_dec_and_test(&node->x_count))) + jput_final(node); + else + rcu_read_unlock(); + assert("nikita-3473", reiser4_schedulable()); +} + +extern void jrelse(jnode * node); +extern void jrelse_tail(jnode * node); + +extern jnode *jnode_rip_sync(reiser4_tree * t, jnode * node); + +/* resolve race with jput */ +static inline jnode *jnode_rip_check(reiser4_tree * tree, jnode * node) +{ + if (unlikely(JF_ISSET(node, JNODE_RIP))) + node = jnode_rip_sync(tree, node); + return node; +} + +extern reiser4_key *jnode_build_key(const jnode *node, reiser4_key * key); + +#if REISER4_DEBUG +extern int jnode_invariant_f(const jnode *node, char const **msg); +#endif + +extern jnode_plugin jnode_plugins[LAST_JNODE_TYPE]; + +/* __JNODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/kassign.c b/fs/reiser4/kassign.c new file mode 100644 index 000000000000..87a04dc849c0 --- /dev/null +++ b/fs/reiser4/kassign.c @@ -0,0 +1,677 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key assignment policy implementation */ + +/* + * In reiser4 every piece of file system data and meta-data has a key. Keys + * are used to store information in and retrieve it from reiser4 internal + * tree. In addition to this, keys define _ordering_ of all file system + * information: things having close keys are placed into the same or + * neighboring (in the tree order) nodes of the tree. As our block allocator + * tries to respect tree order (see flush.c), keys also define order in which + * things are laid out on the disk, and hence, affect performance directly. + * + * Obviously, assignment of keys to data and meta-data should be consistent + * across whole file system. Algorithm that calculates a key for a given piece + * of data or meta-data is referred to as "key assignment". 
+ * + * Key assignment is too expensive to be implemented as a plugin (that is, + * with an ability to support different key assignment schemas in the same + * compiled kernel image). As a compromise, all key-assignment functions and + * data-structures are collected in this single file, so that modifications to + * key assignment algorithm can be localized. Additional changes may be + * required in key.[ch]. + * + * Current default reiser4 key assignment algorithm is dubbed "Plan A". As one + * may guess, there is "Plan B" too. + * + */ + +/* + * Additional complication with key assignment implementation is a requirement + * to support different key length. + */ + +/* + * KEY ASSIGNMENT: PLAN A, LONG KEYS. + * + * DIRECTORY ITEMS + * + * | 60 | 4 | 7 |1| 56 | 64 | 64 | + * +--------------+---+---+-+-------------+------------------+-----------------+ + * | dirid | 0 | F |H| prefix-1 | prefix-2 | prefix-3/hash | + * +--------------+---+---+-+-------------+------------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * dirid objectid of directory this item is for + * + * F fibration, see fs/reiser4/plugin/fibration.[ch] + * + * H 1 if last 8 bytes of the key contain hash, + * 0 if last 8 bytes of the key contain prefix-3 + * + * prefix-1 first 7 characters of file name. + * Padded by zeroes if name is not long enough. + * + * prefix-2 next 8 characters of the file name. + * + * prefix-3 next 8 characters of the file name. + * + * hash hash of the rest of file name (i.e., portion of file + * name not included into prefix-1 and prefix-2). + * + * File names shorter than 23 (== 7 + 8 + 8) characters are completely encoded + * in the key. Such file names are called "short". They are distinguished by H + * bit set 0 in the key. + * + * Other file names are "long". For long name, H bit is 1, and first 15 (== 7 + * + 8) characters are encoded in prefix-1 and prefix-2 portions of the + * key. 
Last 8 bytes of the key are occupied by hash of the remaining + * characters of the name. + * + * This key assignment reaches following important goals: + * + * (1) directory entries are sorted in approximately lexicographical + * order. + * + * (2) collisions (when multiple directory items have the same key), while + * principally unavoidable in a tree with fixed length keys, are rare. + * + * STAT DATA + * + * | 60 | 4 | 64 | 4 | 60 | 64 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | locality id | 1 | ordering | 0 | objectid | 0 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * ordering copy of second 8-byte portion of the key of directory + * entry for the first name of this object. Ordering has a form + * { + * fibration :7; + * h :1; + * prefix1 :56; + * } + * see description of key for directory entry above. + * + * objectid object id for this object + * + * This key assignment policy is designed to keep stat-data in the same order + * as corresponding directory items, thus speeding up readdir/stat types of + * workload. + * + * FILE BODY + * + * | 60 | 4 | 64 | 4 | 60 | 64 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | locality id | 4 | ordering | 0 | objectid | offset | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * ordering the same as in the key of stat-data for this object + * + * objectid object id for this object + * + * offset logical offset from the beginning of this file. + * Measured in bytes. + * + * + * KEY ASSIGNMENT: PLAN A, SHORT KEYS. 
+ * + * DIRECTORY ITEMS + * + * | 60 | 4 | 7 |1| 56 | 64 | + * +--------------+---+---+-+-------------+-----------------+ + * | dirid | 0 | F |H| prefix-1 | prefix-2/hash | + * +--------------+---+---+-+-------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * dirid objectid of directory this item is for + * + * F fibration, see fs/reiser4/plugin/fibration.[ch] + * + * H 1 if last 8 bytes of the key contain hash, + * 0 if last 8 bytes of the key contain prefix-2 + * + * prefix-1 first 7 characters of file name. + * Padded by zeroes if name is not long enough. + * + * prefix-2 next 8 characters of the file name. + * + * hash hash of the rest of file name (i.e., portion of file + * name not included into prefix-1). + * + * File names shorter than 15 (== 7 + 8) characters are completely encoded in + * the key. Such file names are called "short". They are distinguished by H + * bit set 0 in the key. + * + * Other file names are "long". For long name, H bit is 1, and first 7 + * characters are encoded in prefix-1 portion of the key. Last 8 bytes of the + * key are occupied by hash of the remaining characters of the name. 
+ * + * STAT DATA + * + * | 60 | 4 | 4 | 60 | 64 | + * +--------------+---+---+--------------+-----------------+ + * | locality id | 1 | 0 | objectid | 0 | + * +--------------+---+---+--------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * objectid object id for this object + * + * FILE BODY + * + * | 60 | 4 | 4 | 60 | 64 | + * +--------------+---+---+--------------+-----------------+ + * | locality id | 4 | 0 | objectid | offset | + * +--------------+---+---+--------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * objectid object id for this object + * + * offset logical offset from the beginning of this file. + * Measured in bytes. + * + * + */ + +#include "debug.h" +#include "key.h" +#include "kassign.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "dscale.h" + +#include /* for __u?? */ +#include /* for struct super_block, etc */ + +/* bitmask for H bit (see comment at the beginning of this file */ +static const __u64 longname_mark = 0x0100000000000000ull; +/* bitmask for F and H portions of the key. */ +static const __u64 fibration_mask = 0xff00000000000000ull; + +/* return true if name is not completely encoded in @key */ +int is_longname_key(const reiser4_key * key) +{ + __u64 highpart; + + assert("nikita-2863", key != NULL); + if (get_key_type(key) != KEY_FILE_NAME_MINOR) + reiser4_print_key("oops", key); + assert("nikita-2864", get_key_type(key) == KEY_FILE_NAME_MINOR); + + if (REISER4_LARGE_KEY) + highpart = get_key_ordering(key); + else + highpart = get_key_objectid(key); + + return (highpart & longname_mark) ? 
1 : 0; +} + +/* return true if @name is too long to be completely encoded in the key */ +int is_longname(const char *name UNUSED_ARG, int len) +{ + if (REISER4_LARGE_KEY) + return len > 23; + else + return len > 15; +} + +/* code ascii string into __u64. + + Put characters of @name into result (@str) one after another starting + from @start_idx-th highest (arithmetically) byte. This produces + endian-safe encoding. memcpy(2) will not do. + +*/ +static __u64 pack_string(const char *name /* string to encode */ , + int start_idx /* highest byte in result from + * which to start encoding */ ) +{ + unsigned i; + __u64 str; + + str = 0; + for (i = 0; (i < sizeof str - start_idx) && name[i]; ++i) { + str <<= 8; + str |= (unsigned char)name[i]; + } + str <<= (sizeof str - i - start_idx) << 3; + return str; +} + +/* opposite to pack_string(). Takes value produced by pack_string(), restores + * string encoded in it and stores result in @buf */ +char *reiser4_unpack_string(__u64 value, char *buf) +{ + do { + *buf = value >> (64 - 8); + if (*buf) + ++buf; + value <<= 8; + } while (value != 0); + *buf = 0; + return buf; +} + +/* obtain name encoded in @key and store it in @buf */ +char *extract_name_from_key(const reiser4_key * key, char *buf) +{ + char *c; + + assert("nikita-2868", !is_longname_key(key)); + + c = buf; + if (REISER4_LARGE_KEY) { + c = reiser4_unpack_string(get_key_ordering(key) & + ~fibration_mask, c); + c = reiser4_unpack_string(get_key_fulloid(key), c); + } else + c = reiser4_unpack_string(get_key_fulloid(key) & + ~fibration_mask, c); + reiser4_unpack_string(get_key_offset(key), c); + return buf; +} + +/** + * complete_entry_key - calculate entry key by name + * @dir: directory where entry is (or will be) in + * @name: name to calculate key of + * @len: lenth of name + * @result: place to store result in + * + * Sets fields of entry key @result which depend on file name. 
+ * When REISER4_LARGE_KEY is defined three fields of @result are set: ordering, + * objectid and offset. Otherwise, objectid and offset are set. + */ +void complete_entry_key(const struct inode *dir, const char *name, + int len, reiser4_key *result) +{ +#if REISER4_LARGE_KEY + __u64 ordering; + __u64 objectid; + __u64 offset; + + assert("nikita-1139", dir != NULL); + assert("nikita-1142", result != NULL); + assert("nikita-2867", strlen(name) == len); + + /* + * key allocation algorithm for directory entries in case of large + * keys: + * + * If name is not longer than 7 + 8 + 8 = 23 characters, put first 7 + * characters into ordering field of key, next 8 characters (if any) + * into objectid field of key and next 8 ones (if any) into offset + * field of key + * + * If file name is longer than 23 characters, put first 7 characters + * into key's ordering, next 8 to objectid and hash of remaining + * characters into offset field. + * + * To distinguish above cases, in the latter case set up unused high bit in + * ordering field. 
+ */ + + /* [0-6] characters to ordering */ + ordering = pack_string(name, 1); + if (len > 7) { + /* [7-14] characters to objectid */ + objectid = pack_string(name + 7, 0); + if (len > 15) { + if (len <= 23) { + /* [15-23] characters to offset */ + offset = pack_string(name + 15, 0); + } else { + /* note in a key the fact that offset contains + * hash */ + ordering |= longname_mark; + + /* offset is the hash of the file name's tail */ + offset = inode_hash_plugin(dir)->hash(name + 15, + len - 15); + } + } else { + offset = 0ull; + } + } else { + objectid = 0ull; + offset = 0ull; + } + + assert("nikita-3480", inode_fibration_plugin(dir) != NULL); + ordering |= inode_fibration_plugin(dir)->fibre(dir, name, len); + + set_key_ordering(result, ordering); + set_key_fulloid(result, objectid); + set_key_offset(result, offset); + return; + +#else + __u64 objectid; + __u64 offset; + + assert("nikita-1139", dir != NULL); + assert("nikita-1142", result != NULL); + assert("nikita-2867", strlen(name) == len); + + /* + * key allocation algorithm for directory entries in case of not large + * keys: + * + * If name is not longer than 7 + 8 = 15 characters, put first 7 + * characters into objectid field of key, next 8 charactes (if any) + * into offset field of key + * + * If file name is longer than 15 characters, put first 7 characters + * into key's objectid, and hash of remaining characters into offset + * field. + * + * To distinguish above cases, in latter set up unused high bit in + * objectid field. + */ + + /* [0-6] characters to objectid */ + objectid = pack_string(name, 1); + if (len > 7) { + if (len <= 15) { + /* [7-14] characters to offset */ + offset = pack_string(name + 7, 0); + } else { + /* note in a key the fact that offset contains hash. */ + objectid |= longname_mark; + + /* offset is the hash of the file name. 
*/ + offset = inode_hash_plugin(dir)->hash(name + 7, + len - 7); + } + } else + offset = 0ull; + + assert("nikita-3480", inode_fibration_plugin(dir) != NULL); + objectid |= inode_fibration_plugin(dir)->fibre(dir, name, len); + + set_key_fulloid(result, objectid); + set_key_offset(result, offset); + return; +#endif /* ! REISER4_LARGE_KEY */ +} + +/* true, if @key is the key of "." */ +int is_dot_key(const reiser4_key * key/* key to check */) +{ + assert("nikita-1717", key != NULL); + assert("nikita-1718", get_key_type(key) == KEY_FILE_NAME_MINOR); + return + (get_key_ordering(key) == 0ull) && + (get_key_objectid(key) == 0ull) && (get_key_offset(key) == 0ull); +} + +/* build key for stat-data. + + return key of stat-data of this object. This should became sd plugin + method in the future. For now, let it be here. + +*/ +reiser4_key *build_sd_key(const struct inode *target /* inode of an object */ , + reiser4_key * result /* resulting key of @target + stat-data */ ) +{ + assert("nikita-261", result != NULL); + + reiser4_key_init(result); + set_key_locality(result, reiser4_inode_data(target)->locality_id); + set_key_ordering(result, get_inode_ordering(target)); + set_key_objectid(result, get_inode_oid(target)); + set_key_type(result, KEY_SD_MINOR); + set_key_offset(result, (__u64) 0); + return result; +} + +/* encode part of key into &obj_key_id + + This encodes into @id part of @key sufficient to restore @key later, + given that latter is key of object (key of stat-data). + + See &obj_key_id +*/ +int build_obj_key_id(const reiser4_key * key /* key to encode */ , + obj_key_id * id/* id where key is encoded in */) +{ + assert("nikita-1151", key != NULL); + assert("nikita-1152", id != NULL); + + memcpy(id, key, sizeof *id); + return 0; +} + +/* encode reference to @obj in @id. + + This is like build_obj_key_id() above, but takes inode as parameter. 
*/ +int build_inode_key_id(const struct inode *obj /* object to build key of */ , + obj_key_id * id/* result */) +{ + reiser4_key sdkey; + + assert("nikita-1166", obj != NULL); + assert("nikita-1167", id != NULL); + + build_sd_key(obj, &sdkey); + build_obj_key_id(&sdkey, id); + return 0; +} + +/* decode @id back into @key + + Restore key of object stat-data from @id. This is dual to + build_obj_key_id() above. +*/ +int extract_key_from_id(const obj_key_id * id /* object key id to extract key + * from */ , + reiser4_key * key/* result */) +{ + assert("nikita-1153", id != NULL); + assert("nikita-1154", key != NULL); + + reiser4_key_init(key); + memcpy(key, id, sizeof *id); + return 0; +} + +/* extract objectid of directory from key of directory entry within said + directory. + */ +oid_t extract_dir_id_from_key(const reiser4_key * de_key /* key of + * directory + * entry */ ) +{ + assert("nikita-1314", de_key != NULL); + return get_key_locality(de_key); +} + +/* encode into @id key of directory entry. + + Encode into @id information sufficient to later distinguish directory + entries within the same directory. This is not whole key, because all + directory entries within directory item share locality which is equal + to objectid of their directory. + +*/ +int build_de_id(const struct inode *dir /* inode of directory */ , + const struct qstr *name /* name to be given to @obj by + * directory entry being + * constructed */ , + de_id * id/* short key of directory entry */) +{ + reiser4_key key; + + assert("nikita-1290", dir != NULL); + assert("nikita-1292", id != NULL); + + /* NOTE-NIKITA this is suboptimal. */ + inode_dir_plugin(dir)->build_entry_key(dir, name, &key); + return build_de_id_by_key(&key, id); +} + +/* encode into @id key of directory entry. + + Encode into @id information sufficient to later distinguish directory + entries within the same directory. 
This is not whole key, because all + directory entries within directory item share locality which is equal + to objectid of their directory. + +*/ +int build_de_id_by_key(const reiser4_key * entry_key /* full key of directory + * entry */ , + de_id * id/* short key of directory entry */) +{ + memcpy(id, ((__u64 *) entry_key) + 1, sizeof *id); + return 0; +} + +/* restore from @id key of directory entry. + + Function dual to build_de_id(): given @id and locality, build full + key of directory entry within directory item. + +*/ +int extract_key_from_de_id(const oid_t locality /* locality of directory + * entry */ , + const de_id * id /* directory entry id */ , + reiser4_key * key/* result */) +{ + /* no need to initialise key here: all fields are overwritten */ + memcpy(((__u64 *) key) + 1, id, sizeof *id); + set_key_locality(key, locality); + set_key_type(key, KEY_FILE_NAME_MINOR); + return 0; +} + +/* compare two &de_id's */ +cmp_t de_id_cmp(const de_id * id1 /* first &de_id to compare */ , + const de_id * id2/* second &de_id to compare */) +{ + /* NOTE-NIKITA ugly implementation */ + reiser4_key k1; + reiser4_key k2; + + extract_key_from_de_id((oid_t) 0, id1, &k1); + extract_key_from_de_id((oid_t) 0, id2, &k2); + return keycmp(&k1, &k2); +} + +/* compare &de_id with key */ +cmp_t de_id_key_cmp(const de_id * id /* directory entry id to compare */ , + const reiser4_key * key/* key to compare */) +{ + cmp_t result; + reiser4_key *k1; + + k1 = (reiser4_key *) (((unsigned long)id) - sizeof key->el[0]); + result = KEY_DIFF_EL(k1, key, 1); + if (result == EQUAL_TO) { + result = KEY_DIFF_EL(k1, key, 2); + if (REISER4_LARGE_KEY && result == EQUAL_TO) + result = KEY_DIFF_EL(k1, key, 3); + } + return result; +} + +/* + * return number of bytes necessary to encode @inode identity. 
+ */ +int inode_onwire_size(const struct inode *inode) +{ + int result; + + result = dscale_bytes_to_write(get_inode_oid(inode)); + result += dscale_bytes_to_write(get_inode_locality(inode)); + + /* + * ordering is large (it usually has highest bits set), so it makes + * little sense to dscale it. + */ + if (REISER4_LARGE_KEY) + result += sizeof(get_inode_ordering(inode)); + return result; +} + +/* + * encode @inode identity at @start + */ +char *build_inode_onwire(const struct inode *inode, char *start) +{ + start += dscale_write(start, get_inode_locality(inode)); + start += dscale_write(start, get_inode_oid(inode)); + + if (REISER4_LARGE_KEY) { + put_unaligned(cpu_to_le64(get_inode_ordering(inode)), (__le64 *)start); + start += sizeof(get_inode_ordering(inode)); + } + return start; +} + +/* + * extract key that was previously encoded by build_inode_onwire() at @addr + */ +char *extract_obj_key_id_from_onwire(char *addr, obj_key_id * key_id) +{ + __u64 val; + + addr += dscale_read(addr, &val); + val = (val << KEY_LOCALITY_SHIFT) | KEY_SD_MINOR; + put_unaligned(cpu_to_le64(val), (__le64 *)key_id->locality); + addr += dscale_read(addr, &val); + put_unaligned(cpu_to_le64(val), (__le64 *)key_id->objectid); +#if REISER4_LARGE_KEY + memcpy(&key_id->ordering, addr, sizeof key_id->ordering); + addr += sizeof key_id->ordering; +#endif + return addr; +} + +/* + * skip a key that was previously encoded by build_inode_onwire() at @addr + * FIXME: handle IO errors. + */ +char * locate_obj_key_id_onwire(char * addr) +{ + /* locality */ + addr += dscale_bytes_to_read(addr); + /* objectid */ + addr += dscale_bytes_to_read(addr); +#if REISER4_LARGE_KEY + addr += sizeof ((obj_key_id *)0)->ordering; +#endif + return addr; +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/kassign.h b/fs/reiser4/kassign.h new file mode 100644 index 000000000000..8de30027ca76 --- /dev/null +++ b/fs/reiser4/kassign.h @@ -0,0 +1,111 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key assignment policy interface. See kassign.c for details. */ + +#if !defined(__KASSIGN_H__) +#define __KASSIGN_H__ + +#include "forward.h" +#include "key.h" +#include "dformat.h" + +#include /* for __u?? */ +#include /* for struct super_block, etc */ +#include /* for struct qstr */ + +/* key assignment functions */ + +/* Information from which key of file stat-data can be uniquely + restored. This depends on key assignment policy for + stat-data. Currently it's enough to store object id and locality id + (60+60==120) bits, because minor packing locality and offset of + stat-data key are always known constants: KEY_SD_MINOR and 0 + respectively. For simplicity 4 bits are wasted in each id, and just + two 64 bit integers are stored. + + This field has to be byte-aligned, because we don't want to waste + space in directory entries. There is another side of a coin of + course: we waste CPU and bus bandwidth in stead, by copying data back + and forth. + + Next optimization: &obj_key_id is mainly used to address stat data from + directory entries. Under the assumption that majority of files only have + only name (one hard link) from *the* parent directory it seems reasonable + to only store objectid of stat data and take its locality from key of + directory item. + + This requires some flag to be added to the &obj_key_id to distinguish + between these two cases. Remaining bits in flag byte are then asking to be + used to store file type. + + This optimization requires changes in directory item handling code. 
+ +*/ +typedef struct obj_key_id { + d8 locality[sizeof(__u64)]; + ON_LARGE_KEY(d8 ordering[sizeof(__u64)]; + ) + d8 objectid[sizeof(__u64)]; +} +obj_key_id; + +/* Information sufficient to uniquely identify directory entry within + compressed directory item. + + For alignment issues see &obj_key_id above. +*/ +typedef struct de_id { + ON_LARGE_KEY(d8 ordering[sizeof(__u64)];) + d8 objectid[sizeof(__u64)]; + d8 offset[sizeof(__u64)]; +} +de_id; + +extern int inode_onwire_size(const struct inode *obj); +extern char *build_inode_onwire(const struct inode *obj, char *area); +extern char *locate_obj_key_id_onwire(char *area); +extern char *extract_obj_key_id_from_onwire(char *area, obj_key_id * key_id); + +extern int build_inode_key_id(const struct inode *obj, obj_key_id * id); +extern int extract_key_from_id(const obj_key_id * id, reiser4_key * key); +extern int build_obj_key_id(const reiser4_key * key, obj_key_id * id); +extern oid_t extract_dir_id_from_key(const reiser4_key * de_key); +extern int build_de_id(const struct inode *dir, const struct qstr *name, + de_id * id); +extern int build_de_id_by_key(const reiser4_key * entry_key, de_id * id); +extern int extract_key_from_de_id(const oid_t locality, const de_id * id, + reiser4_key * key); +extern cmp_t de_id_cmp(const de_id * id1, const de_id * id2); +extern cmp_t de_id_key_cmp(const de_id * id, const reiser4_key * key); + +extern int build_readdir_key_common(struct file *dir, reiser4_key * result); +extern void build_entry_key_common(const struct inode *dir, + const struct qstr *name, + reiser4_key * result); +extern void build_entry_key_stable_entry(const struct inode *dir, + const struct qstr *name, + reiser4_key * result); +extern int is_dot_key(const reiser4_key * key); +extern reiser4_key *build_sd_key(const struct inode *target, + reiser4_key * result); + +extern int is_longname_key(const reiser4_key * key); +extern int is_longname(const char *name, int len); +extern char *extract_name_from_key(const 
reiser4_key * key, char *buf); +extern char *reiser4_unpack_string(__u64 value, char *buf); +extern void complete_entry_key(const struct inode *dir, const char *name, + int len, reiser4_key *result); + +/* __KASSIGN_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/key.c b/fs/reiser4/key.c new file mode 100644 index 000000000000..0efd51832d1a --- /dev/null +++ b/fs/reiser4/key.c @@ -0,0 +1,138 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key manipulations. */ + +#include "debug.h" +#include "key.h" +#include "super.h" +#include "reiser4.h" + +#include /* for __u?? */ + +/* Minimal possible key: all components are zero. It is presumed that this is + independent of key scheme. */ +static const reiser4_key MINIMAL_KEY = { + .el = { + 0ull, + ON_LARGE_KEY(0ull,) + 0ull, + 0ull + } +}; + +/* Maximal possible key: all components are ~0. It is presumed that this is + independent of key scheme. */ +static const reiser4_key MAXIMAL_KEY = { + .el = { + __constant_cpu_to_le64(~0ull), + ON_LARGE_KEY(__constant_cpu_to_le64(~0ull),) + __constant_cpu_to_le64(~0ull), + __constant_cpu_to_le64(~0ull) + } +}; + +/* Initialize key. */ +void reiser4_key_init(reiser4_key * key/* key to init */) +{ + assert("nikita-1169", key != NULL); + memset(key, 0, sizeof *key); +} + +/* minimal possible key in the tree. Return pointer to the static storage. */ +const reiser4_key * reiser4_min_key(void) +{ + return &MINIMAL_KEY; +} + +/* maximum possible key in the tree. Return pointer to the static storage. 
*/ +const reiser4_key * reiser4_max_key(void) +{ + return &MAXIMAL_KEY; +} + +#if REISER4_DEBUG +/* debugging aid: print symbolic name of key type */ +static const char *type_name(unsigned int key_type/* key type */) +{ + switch (key_type) { + case KEY_FILE_NAME_MINOR: + return "file name"; + case KEY_SD_MINOR: + return "stat data"; + case KEY_ATTR_NAME_MINOR: + return "attr name"; + case KEY_ATTR_BODY_MINOR: + return "attr body"; + case KEY_BODY_MINOR: + return "file body"; + default: + return "unknown"; + } +} + +/* debugging aid: print human readable information about key */ +void reiser4_print_key(const char *prefix /* prefix to print */ , + const reiser4_key * key/* key to print */) +{ + /* turn bold on */ + /* printf ("\033[1m"); */ + if (key == NULL) + printk("%s: null key\n", prefix); + else { + if (REISER4_LARGE_KEY) + printk("%s: (%Lx:%x:%Lx:%Lx:%Lx:%Lx)", prefix, + get_key_locality(key), + get_key_type(key), + get_key_ordering(key), + get_key_band(key), + get_key_objectid(key), get_key_offset(key)); + else + printk("%s: (%Lx:%x:%Lx:%Lx:%Lx)", prefix, + get_key_locality(key), + get_key_type(key), + get_key_band(key), + get_key_objectid(key), get_key_offset(key)); + /* + * if this is a key of directory entry, try to decode part of + * a name stored in the key, and output it. + */ + if (get_key_type(key) == KEY_FILE_NAME_MINOR) { + char buf[DE_NAME_BUF_LEN]; + char *c; + + c = buf; + c = reiser4_unpack_string(get_key_ordering(key), c); + reiser4_unpack_string(get_key_fulloid(key), c); + printk("[%s", buf); + if (is_longname_key(key)) + /* + * only part of the name is stored in the key. + */ + printk("...]\n"); + else { + /* + * whole name is stored in the key. + */ + reiser4_unpack_string(get_key_offset(key), buf); + printk("%s]\n", buf); + } + } else { + printk("[%s]\n", type_name(get_key_type(key))); + } + } + /* turn bold off */ + /* printf ("\033[m\017"); */ +} + +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/key.h b/fs/reiser4/key.h new file mode 100644 index 000000000000..2ad4ee277e61 --- /dev/null +++ b/fs/reiser4/key.h @@ -0,0 +1,392 @@ +/* Copyright 2000, 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declarations of key-related data-structures and operations on keys. */ + +#if !defined(__REISER4_KEY_H__) +#define __REISER4_KEY_H__ + +#include "dformat.h" +#include "forward.h" +#include "debug.h" + +#include /* for __u?? */ + +/* Operations on keys in reiser4 tree */ + +/* No access to any of these fields shall be done except via a + wrapping macro/function, and that wrapping macro/function shall + convert to little endian order. Compare keys will consider cpu byte order. */ + +/* A storage layer implementation difference between a regular unix file body + and its attributes is in the typedef below which causes all of the attributes + of a file to be near in key to all of the other attributes for all of the + files within that directory, and not near to the file itself. It is + interesting to consider whether this is the wrong approach, and whether there + should be no difference at all. For current usage patterns this choice is + probably the right one. */ + +/* possible values for minor packing locality (4 bits required) */ +typedef enum { + /* file name */ + KEY_FILE_NAME_MINOR = 0, + /* stat-data */ + KEY_SD_MINOR = 1, + /* file attribute name */ + KEY_ATTR_NAME_MINOR = 2, + /* file attribute value */ + KEY_ATTR_BODY_MINOR = 3, + /* file body (tail or extent) */ + KEY_BODY_MINOR = 4, +} key_minor_locality; + +/* Everything stored in the tree has a unique key, which means that the tree is + (logically) fully ordered by key. Physical order is determined by dynamic + heuristics that attempt to reflect key order when allocating available space, + and by the repacker. 
It is stylistically better to put aggregation + information into the key. Thus, if you want to segregate extents from tails, + it is better to give them distinct minor packing localities rather than + changing block_alloc.c to check the node type when deciding where to allocate + the node. + + The need to randomly displace new directories and large files disturbs this + symmetry unfortunately. However, it should be noted that this is a need that + is not clearly established given the existence of a repacker. Also, in our + current implementation tails have a different minor packing locality from + extents, and no files have both extents and tails, so maybe symmetry can be + had without performance cost after all. Symmetry is what we ship for now.... +*/ + +/* Arbitrary major packing localities can be assigned to objects using + the reiser4(filenameA/..packing<=some_number) system call. + + In reiser4, the creat() syscall creates a directory + + whose default flow (that which is referred to if the directory is + read as a file) is the traditional unix file body. + + whose directory plugin is the 'filedir' + + whose major packing locality is that of the parent of the object created. + + The static_stat item is a particular commonly used directory + compression (the one for normal unix files). + + The filedir plugin checks to see if the static_stat item exists. + There is a unique key for static_stat. If yes, then it uses the + static_stat item for all of the values that it contains. The + static_stat item contains a flag for each stat it contains which + indicates whether one should look outside the static_stat item for its + contents. +*/ + +/* offset of fields in reiser4_key. Value of each element of this enum + is index within key (thought as array of __u64's) where this field + is. */ +typedef enum { + /* major "locale", aka dirid. Sits in 1st element */ + KEY_LOCALITY_INDEX = 0, + /* minor "locale", aka item type. 
Sits in 1st element */ + KEY_TYPE_INDEX = 0, + ON_LARGE_KEY(KEY_ORDERING_INDEX,) + /* "object band". Sits in 2nd element */ + KEY_BAND_INDEX, + /* objectid. Sits in 2nd element */ + KEY_OBJECTID_INDEX = KEY_BAND_INDEX, + /* full objectid. Sits in 2nd element */ + KEY_FULLOID_INDEX = KEY_BAND_INDEX, + /* Offset. Sits in 3rd element */ + KEY_OFFSET_INDEX, + /* Name hash. Sits in 3rd element */ + KEY_HASH_INDEX = KEY_OFFSET_INDEX, + KEY_CACHELINE_END = KEY_OFFSET_INDEX, + KEY_LAST_INDEX +} reiser4_key_field_index; + +/* key in reiser4 internal "balanced" tree. It is just array of three + 64bit integers in disk byte order (little-endian by default). This + array is actually indexed by reiser4_key_field. Each __u64 within + this array is called "element". Logical key component encoded within + elements are called "fields". + + We declare this as union with second component dummy to suppress + inconvenient array<->pointer casts implied in C. */ +union reiser4_key { + __le64 el[KEY_LAST_INDEX]; + int pad; +}; + +/* bitmasks showing where within reiser4_key particular key is stored. 
*/ +/* major locality occupies higher 60 bits of the first element */ +#define KEY_LOCALITY_MASK 0xfffffffffffffff0ull + +/* minor locality occupies lower 4 bits of the first element */ +#define KEY_TYPE_MASK 0xfull + +/* controversial band occupies higher 4 bits of the 2nd element */ +#define KEY_BAND_MASK 0xf000000000000000ull + +/* objectid occupies lower 60 bits of the 2nd element */ +#define KEY_OBJECTID_MASK 0x0fffffffffffffffull + +/* full 64bit objectid*/ +#define KEY_FULLOID_MASK 0xffffffffffffffffull + +/* offset is just 3rd L.M.Nt itself */ +#define KEY_OFFSET_MASK 0xffffffffffffffffull + +/* ordering is whole second element */ +#define KEY_ORDERING_MASK 0xffffffffffffffffull + +/* how many bits key element should be shifted to left to get particular field + */ +typedef enum { + KEY_LOCALITY_SHIFT = 4, + KEY_TYPE_SHIFT = 0, + KEY_BAND_SHIFT = 60, + KEY_OBJECTID_SHIFT = 0, + KEY_FULLOID_SHIFT = 0, + KEY_OFFSET_SHIFT = 0, + KEY_ORDERING_SHIFT = 0, +} reiser4_key_field_shift; + +static inline __u64 +get_key_el(const reiser4_key * key, reiser4_key_field_index off) +{ + assert("nikita-753", key != NULL); + assert("nikita-754", off < KEY_LAST_INDEX); + return le64_to_cpu(get_unaligned(&key->el[off])); +} + +static inline void +set_key_el(reiser4_key * key, reiser4_key_field_index off, __u64 value) +{ + assert("nikita-755", key != NULL); + assert("nikita-756", off < KEY_LAST_INDEX); + put_unaligned(cpu_to_le64(value), &key->el[off]); +} + +/* macro to define getter and setter functions for field F with type T */ +#define DEFINE_KEY_FIELD(L, U, T) \ +static inline T get_key_ ## L(const reiser4_key *key) \ +{ \ + assert("nikita-750", key != NULL); \ + return (T) (get_key_el(key, KEY_ ## U ## _INDEX) & \ + KEY_ ## U ## _MASK) >> KEY_ ## U ## _SHIFT; \ +} \ + \ +static inline void set_key_ ## L(reiser4_key * key, T loc) \ +{ \ + __u64 el; \ + \ + assert("nikita-752", key != NULL); \ + \ + el = get_key_el(key, KEY_ ## U ## _INDEX); \ + /* clear field bits in the key 
*/ \ + el &= ~KEY_ ## U ## _MASK; \ + /* actually it should be \ + \ + el |= ( loc << KEY_ ## U ## _SHIFT ) & KEY_ ## U ## _MASK; \ + \ + but we trust user to never pass values that wouldn't fit \ + into field. Clearing extra bits is one operation, but this \ + function is time-critical. \ + But check this in assertion. */ \ + assert("nikita-759", ((loc << KEY_ ## U ## _SHIFT) & \ + ~KEY_ ## U ## _MASK) == 0); \ + el |= (loc << KEY_ ## U ## _SHIFT); \ + set_key_el(key, KEY_ ## U ## _INDEX, el); \ +} + +typedef __u64 oid_t; + +/* define get_key_locality(), set_key_locality() */ +DEFINE_KEY_FIELD(locality, LOCALITY, oid_t); +/* define get_key_type(), set_key_type() */ +DEFINE_KEY_FIELD(type, TYPE, key_minor_locality); +/* define get_key_band(), set_key_band() */ +DEFINE_KEY_FIELD(band, BAND, __u64); +/* define get_key_objectid(), set_key_objectid() */ +DEFINE_KEY_FIELD(objectid, OBJECTID, oid_t); +/* define get_key_fulloid(), set_key_fulloid() */ +DEFINE_KEY_FIELD(fulloid, FULLOID, oid_t); +/* define get_key_offset(), set_key_offset() */ +DEFINE_KEY_FIELD(offset, OFFSET, __u64); +#if (REISER4_LARGE_KEY) +/* define get_key_ordering(), set_key_ordering() */ +DEFINE_KEY_FIELD(ordering, ORDERING, __u64); +#else +static inline __u64 get_key_ordering(const reiser4_key * key) +{ + return 0; +} + +static inline void set_key_ordering(reiser4_key * key, __u64 val) +{ +} +#endif + +/* key comparison result */ +typedef enum { LESS_THAN = -1, /* if first key is less than second */ + EQUAL_TO = 0, /* if keys are equal */ + GREATER_THAN = +1 /* if first key is greater than second */ +} cmp_t; + +void reiser4_key_init(reiser4_key * key); + +/* minimal possible key in the tree. Return pointer to the static storage. 
*/ +extern const reiser4_key *reiser4_min_key(void); +extern const reiser4_key *reiser4_max_key(void); + +/* helper macro for keycmp() */ +#define KEY_DIFF(k1, k2, field) \ +({ \ + typeof(get_key_ ## field(k1)) f1; \ + typeof(get_key_ ## field(k2)) f2; \ + \ + f1 = get_key_ ## field(k1); \ + f2 = get_key_ ## field(k2); \ + \ + (f1 < f2) ? LESS_THAN : ((f1 == f2) ? EQUAL_TO : GREATER_THAN); \ +}) + +/* helper macro for keycmp() */ +#define KEY_DIFF_EL(k1, k2, off) \ +({ \ + __u64 e1; \ + __u64 e2; \ + \ + e1 = get_key_el(k1, off); \ + e2 = get_key_el(k2, off); \ + \ + (e1 < e2) ? LESS_THAN : ((e1 == e2) ? EQUAL_TO : GREATER_THAN); \ +}) + +/* compare `k1' and `k2'. This function is a heart of "key allocation + policy". All you need to implement new policy is to add yet another + clause here. */ +static inline cmp_t keycmp(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + cmp_t result; + + /* + * This function is the heart of reiser4 tree-routines. Key comparison + * is among most heavily used operations in the file system. + */ + + assert("nikita-439", k1 != NULL); + assert("nikita-440", k2 != NULL); + + /* there is no actual branch here: condition is compile time constant + * and constant folding and propagation ensures that only one branch + * is actually compiled in. */ + + if (REISER4_PLANA_KEY_ALLOCATION) { + /* if physical order of fields in a key is identical + with logical order, we can implement key comparison + as three 64bit comparisons. */ + /* logical order of fields in plan-a: + locality->type->objectid->offset. 
*/ + /* compare locality and type at once */ + result = KEY_DIFF_EL(k1, k2, 0); + if (result == EQUAL_TO) { + /* compare objectid (and band if it's there) */ + result = KEY_DIFF_EL(k1, k2, 1); + /* compare offset */ + if (result == EQUAL_TO) { + result = KEY_DIFF_EL(k1, k2, 2); + if (REISER4_LARGE_KEY && result == EQUAL_TO) + result = KEY_DIFF_EL(k1, k2, 3); + } + } + } else if (REISER4_3_5_KEY_ALLOCATION) { + result = KEY_DIFF(k1, k2, locality); + if (result == EQUAL_TO) { + result = KEY_DIFF(k1, k2, objectid); + if (result == EQUAL_TO) { + result = KEY_DIFF(k1, k2, type); + if (result == EQUAL_TO) + result = KEY_DIFF(k1, k2, offset); + } + } + } else + impossible("nikita-441", "Unknown key allocation scheme!"); + return result; +} + +/* true if @k1 equals @k2 */ +static inline int keyeq(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1879", k1 != NULL); + assert("nikita-1880", k2 != NULL); + return !memcmp(k1, k2, sizeof *k1); +} + +/* true if @k1 is less than @k2 */ +static inline int keylt(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1952", k1 != NULL); + assert("nikita-1953", k2 != NULL); + return keycmp(k1, k2) == LESS_THAN; +} + +/* true if @k1 is less than or equal to @k2 */ +static inline int keyle(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1954", k1 != NULL); + assert("nikita-1955", k2 != NULL); + return keycmp(k1, k2) != GREATER_THAN; +} + +/* true if @k1 is greater than @k2 */ +static inline int keygt(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1959", k1 != NULL); + assert("nikita-1960", k2 != NULL); + return keycmp(k1, k2) == GREATER_THAN; +} + +/* true if @k1 is greater than or equal to @k2 */ +static inline int keyge(const reiser4_key * 
k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1956", k1 != NULL); + assert("nikita-1957", k2 != NULL); /* October 4: sputnik launched + * November 3: Laika */ + return keycmp(k1, k2) != LESS_THAN; +} + +static inline void prefetchkey(reiser4_key * key) +{ + prefetch(key); + prefetch(&key->el[KEY_CACHELINE_END]); +} + +/* (%Lx:%x:%Lx:%Lx:%Lx:%Lx) = + 1 + 16 + 1 + 1 + 1 + 1 + 1 + 16 + 1 + 16 + 1 + 16 + 1 */ +/* size of a buffer suitable to hold human readable key representation */ +#define KEY_BUF_LEN (80) + +#if REISER4_DEBUG +extern void reiser4_print_key(const char *prefix, const reiser4_key * key); +#else +#define reiser4_print_key(p, k) noop +#endif + +/* __FS_REISERFS_KEY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/ktxnmgrd.c b/fs/reiser4/ktxnmgrd.c new file mode 100644 index 000000000000..b36215b3db43 --- /dev/null +++ b/fs/reiser4/ktxnmgrd.c @@ -0,0 +1,215 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* Transaction manager daemon. */ + +/* + * ktxnmgrd is a kernel daemon responsible for committing transactions. It is + * needed/important for the following reasons: + * + * 1. in reiser4 atom is not committed immediately when last transaction + * handle closes, unless atom is either too old or too large (see + * atom_should_commit()). This is done to avoid committing too frequently. + * because: + * + * 2. sometimes we don't want to commit atom when closing last transaction + * handle even if it is old and fat enough. For example, because we are at + * this point under directory semaphore, and committing would stall all + * accesses to this directory. + * + * ktxnmgrd binds its time sleeping on condition variable. 
When is awakes + * either due to (tunable) timeout or because it was explicitly woken up by + * call to ktxnmgrd_kick(), it scans list of all atoms and commits ones + * eligible. + * + */ + +#include "debug.h" +#include "txnmgr.h" +#include "tree.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" + +#include /* for struct task_struct */ +#include +#include +#include +#include +#include +#include + +static int scan_mgr(struct super_block *); + +/* + * change current->comm so that ps, top, and friends will see changed + * state. This serves no useful purpose whatsoever, but also costs nothing. May + * be it will make lonely system administrator feeling less alone at 3 A.M. + */ +#define set_comm(state) \ + snprintf(current->comm, sizeof(current->comm), \ + "%s:%s:%s", __FUNCTION__, (super)->s_id, (state)) + +/** + * ktxnmgrd - kernel txnmgr daemon + * @arg: pointer to super block + * + * The background transaction manager daemon, started as a kernel thread during + * reiser4 initialization. + */ +static int ktxnmgrd(void *arg) +{ + struct super_block *super; + ktxnmgrd_context *ctx; + txn_mgr *mgr; + int done = 0; + + super = arg; + mgr = &get_super_private(super)->tmgr; + + /* + * do_fork() just copies task_struct into the new thread. ->fs_context + * shouldn't be copied of course. This shouldn't be a problem for the + * rest of the code though. + */ + current->journal_info = NULL; + ctx = mgr->daemon; + while (1) { + try_to_freeze(); + set_comm("wait"); + { + DEFINE_WAIT(__wait); + + prepare_to_wait(&ctx->wait, &__wait, + TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + done = 1; + else + schedule_timeout(ctx->timeout); + finish_wait(&ctx->wait, &__wait); + } + if (done) + break; + set_comm("run"); + spin_lock(&ctx->guard); + /* + * wait timed out or ktxnmgrd was woken up by explicit request + * to commit something. Scan list of atoms in txnmgr and look + * for too old atoms. 
+ */ + do { + ctx->rescan = 0; + scan_mgr(super); + spin_lock(&ctx->guard); + if (ctx->rescan) { + /* + * the list could be modified while ctx + * spinlock was released, we have to repeat + * scanning from the beginning + */ + break; + } + } while (ctx->rescan); + spin_unlock(&ctx->guard); + } + return 0; +} + +#undef set_comm + +/** + * reiser4_init_ktxnmgrd - initialize ktxnmgrd context and start kernel daemon + * @super: pointer to super block + * + * Allocates and initializes ktxnmgrd_context, attaches it to transaction + * manager. Starts kernel txnmgr daemon. This is called on mount. + */ +int reiser4_init_ktxnmgrd(struct super_block *super) +{ + txn_mgr *mgr; + ktxnmgrd_context *ctx; + + mgr = &get_super_private(super)->tmgr; + + assert("zam-1014", mgr->daemon == NULL); + + ctx = kzalloc(sizeof(ktxnmgrd_context), reiser4_ctx_gfp_mask_get()); + if (!ctx) + return RETERR(-ENOMEM); + + assert("nikita-2442", ctx != NULL); + + init_waitqueue_head(&ctx->wait); + + /*kcond_init(&ctx->startup);*/ + spin_lock_init(&ctx->guard); + ctx->timeout = REISER4_TXNMGR_TIMEOUT; + ctx->rescan = 1; + mgr->daemon = ctx; + + ctx->tsk = kthread_run(ktxnmgrd, super, "ktxnmgrd"); + if (IS_ERR(ctx->tsk)) { + int ret = PTR_ERR(ctx->tsk); + mgr->daemon = NULL; + kfree(ctx); + return RETERR(ret); + } + return 0; +} + +void ktxnmgrd_kick(txn_mgr *mgr) +{ + assert("nikita-3234", mgr != NULL); + assert("nikita-3235", mgr->daemon != NULL); + wake_up(&mgr->daemon->wait); +} + +int is_current_ktxnmgrd(void) +{ + return (get_current_super_private()->tmgr.daemon->tsk == current); +} + +/** + * scan_mgr - commit atoms which are to be committed + * @super: super block to commit atoms of + * + * Commits old atoms. 
+ */ +static int scan_mgr(struct super_block *super) +{ + int ret; + reiser4_context ctx; + + init_stack_context(&ctx, super); + + ret = commit_some_atoms(&get_super_private(super)->tmgr); + + reiser4_exit_context(&ctx); + return ret; +} + +/** + * reiser4_done_ktxnmgrd - stop kernel thread and frees ktxnmgrd context + * @mgr: + * + * This is called on umount. Stops ktxnmgrd and free t + */ +void reiser4_done_ktxnmgrd(struct super_block *super) +{ + txn_mgr *mgr; + + mgr = &get_super_private(super)->tmgr; + assert("zam-1012", mgr->daemon != NULL); + + kthread_stop(mgr->daemon->tsk); + kfree(mgr->daemon); + mgr->daemon = NULL; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/ktxnmgrd.h b/fs/reiser4/ktxnmgrd.h new file mode 100644 index 000000000000..d00f1d9e54ed --- /dev/null +++ b/fs/reiser4/ktxnmgrd.h @@ -0,0 +1,52 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Transaction manager daemon. See ktxnmgrd.c for comments. */ + +#ifndef __KTXNMGRD_H__ +#define __KTXNMGRD_H__ + +#include "txnmgr.h" + +#include +#include +#include +#include +#include +#include /* for struct task_struct */ + +/* in this structure all data necessary to start up, shut down and communicate + * with ktxnmgrd are kept. */ +struct ktxnmgrd_context { + /* wait queue head on which ktxnmgrd sleeps */ + wait_queue_head_t wait; + /* spin lock protecting all fields of this structure */ + spinlock_t guard; + /* timeout of sleeping on ->wait */ + signed long timeout; + /* kernel thread running ktxnmgrd */ + struct task_struct *tsk; + /* list of all file systems served by this ktxnmgrd */ + struct list_head queue; + /* should ktxnmgrd repeat scanning of atoms? 
*/ + unsigned int rescan:1; +}; + +extern int reiser4_init_ktxnmgrd(struct super_block *); +extern void reiser4_done_ktxnmgrd(struct super_block *); + +extern void ktxnmgrd_kick(txn_mgr * mgr); +extern int is_current_ktxnmgrd(void); + +/* __KTXNMGRD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/lock.c b/fs/reiser4/lock.c new file mode 100644 index 000000000000..4af6fd0f9d2a --- /dev/null +++ b/fs/reiser4/lock.c @@ -0,0 +1,1237 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Traditional deadlock avoidance is achieved by acquiring all locks in a single + order. V4 balances the tree from the bottom up, and searches the tree from + the top down, and that is really the way we want it, so tradition won't work + for us. + + Instead we have two lock orderings, a high priority lock ordering, and a low + priority lock ordering. Each node in the tree has a lock in its znode. + + Suppose we have a set of processes which lock (R/W) tree nodes. Each process + has a set (maybe empty) of already locked nodes ("process locked set"). Each + process may have a pending lock request to a node locked by another process. + Note: we lock and unlock, but do not transfer locks: it is possible + transferring locks instead would save some bus locking.... + + Deadlock occurs when we have a loop constructed from process locked sets and + lock request vectors. + + NOTE: The reiser4 "tree" is a tree on disk, but its cached representation in + memory is extended with "znodes" with which we connect nodes with their left + and right neighbors using sibling pointers stored in the znodes. When we + perform balancing operations we often go from left to right and from right to + left. 
+ + +-P1-+ +-P3-+ + |+--+| V1 |+--+| + ||N1|| -------> ||N3|| + |+--+| |+--+| + +----+ +----+ + ^ | + |V2 |V3 + | v + +---------P2---------+ + |+--+ +--+| + ||N2| -------- |N4|| + |+--+ +--+| + +--------------------+ + + We solve this by ensuring that only low priority processes lock in top to + bottom order and from right to left, and high priority processes lock from + bottom to top and left to right. + + ZAM-FIXME-HANS: order not just node locks in this way, order atom locks, and + kill those damn busy loops. + ANSWER(ZAM): atom locks (which are introduced by ASTAGE_CAPTURE_WAIT atom + stage) cannot be ordered that way. There are no rules what nodes can belong + to the atom and what nodes cannot. We cannot define what is right or left + direction, what is top or bottom. We can take immediate parent or side + neighbor of one node, but nobody guarantees that, say, left neighbor node is + not a far right neighbor for other nodes from the same atom. It breaks + deadlock avoidance rules and hi-low priority locking cannot be applied for + atom locks. + + How does it help to avoid deadlocks ? + + Suppose we have a deadlock with n processes. Processes from one priority + class never deadlock because they take locks in one consistent + order. + + So, any possible deadlock loop must have low priority as well as high + priority processes. There are no other lock priority levels except low and + high. We know that any deadlock loop contains at least one node locked by a + low priority process and requested by a high priority process. If this + situation is caught and resolved it is sufficient to avoid deadlocks. + + V4 DEADLOCK PREVENTION ALGORITHM IMPLEMENTATION. + + The deadlock prevention algorithm is based on comparing + priorities of node owners (processes which keep znode locked) and + requesters (processes which want to acquire a lock on znode). We + implement a scheme where low-priority owners yield locks to + high-priority requesters. 
We created a signal passing system that + is used to ask low-priority processes to yield one or more locked + znodes. + + The condition when a znode needs to change its owners is described by the + following formula: + + ############################################# + # # + # (number of high-priority requesters) > 0 # + # AND # + # (numbers of high-priority owners) == 0 # + # # + ############################################# + + Note that a low-priority process delays node releasing if another + high-priority process owns this node. So, slightly more strictly speaking, + to have a deadlock capable cycle you must have a loop in which a high + priority process is waiting on a low priority process to yield a node, which + is slightly different from saying a high priority process is waiting on a + node owned by a low priority process. + + It is enough to avoid deadlocks if we prevent any low-priority process from + falling asleep if its locked set contains a node which satisfies the + deadlock condition. + + That condition is implicitly or explicitly checked in all places where new + high-priority requests may be added or removed from node request queue or + high-priority process takes or releases a lock on node. The main + goal of these checks is to never lose the moment when node becomes "has + wrong owners" and send "must-yield-this-lock" signals to its low-pri owners + at that time. + + The information about received signals is stored in the per-process + structure (lock stack) and analyzed before a low-priority process goes to + sleep but after a "fast" attempt to lock a node fails. Any signal wakes + sleeping process up and forces him to re-check lock status and received + signal info. If "must-yield-this-lock" signals were received the locking + primitive (longterm_lock_znode()) fails with -E_DEADLOCK error code. 
+ + V4 LOCKING DRAWBACKS + + If we have already balanced on one level, and we are propagating our changes + upward to a higher level, it could be very messy to surrender all locks on + the lower level because we put so much computational work into it, and + reverting them to their state before they were locked might be very complex. + We also don't want to acquire all locks before performing balancing because + that would either be almost as much work as the balancing, or it would be + too conservative and lock too much. We want balancing to be done only at + high priority. Yet, we might want to go to the left one node and use some + of its empty space... So we make one attempt at getting the node to the left + using try_lock, and if it fails we do without it, because we didn't really + need it, it was only a nice to have. + + LOCK STRUCTURES DESCRIPTION + + The following data structures are used in the reiser4 locking + implementation: + + All fields related to long-term locking are stored in znode->lock. + + The lock stack is a per thread object. It owns all znodes locked by the + thread. One znode may be locked by several threads in case of read lock or + one znode may be write locked by one thread several times. The special link + objects (lock handles) support n<->m relation between znodes and lock + owners. + + + + +---------+ +---------+ + | LS1 | | LS2 | + +---------+ +---------+ + ^ ^ + |---------------+ +----------+ + v v v v + +---------+ +---------+ +---------+ +---------+ + | LH1 | | LH2 | | LH3 | | LH4 | + +---------+ +---------+ +---------+ +---------+ + ^ ^ ^ ^ + | +------------+ | + v v v + +---------+ +---------+ +---------+ + | Z1 | | Z2 | | Z3 | + +---------+ +---------+ +---------+ + + Thread 1 locked znodes Z1 and Z2, thread 2 locked znodes Z2 and Z3. The + picture above shows that lock stack LS1 has a list of 2 lock handles LH1 and + LH2, lock stack LS2 has a list with lock handles LH3 and LH4 on it. 
Znode + Z1 is locked by only one thread, znode has only one lock handle LH1 on its + list, similar situation is for Z3 which is locked by the thread 2 only. Z2 + is locked (for read) twice by different threads and two lock handles are on + its list. Each lock handle represents a single relation of a locking of a + znode by a thread. Locking of a znode is an establishing of a locking + relation between the lock stack and the znode by adding of a new lock handle + to a list of lock handles, the lock stack. The lock stack links all lock + handles for all znodes locked by the lock stack. The znode list groups all + lock handles for all locks stacks which locked the znode. + + Yet another relation may exist between znode and lock owners. If lock + procedure cannot immediately take lock on an object it adds the lock owner + on special `requestors' list belongs to znode. That list represents a + queue of pending lock requests. Because one lock owner may request only + only one lock object at a time, it is a 1->n relation between lock objects + and a lock owner implemented as it is described above. Full information + (priority, pointers to lock and link objects) about each lock request is + stored in lock owner structure in `request' field. + + SHORT_TERM LOCKING + + This is a list of primitive operations over lock stacks / lock handles / + znodes and locking descriptions for them. + + 1. locking / unlocking which is done by two list insertion/deletion, one + to/from znode's list of lock handles, another one is to/from lock stack's + list of lock handles. The first insertion is protected by + znode->lock.guard spinlock. The list owned by the lock stack can be + modified only by thread who owns the lock stack and nobody else can + modify/read it. There is nothing to be protected by a spinlock or + something else. + + 2. adding/removing a lock request to/from znode requesters list. The rule is + that znode->lock.guard spinlock should be taken for this. + + 3. 
we can traverse list of lock handles and use references to lock stacks who + locked given znode if znode->lock.guard spinlock is taken. + + 4. If a lock stack is associated with a znode as a lock requestor or lock + owner its existence is guaranteed by znode->lock.guard spinlock. Some its + (lock stack's) fields should be protected from being accessed in parallel + by two or more threads. Please look at lock_stack structure definition + for the info how those fields are protected. */ + +/* Znode lock and capturing intertwining. */ +/* In current implementation we capture formatted nodes before locking + them. Take a look on longterm lock znode, reiser4_try_capture() request + precedes locking requests. The longterm_lock_znode function unconditionally + captures znode before even checking of locking conditions. + + Another variant is to capture znode after locking it. It was not tested, but + at least one deadlock condition is supposed to be there. One thread has + locked a znode (Node-1) and calls reiser4_try_capture() for it. + reiser4_try_capture() sleeps because znode's atom has CAPTURE_WAIT state. + Second thread is a flushing thread, its current atom is the atom Node-1 + belongs to. Second thread wants to lock Node-1 and sleeps because Node-1 + is locked by the first thread. The described situation is a deadlock. 
*/ + +#include "debug.h" +#include "txnmgr.h" +#include "znode.h" +#include "jnode.h" +#include "tree.h" +#include "plugin/node/node.h" +#include "super.h" + +#include + +#if REISER4_DEBUG +static int request_is_deadlock_safe(znode * , znode_lock_mode, + znode_lock_request); +#endif + +/* Returns a lock owner associated with current thread */ +lock_stack *get_current_lock_stack(void) +{ + return &get_current_context()->stack; +} + +/* Wakes up all low priority owners informing them about possible deadlock */ +static void wake_up_all_lopri_owners(znode * node) +{ + lock_handle *handle; + + assert_spin_locked(&(node->lock.guard)); + list_for_each_entry(handle, &node->lock.owners, owners_link) { + assert("nikita-1832", handle->node == node); + /* count this signal in owner->nr_signaled */ + if (!handle->signaled) { + handle->signaled = 1; + atomic_inc(&handle->owner->nr_signaled); + /* Wake up a single process */ + reiser4_wake_up(handle->owner); + } + } +} + +/* Adds a lock to a lock owner, which means creating a link to the lock and + putting the link into the two lists all links are on (the doubly linked list + that forms the lock_stack, and the doubly linked list of links attached + to a lock. 
+*/ +static inline void +link_object(lock_handle * handle, lock_stack * owner, znode * node) +{ + assert("jmacd-810", handle->owner == NULL); + assert_spin_locked(&(node->lock.guard)); + + handle->owner = owner; + handle->node = node; + + assert("reiser4-4", + ergo(list_empty_careful(&owner->locks), owner->nr_locks == 0)); + + /* add lock handle to the end of lock_stack's list of locks */ + list_add_tail(&handle->locks_link, &owner->locks); + ON_DEBUG(owner->nr_locks++); + reiser4_ctx_gfp_mask_set(); + + /* add lock handle to the head of znode's list of owners */ + list_add(&handle->owners_link, &node->lock.owners); + handle->signaled = 0; +} + +/* Breaks a relation between a lock and its owner */ +static inline void unlink_object(lock_handle * handle) +{ + assert("zam-354", handle->owner != NULL); + assert("nikita-1608", handle->node != NULL); + assert_spin_locked(&(handle->node->lock.guard)); + assert("nikita-1829", handle->owner == get_current_lock_stack()); + assert("reiser4-5", handle->owner->nr_locks > 0); + + /* remove lock handle from lock_stack's list of locks */ + list_del(&handle->locks_link); + ON_DEBUG(handle->owner->nr_locks--); + reiser4_ctx_gfp_mask_set(); + assert("reiser4-6", + ergo(list_empty_careful(&handle->owner->locks), + handle->owner->nr_locks == 0)); + /* remove lock handle from znode's list of owners */ + list_del(&handle->owners_link); + /* indicates that lock handle is free now */ + handle->node = NULL; +#if REISER4_DEBUG + INIT_LIST_HEAD(&handle->locks_link); + INIT_LIST_HEAD(&handle->owners_link); + handle->owner = NULL; +#endif +} + +/* Actually locks an object knowing that we are able to do this */ +static void lock_object(lock_stack * owner) +{ + struct lock_request *request; + znode *node; + + request = &owner->request; + node = request->node; + assert_spin_locked(&(node->lock.guard)); + if (request->mode == ZNODE_READ_LOCK) { + node->lock.nr_readers++; + } else { + /* check that we don't switched from read to write lock */ + 
assert("nikita-1840", node->lock.nr_readers <= 0); + /* We allow recursive locking; a node can be locked several + times for write by same process */ + node->lock.nr_readers--; + } + + link_object(request->handle, owner, node); + + if (owner->curpri) + node->lock.nr_hipri_owners++; +} + +/* Check for recursive write locking */ +static int recursive(lock_stack * owner) +{ + int ret; + znode *node; + lock_handle *lh; + + node = owner->request.node; + + /* Owners list is not empty for a locked node */ + assert("zam-314", !list_empty_careful(&node->lock.owners)); + assert("nikita-1841", owner == get_current_lock_stack()); + assert_spin_locked(&(node->lock.guard)); + + lh = list_entry(node->lock.owners.next, lock_handle, owners_link); + ret = (lh->owner == owner); + + /* Recursive read locking should be done usual way */ + assert("zam-315", !ret || owner->request.mode == ZNODE_WRITE_LOCK); + /* mixing of read/write locks is not allowed */ + assert("zam-341", !ret || znode_is_wlocked(node)); + + return ret; +} + +#if REISER4_DEBUG +/* Returns true if the lock is held by the calling thread. */ +int znode_is_any_locked(const znode * node) +{ + lock_handle *handle; + lock_stack *stack; + int ret; + + if (!znode_is_locked(node)) + return 0; + + stack = get_current_lock_stack(); + + spin_lock_stack(stack); + + ret = 0; + + list_for_each_entry(handle, &stack->locks, locks_link) { + if (handle->node == node) { + ret = 1; + break; + } + } + + spin_unlock_stack(stack); + + return ret; +} + +#endif + +/* Returns true if a write lock is held by the calling thread. */ +int znode_is_write_locked(const znode * node) +{ + lock_stack *stack; + lock_handle *handle; + + assert("jmacd-8765", node != NULL); + + if (!znode_is_wlocked(node)) + return 0; + + stack = get_current_lock_stack(); + + /* + * When znode is write locked, all owner handles point to the same lock + * stack. 
Get pointer to lock stack from the first lock handle from + * znode's owner list + */ + handle = list_entry(node->lock.owners.next, lock_handle, owners_link); + + return (handle->owner == stack); +} + +/* This "deadlock" condition is the essential part of reiser4 locking + implementation. This condition is checked explicitly by calling + check_deadlock_condition() or implicitly in all places where znode lock + state (set of owners and request queue) is changed. Locking code is + designed to use this condition to trigger procedure of passing object from + low priority owner(s) to high priority one(s). + + The procedure results in passing an event (setting lock_handle->signaled + flag) and counting this event in nr_signaled field of owner's lock stack + object and wakeup owner's process. +*/ +static inline int check_deadlock_condition(znode * node) +{ + assert_spin_locked(&(node->lock.guard)); + return node->lock.nr_hipri_requests > 0 + && node->lock.nr_hipri_owners == 0; +} + +static int check_livelock_condition(znode * node, znode_lock_mode mode) +{ + zlock * lock = &node->lock; + + return mode == ZNODE_READ_LOCK && + lock->nr_readers >= 0 && lock->nr_hipri_write_requests > 0; +} + +/* checks lock/request compatibility */ +static int can_lock_object(lock_stack * owner) +{ + znode *node = owner->request.node; + + assert_spin_locked(&(node->lock.guard)); + + /* See if the node is disconnected. */ + if (unlikely(ZF_ISSET(node, JNODE_IS_DYING))) + return RETERR(-EINVAL); + + /* Do not ever try to take a lock if we are going in low priority + direction and a node have a high priority request without high + priority owners. 
*/ + if (unlikely(!owner->curpri && check_deadlock_condition(node))) + return RETERR(-E_REPEAT); + if (unlikely(owner->curpri && + check_livelock_condition(node, owner->request.mode))) + return RETERR(-E_REPEAT); + if (unlikely(!is_lock_compatible(node, owner->request.mode))) + return RETERR(-E_REPEAT); + return 0; +} + +/* Setting of a high priority to the process. It clears "signaled" flags + because znode locked by high-priority process can't satisfy our "deadlock + condition". */ +static void set_high_priority(lock_stack * owner) +{ + assert("nikita-1846", owner == get_current_lock_stack()); + /* Do nothing if current priority is already high */ + if (!owner->curpri) { + /* We don't need locking for owner->locks list, because, this + * function is only called with the lock stack of the current + * thread, and no other thread can play with owner->locks list + * and/or change ->node pointers of lock handles in this list. + * + * (Interrupts also are not involved.) + */ + lock_handle *item = list_entry(owner->locks.next, lock_handle, + locks_link); + while (&owner->locks != &item->locks_link) { + znode *node = item->node; + + spin_lock_zlock(&node->lock); + + node->lock.nr_hipri_owners++; + + /* we can safely set signaled to zero, because + previous statement (nr_hipri_owners ++) guarantees + that signaled will be never set again. */ + item->signaled = 0; + spin_unlock_zlock(&node->lock); + + item = list_entry(item->locks_link.next, lock_handle, + locks_link); + } + owner->curpri = 1; + atomic_set(&owner->nr_signaled, 0); + } +} + +/* Sets a low priority to the process. */ +static void set_low_priority(lock_stack * owner) +{ + assert("nikita-3075", owner == get_current_lock_stack()); + /* Do nothing if current priority is already low */ + if (owner->curpri) { + /* scan all locks (lock handles) held by @owner, which is + actually current thread, and check whether we are reaching + deadlock possibility anywhere. 
+ */ + lock_handle *handle = list_entry(owner->locks.next, lock_handle, + locks_link); + while (&owner->locks != &handle->locks_link) { + znode *node = handle->node; + spin_lock_zlock(&node->lock); + /* this thread just was hipri owner of @node, so + nr_hipri_owners has to be greater than zero. */ + assert("nikita-1835", node->lock.nr_hipri_owners > 0); + node->lock.nr_hipri_owners--; + /* If we have deadlock condition, adjust a nr_signaled + field. It is enough to set "signaled" flag only for + current process, other low-pri owners will be + signaled and waken up after current process unlocks + this object and any high-priority requestor takes + control. */ + if (check_deadlock_condition(node) + && !handle->signaled) { + handle->signaled = 1; + atomic_inc(&owner->nr_signaled); + } + spin_unlock_zlock(&node->lock); + handle = list_entry(handle->locks_link.next, + lock_handle, locks_link); + } + owner->curpri = 0; + } +} + +static void remove_lock_request(lock_stack * requestor) +{ + zlock * lock = &requestor->request.node->lock; + + if (requestor->curpri) { + assert("nikita-1838", lock->nr_hipri_requests > 0); + lock->nr_hipri_requests--; + if (requestor->request.mode == ZNODE_WRITE_LOCK) + lock->nr_hipri_write_requests--; + } + list_del(&requestor->requestors_link); +} + +static void invalidate_all_lock_requests(znode * node) +{ + lock_stack *requestor, *tmp; + + assert_spin_locked(&(node->lock.guard)); + + list_for_each_entry_safe(requestor, tmp, &node->lock.requestors, + requestors_link) { + remove_lock_request(requestor); + requestor->request.ret_code = -EINVAL; + reiser4_wake_up(requestor); + requestor->request.mode = ZNODE_NO_LOCK; + } +} + +static void dispatch_lock_requests(znode * node) +{ + lock_stack *requestor, *tmp; + + assert_spin_locked(&(node->lock.guard)); + + list_for_each_entry_safe(requestor, tmp, &node->lock.requestors, + requestors_link) { + if (znode_is_write_locked(node)) + break; + if (!can_lock_object(requestor)) { + 
lock_object(requestor); + remove_lock_request(requestor); + requestor->request.ret_code = 0; + reiser4_wake_up(requestor); + requestor->request.mode = ZNODE_NO_LOCK; + } + } +} + +/* release long-term lock, acquired by longterm_lock_znode() */ +void longterm_unlock_znode(lock_handle * handle) +{ + znode *node = handle->node; + lock_stack *oldowner = handle->owner; + int hipri; + int readers; + int rdelta; + int youdie; + + /* + * this is time-critical and highly optimized code. Modify carefully. + */ + + assert("jmacd-1021", handle != NULL); + assert("jmacd-1022", handle->owner != NULL); + assert("nikita-1392", LOCK_CNT_GTZ(long_term_locked_znode)); + + assert("zam-130", oldowner == get_current_lock_stack()); + + LOCK_CNT_DEC(long_term_locked_znode); + + /* + * to minimize amount of operations performed under lock, pre-compute + * all variables used within critical section. This makes code + * obscure. + */ + + /* was this lock of hi or lo priority */ + hipri = oldowner->curpri ? 1 : 0; + /* number of readers */ + readers = node->lock.nr_readers; + /* +1 if write lock, -1 if read lock */ + rdelta = (readers > 0) ? -1 : +1; + /* true if node is to die and write lock is released */ + youdie = ZF_ISSET(node, JNODE_HEARD_BANSHEE) && (readers < 0); + + spin_lock_zlock(&node->lock); + + assert("zam-101", znode_is_locked(node)); + + /* Adjust a number of high priority owners of this lock */ + assert("nikita-1836", node->lock.nr_hipri_owners >= hipri); + node->lock.nr_hipri_owners -= hipri; + + /* Handle znode deallocation on last write-lock release. */ + if (znode_is_wlocked_once(node)) { + if (youdie) { + forget_znode(handle); + assert("nikita-2191", znode_invariant(node)); + zput(node); + return; + } + } + + if (handle->signaled) + atomic_dec(&oldowner->nr_signaled); + + /* Unlocking means owner<->object link deletion */ + unlink_object(handle); + + /* This is enough to be sure whether an object is completely + unlocked. 
*/ + node->lock.nr_readers += rdelta; + + /* If the node is locked it must have an owners list. Likewise, if + the node is unlocked it must have an empty owners list. */ + assert("zam-319", equi(znode_is_locked(node), + !list_empty_careful(&node->lock.owners))); + +#if REISER4_DEBUG + if (!znode_is_locked(node)) + ++node->times_locked; +#endif + + /* If there are pending lock requests we wake up a requestor */ + if (!znode_is_wlocked(node)) + dispatch_lock_requests(node); + if (check_deadlock_condition(node)) + wake_up_all_lopri_owners(node); + spin_unlock_zlock(&node->lock); + + /* minus one reference from handle->node */ + assert("nikita-2190", znode_invariant(node)); + ON_DEBUG(check_lock_data()); + ON_DEBUG(check_lock_node_data(node)); + zput(node); +} + +/* final portion of longterm-lock */ +static int +lock_tail(lock_stack * owner, int ok, znode_lock_mode mode) +{ + znode *node = owner->request.node; + + assert_spin_locked(&(node->lock.guard)); + + /* If we broke with (ok == 0) it means we can_lock, now do it. */ + if (ok == 0) { + lock_object(owner); + owner->request.mode = 0; + /* count a reference from lockhandle->node + + znode was already referenced at the entry to this function, + hence taking spin-lock here is not necessary (see comment + in the zref()). + */ + zref(node); + + LOCK_CNT_INC(long_term_locked_znode); + } + spin_unlock_zlock(&node->lock); + ON_DEBUG(check_lock_data()); + ON_DEBUG(check_lock_node_data(node)); + return ok; +} + +/* + * version of longterm_znode_lock() optimized for the most common case: read + * lock without any special flags. This is the kind of lock that any tree + * traversal takes on the root node of the tree, which is very frequent. 
+ */ +static int longterm_lock_tryfast(lock_stack * owner) +{ + int result; + znode *node; + zlock *lock; + + node = owner->request.node; + lock = &node->lock; + + assert("nikita-3340", reiser4_schedulable()); + assert("nikita-3341", request_is_deadlock_safe(node, + ZNODE_READ_LOCK, + ZNODE_LOCK_LOPRI)); + spin_lock_zlock(lock); + result = can_lock_object(owner); + spin_unlock_zlock(lock); + + if (likely(result != -EINVAL)) { + spin_lock_znode(node); + result = reiser4_try_capture(ZJNODE(node), ZNODE_READ_LOCK, 0); + spin_unlock_znode(node); + spin_lock_zlock(lock); + if (unlikely(result != 0)) { + owner->request.mode = 0; + } else { + result = can_lock_object(owner); + if (unlikely(result == -E_REPEAT)) { + /* fall back to longterm_lock_znode() */ + spin_unlock_zlock(lock); + return 1; + } + } + return lock_tail(owner, result, ZNODE_READ_LOCK); + } else + return 1; +} + +/* locks given lock object */ +int longterm_lock_znode( + /* local link object (allocated by lock owner + * thread, usually on its own stack) */ + lock_handle * handle, + /* znode we want to lock. */ + znode * node, + /* {ZNODE_READ_LOCK, ZNODE_WRITE_LOCK}; */ + znode_lock_mode mode, + /* {0, -EINVAL, -E_DEADLOCK}, see return codes + description. */ + znode_lock_request request) { + int ret; + int hipri = (request & ZNODE_LOCK_HIPRI) != 0; + int non_blocking = 0; + int has_atom; + txn_capture cap_flags; + zlock *lock; + txn_handle *txnh; + tree_level level; + + /* Get current process context */ + lock_stack *owner = get_current_lock_stack(); + + /* Check that the lock handle is initialized and isn't already being + * used. */ + assert("jmacd-808", handle->owner == NULL); + assert("nikita-3026", reiser4_schedulable()); + assert("nikita-3219", request_is_deadlock_safe(node, mode, request)); + assert("zam-1056", atomic_read(&ZJNODE(node)->x_count) > 0); + /* long term locks are not allowed in the VM contexts (->writepage(), + * prune_{d,i}cache()). 
+ * + * FIXME this doesn't work due to unused-dentry-with-unlinked-inode + * bug caused by d_splice_alias() only working for directories. + */ + assert("nikita-3547", 1 || ((current->flags & PF_MEMALLOC) == 0)); + assert("zam-1055", mode != ZNODE_NO_LOCK); + + cap_flags = 0; + if (request & ZNODE_LOCK_NONBLOCK) { + cap_flags |= TXN_CAPTURE_NONBLOCKING; + non_blocking = 1; + } + + if (request & ZNODE_LOCK_DONT_FUSE) + cap_flags |= TXN_CAPTURE_DONT_FUSE; + + /* If we are changing our process priority we must adjust a number + of high priority owners for each znode that we already lock */ + if (hipri) { + set_high_priority(owner); + } else { + set_low_priority(owner); + } + + level = znode_get_level(node); + + /* Fill request structure with our values. */ + owner->request.mode = mode; + owner->request.handle = handle; + owner->request.node = node; + + txnh = get_current_context()->trans; + lock = &node->lock; + + if (mode == ZNODE_READ_LOCK && request == 0) { + ret = longterm_lock_tryfast(owner); + if (ret <= 0) + return ret; + } + + has_atom = (txnh->atom != NULL); + + /* Synchronize on node's zlock guard lock. */ + spin_lock_zlock(lock); + + if (znode_is_locked(node) && + mode == ZNODE_WRITE_LOCK && recursive(owner)) + return lock_tail(owner, 0, mode); + + for (;;) { + /* Check the lock's availability: if it is unavaiable we get + E_REPEAT, 0 indicates "can_lock", otherwise the node is + invalid. */ + ret = can_lock_object(owner); + + if (unlikely(ret == -EINVAL)) { + /* @node is dying. Leave it alone. */ + break; + } + + if (unlikely(ret == -E_REPEAT && non_blocking)) { + /* either locking of @node by the current thread will + * lead to the deadlock, or lock modes are + * incompatible. */ + break; + } + + assert("nikita-1844", (ret == 0) + || ((ret == -E_REPEAT) && !non_blocking)); + /* If we can get the lock... Try to capture first before + taking the lock. */ + + /* first handle commonest case where node and txnh are already + * in the same atom. 
*/ + /* safe to do without taking locks, because: + * + * 1. read of aligned word is atomic with respect to writes to + * this word + * + * 2. false negatives are handled in reiser4_try_capture(). + * + * 3. false positives are impossible. + * + * PROOF: left as an exercise to the curious reader. + * + * Just kidding. Here is one: + * + * At the time T0 txnh->atom is stored in txnh_atom. + * + * At the time T1 node->atom is stored in node_atom. + * + * At the time T2 we observe that + * + * txnh_atom != NULL && node_atom == txnh_atom. + * + * Imagine that at this moment we acquire node and txnh spin + * lock in this order. Suppose that under spin lock we have + * + * node->atom != txnh->atom, (S1) + * + * at the time T3. + * + * txnh->atom != NULL still, because txnh is open by the + * current thread. + * + * Suppose node->atom == NULL, that is, node was un-captured + * between T1, and T3. But un-capturing of formatted node is + * always preceded by the call to reiser4_invalidate_lock(), + * which marks znode as JNODE_IS_DYING under zlock spin + * lock. Contradiction, because can_lock_object() above checks + * for JNODE_IS_DYING. Hence, node->atom != NULL at T3. + * + * Suppose that node->atom != node_atom, that is, atom, node + * belongs to was fused into another atom: node_atom was fused + * into node->atom. Atom of txnh was equal to node_atom at T2, + * which means that under spin lock, txnh->atom == node->atom, + * because txnh->atom can only follow fusion + * chain. Contradicts S1. + * + * The same for hypothesis txnh->atom != txnh_atom. Hence, + * node->atom == node_atom == txnh_atom == txnh->atom. Again + * contradicts S1. Hence S1 is false. QED. + * + */ + + if (likely(has_atom && ZJNODE(node)->atom == txnh->atom)) { + ; + } else { + /* + * unlock zlock spin lock here. 
It is possible for + * longterm_unlock_znode() to sneak in here, but there + * is no harm: reiser4_invalidate_lock() will mark znode + * as JNODE_IS_DYING and this will be noted by + * can_lock_object() below. + */ + spin_unlock_zlock(lock); + spin_lock_znode(node); + ret = reiser4_try_capture(ZJNODE(node), mode, + cap_flags); + spin_unlock_znode(node); + spin_lock_zlock(lock); + if (unlikely(ret != 0)) { + /* In the failure case, the txnmgr releases + the znode's lock (or in some cases, it was + released a while ago). There's no need to + reacquire it so we should return here, + avoid releasing the lock. */ + owner->request.mode = 0; + break; + } + + /* Check the lock's availability again -- this is + because under some circumstances the capture code + has to release and reacquire the znode spinlock. */ + ret = can_lock_object(owner); + } + + /* This time, a return of (ret == 0) means we can lock, so we + should break out of the loop. */ + if (likely(ret != -E_REPEAT || non_blocking)) + break; + + /* Lock is unavailable, we have to wait. */ + ret = reiser4_prepare_to_sleep(owner); + if (unlikely(ret != 0)) + break; + + assert_spin_locked(&(node->lock.guard)); + if (hipri) { + /* If we are going in high priority direction then + increase high priority requests counter for the + node */ + lock->nr_hipri_requests++; + if (mode == ZNODE_WRITE_LOCK) + lock->nr_hipri_write_requests++; + /* If there are no high priority owners for a node, + then immediately wake up low priority owners, so + they can detect possible deadlock */ + if (lock->nr_hipri_owners == 0) + wake_up_all_lopri_owners(node); + } + list_add_tail(&owner->requestors_link, &lock->requestors); + + /* Ok, here we have prepared a lock request, so unlock + a znode ... */ + spin_unlock_zlock(lock); + /* ... 
and sleep */ + reiser4_go_to_sleep(owner); + if (owner->request.mode == ZNODE_NO_LOCK) + goto request_is_done; + spin_lock_zlock(lock); + if (owner->request.mode == ZNODE_NO_LOCK) { + spin_unlock_zlock(lock); +request_is_done: + if (owner->request.ret_code == 0) { + LOCK_CNT_INC(long_term_locked_znode); + zref(node); + } + return owner->request.ret_code; + } + remove_lock_request(owner); + } + + return lock_tail(owner, ret, mode); +} + +/* lock object invalidation means changing of lock object state to `INVALID' + and waiting for all other processes to cancel theirs lock requests. */ +void reiser4_invalidate_lock(lock_handle * handle /* path to lock + * owner and lock + * object is being + * invalidated. */ ) +{ + znode *node = handle->node; + lock_stack *owner = handle->owner; + + assert("zam-325", owner == get_current_lock_stack()); + assert("zam-103", znode_is_write_locked(node)); + assert("nikita-1393", !ZF_ISSET(node, JNODE_LEFT_CONNECTED)); + assert("nikita-1793", !ZF_ISSET(node, JNODE_RIGHT_CONNECTED)); + assert("nikita-1394", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("nikita-3097", znode_is_wlocked_once(node)); + assert_spin_locked(&(node->lock.guard)); + + if (handle->signaled) + atomic_dec(&owner->nr_signaled); + + ZF_SET(node, JNODE_IS_DYING); + unlink_object(handle); + node->lock.nr_readers = 0; + + invalidate_all_lock_requests(node); + spin_unlock_zlock(&node->lock); +} + +/* Initializes lock_stack. */ +void init_lock_stack(lock_stack * owner /* pointer to + * allocated + * structure. */ ) +{ + INIT_LIST_HEAD(&owner->locks); + INIT_LIST_HEAD(&owner->requestors_link); + spin_lock_init(&owner->sguard); + owner->curpri = 1; + init_waitqueue_head(&owner->wait); +} + +/* Initializes lock object. */ +void reiser4_init_lock(zlock * lock /* pointer on allocated + * uninitialized lock object + * structure. 
*/ ) +{ + memset(lock, 0, sizeof(zlock)); + spin_lock_init(&lock->guard); + INIT_LIST_HEAD(&lock->requestors); + INIT_LIST_HEAD(&lock->owners); +} + +/* Transfer a lock handle (presumably so that variables can be moved between + stack and heap locations). */ +static void +move_lh_internal(lock_handle * new, lock_handle * old, int unlink_old) +{ + znode *node = old->node; + lock_stack *owner = old->owner; + int signaled; + + /* locks_list, modified by link_object() is not protected by + anything. This is valid because only current thread ever modifies + locks_list of its lock_stack. + */ + assert("nikita-1827", owner == get_current_lock_stack()); + assert("nikita-1831", new->owner == NULL); + + spin_lock_zlock(&node->lock); + + signaled = old->signaled; + if (unlink_old) { + unlink_object(old); + } else { + if (node->lock.nr_readers > 0) { + node->lock.nr_readers += 1; + } else { + node->lock.nr_readers -= 1; + } + if (signaled) + atomic_inc(&owner->nr_signaled); + if (owner->curpri) + node->lock.nr_hipri_owners += 1; + LOCK_CNT_INC(long_term_locked_znode); + + zref(node); + } + link_object(new, owner, node); + new->signaled = signaled; + + spin_unlock_zlock(&node->lock); +} + +void move_lh(lock_handle * new, lock_handle * old) +{ + move_lh_internal(new, old, /*unlink_old */ 1); +} + +void copy_lh(lock_handle * new, lock_handle * old) +{ + move_lh_internal(new, old, /*unlink_old */ 0); +} + +/* after getting -E_DEADLOCK we unlock znodes until this function returns false + */ +int reiser4_check_deadlock(void) +{ + lock_stack *owner = get_current_lock_stack(); + return atomic_read(&owner->nr_signaled) != 0; +} + +/* Before going to sleep we re-check "release lock" requests which might come + from threads with hi-pri lock priorities. 
*/ +int reiser4_prepare_to_sleep(lock_stack * owner) +{ + assert("nikita-1847", owner == get_current_lock_stack()); + + /* We return -E_DEADLOCK if one or more "give me the lock" messages are + * counted in nr_signaled */ + if (unlikely(atomic_read(&owner->nr_signaled) != 0)) { + assert("zam-959", !owner->curpri); + return RETERR(-E_DEADLOCK); + } + return 0; +} + +/* Wakes up a single thread */ +void __reiser4_wake_up(lock_stack * owner) +{ + atomic_set(&owner->wakeup, 1); + wake_up(&owner->wait); +} + +/* Puts a thread to sleep */ +void reiser4_go_to_sleep(lock_stack * owner) +{ + /* Well, we might sleep here, so holding of any spinlocks is no-no */ + assert("nikita-3027", reiser4_schedulable()); + + wait_event(owner->wait, atomic_read(&owner->wakeup)); + atomic_set(&owner->wakeup, 0); +} + +int lock_stack_isclean(lock_stack * owner) +{ + if (list_empty_careful(&owner->locks)) { + assert("zam-353", atomic_read(&owner->nr_signaled) == 0); + return 1; + } + + return 0; +} + +#if REISER4_DEBUG + +/* + * debugging functions + */ + +static void list_check(struct list_head *head) +{ + struct list_head *pos; + + list_for_each(pos, head) + assert("", (pos->prev != NULL && pos->next != NULL && + pos->prev->next == pos && pos->next->prev == pos)); +} + +/* check consistency of locking data-structures hanging of the @stack */ +static void check_lock_stack(lock_stack * stack) +{ + spin_lock_stack(stack); + /* check that stack->locks is not corrupted */ + list_check(&stack->locks); + spin_unlock_stack(stack); +} + +/* check consistency of locking data structures */ +void check_lock_data(void) +{ + check_lock_stack(&get_current_context()->stack); +} + +/* check consistency of locking data structures for @node */ +void check_lock_node_data(znode * node) +{ + spin_lock_zlock(&node->lock); + list_check(&node->lock.owners); + list_check(&node->lock.requestors); + spin_unlock_zlock(&node->lock); +} + +/* check that given lock request is dead lock safe. 
This check is, of course, + * not exhaustive. */ +static int +request_is_deadlock_safe(znode * node, znode_lock_mode mode, + znode_lock_request request) +{ + lock_stack *owner; + + owner = get_current_lock_stack(); + /* + * check that hipri lock request is not issued when there are locked + * nodes at the higher levels. + */ + if (request & ZNODE_LOCK_HIPRI && !(request & ZNODE_LOCK_NONBLOCK) && + znode_get_level(node) != 0) { + lock_handle *item; + + list_for_each_entry(item, &owner->locks, locks_link) { + znode *other; + + other = item->node; + + if (znode_get_level(other) == 0) + continue; + if (znode_get_level(other) > znode_get_level(node)) + return 0; + } + } + return 1; +} + +#endif + +/* return pointer to static storage with name of lock_mode. For + debugging */ +const char *lock_mode_name(znode_lock_mode lock/* lock mode to get name of */) +{ + if (lock == ZNODE_READ_LOCK) + return "read"; + else if (lock == ZNODE_WRITE_LOCK) + return "write"; + else { + static char buf[30]; + + sprintf(buf, "unknown: %i", lock); + return buf; + } +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 79 + End: +*/ diff --git a/fs/reiser4/lock.h b/fs/reiser4/lock.h new file mode 100644 index 000000000000..e74ed8faad58 --- /dev/null +++ b/fs/reiser4/lock.h @@ -0,0 +1,250 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Long term locking data structures. See lock.c for details. */ + +#ifndef __LOCK_H__ +#define __LOCK_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/node/node.h" +#include "txnmgr.h" +#include "readahead.h" + +#include +#include +#include /* for PAGE_CACHE_SIZE */ +#include +#include + +/* Per-znode lock object */ +struct zlock { + spinlock_t guard; + /* The number of readers if positive; the number of recursively taken + write locks if negative. 
Protected by zlock spin lock. */
+	int nr_readers;
+	/* A number of processes (lock_stacks) that have this object
+	   locked with high priority */
+	unsigned nr_hipri_owners;
+	/* A number of attempts to lock znode in high priority direction */
+	unsigned nr_hipri_requests;
+	/* A number of write lock requests in high priority direction */
+	unsigned nr_hipri_write_requests;
+	/* A linked list of lock_handle objects of all lock_stacks which
+	   have this lock object locked */
+	struct list_head owners;
+	/* A linked list of lock_stacks that wait for this lock */
+	struct list_head requestors;
+};
+
+static inline void spin_lock_zlock(zlock *lock)
+{
+	/* check that zlock is not locked */
+	assert("", LOCK_CNT_NIL(spin_locked_zlock));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	spin_lock(&lock->guard);
+
+	LOCK_CNT_INC(spin_locked_zlock);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void spin_unlock_zlock(zlock *lock)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_zlock));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(spin_locked_zlock);
+	LOCK_CNT_DEC(spin_locked);
+
+	spin_unlock(&lock->guard);
+}
+
+#define lock_is_locked(lock) ((lock)->nr_readers != 0)
+#define lock_is_rlocked(lock) ((lock)->nr_readers > 0)
+#define lock_is_wlocked(lock) ((lock)->nr_readers < 0)
+#define lock_is_wlocked_once(lock) ((lock)->nr_readers == -1)
+#define lock_can_be_rlocked(lock) ((lock)->nr_readers >= 0)
+#define lock_mode_compatible(lock, mode) \
+	(((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) || \
+	 ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock)))
+
+/* Since we have R/W znode locks we need additional bidirectional `link'
+   objects to implement n<->m relationship between lock owners and lock
+   objects. We call them `lock handles'.
+ + Locking: see lock.c/"SHORT-TERM LOCKING" +*/ +struct lock_handle { + /* This flag indicates that a signal to yield a lock was passed to + lock owner and counted in owner->nr_signalled + + Locking: this is accessed under spin lock on ->node. + */ + int signaled; + /* A link to owner of a lock */ + lock_stack *owner; + /* A link to znode locked */ + znode *node; + /* A list of all locks for a process */ + struct list_head locks_link; + /* A list of all owners for a znode */ + struct list_head owners_link; +}; + +struct lock_request { + /* A pointer to uninitialized link object */ + lock_handle *handle; + /* A pointer to the object we want to lock */ + znode *node; + /* Lock mode (ZNODE_READ_LOCK or ZNODE_WRITE_LOCK) */ + znode_lock_mode mode; + /* how dispatch_lock_requests() returns lock request result code */ + int ret_code; +}; + +/* A lock stack structure for accumulating locks owned by a process */ +struct lock_stack { + /* A guard lock protecting a lock stack */ + spinlock_t sguard; + /* number of znodes which were requested by high priority processes */ + atomic_t nr_signaled; + /* Current priority of a process + + This is only accessed by the current thread and thus requires no + locking. + */ + int curpri; + /* A list of all locks owned by this process. Elements can be added to + * this list only by the current thread. ->node pointers in this list + * can be only changed by the current thread. */ + struct list_head locks; + /* When lock_stack waits for the lock, it puts itself on double-linked + requestors list of that lock */ + struct list_head requestors_link; + /* Current lock request info. + + This is only accessed by the current thread and thus requires no + locking. + */ + struct lock_request request; + /* the following two fields are the lock stack's + * synchronization object to use with the standard linux/wait.h + * interface. See reiser4_go_to_sleep and __reiser4_wake_up for + * usage details. 
*/ + wait_queue_head_t wait; + atomic_t wakeup; +#if REISER4_DEBUG + int nr_locks; /* number of lock handles in the above list */ +#endif +}; + +/* + User-visible znode locking functions +*/ + +extern int longterm_lock_znode(lock_handle * handle, + znode * node, + znode_lock_mode mode, + znode_lock_request request); + +extern void longterm_unlock_znode(lock_handle * handle); + +extern int reiser4_check_deadlock(void); + +extern lock_stack *get_current_lock_stack(void); + +extern void init_lock_stack(lock_stack * owner); +extern void reiser4_init_lock(zlock * lock); + +static inline void init_lh(lock_handle *lh) +{ +#if REISER4_DEBUG + memset(lh, 0, sizeof *lh); + INIT_LIST_HEAD(&lh->locks_link); + INIT_LIST_HEAD(&lh->owners_link); +#else + lh->node = NULL; +#endif +} + +static inline void done_lh(lock_handle *lh) +{ + assert("zam-342", lh != NULL); + if (lh->node != NULL) + longterm_unlock_znode(lh); +} + +extern void move_lh(lock_handle * new, lock_handle * old); +extern void copy_lh(lock_handle * new, lock_handle * old); + +extern int reiser4_prepare_to_sleep(lock_stack * owner); +extern void reiser4_go_to_sleep(lock_stack * owner); +extern void __reiser4_wake_up(lock_stack * owner); + +extern int lock_stack_isclean(lock_stack * owner); + +/* zlock object state check macros: only used in assertions. Both forms imply + that the lock is held by the current thread. 
*/ +extern int znode_is_write_locked(const znode *); +extern void reiser4_invalidate_lock(lock_handle *); + +/* lock ordering is: first take zlock spin lock, then lock stack spin lock */ +#define spin_ordering_pred_stack(stack) \ + (LOCK_CNT_NIL(spin_locked_stack) && \ + LOCK_CNT_NIL(spin_locked_txnmgr) && \ + LOCK_CNT_NIL(spin_locked_inode) && \ + LOCK_CNT_NIL(rw_locked_cbk_cache) && \ + LOCK_CNT_NIL(spin_locked_super_eflush)) + +static inline void spin_lock_stack(lock_stack *stack) +{ + assert("", spin_ordering_pred_stack(stack)); + spin_lock(&(stack->sguard)); + LOCK_CNT_INC(spin_locked_stack); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_unlock_stack(lock_stack *stack) +{ + assert_spin_locked(&(stack->sguard)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_stack)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + LOCK_CNT_DEC(spin_locked_stack); + LOCK_CNT_DEC(spin_locked); + spin_unlock(&(stack->sguard)); +} + +static inline void reiser4_wake_up(lock_stack * owner) +{ + spin_lock_stack(owner); + __reiser4_wake_up(owner); + spin_unlock_stack(owner); +} + +const char *lock_mode_name(znode_lock_mode lock); + +#if REISER4_DEBUG +extern void check_lock_data(void); +extern void check_lock_node_data(znode * node); +#else +#define check_lock_data() noop +#define check_lock_node_data() noop +#endif + +/* __LOCK_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/oid.c b/fs/reiser4/oid.c new file mode 100644 index 000000000000..623f52c6f9d7 --- /dev/null +++ b/fs/reiser4/oid.c @@ -0,0 +1,141 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "debug.h" +#include "super.h" +#include "txnmgr.h" + +/* we used to have oid allocation plugin. It was removed because it + was recognized as providing unneeded level of abstraction. 
If one + ever will find it useful - look at yet_unneeded_abstractions/oid +*/ + +/* + * initialize in-memory data for oid allocator at @super. @nr_files and @next + * are provided by disk format plugin that reads them from the disk during + * mount. + */ +int oid_init_allocator(struct super_block *super, oid_t nr_files, oid_t next) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + sbinfo->next_to_use = next; + sbinfo->oids_in_use = nr_files; + return 0; +} + +/* + * allocate oid and return it. ABSOLUTE_MAX_OID is returned when allocator + * runs out of oids. + */ +oid_t oid_allocate(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t oid; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + if (sbinfo->next_to_use != ABSOLUTE_MAX_OID) { + oid = sbinfo->next_to_use++; + sbinfo->oids_in_use++; + } else + oid = ABSOLUTE_MAX_OID; + spin_unlock_reiser4_super(sbinfo); + return oid; +} + +/* + * Tell oid allocator that @oid is now free. + */ +int oid_release(struct super_block *super, oid_t oid UNUSED_ARG) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + sbinfo->oids_in_use--; + spin_unlock_reiser4_super(sbinfo); + return 0; +} + +/* + * return next @oid that would be allocated (i.e., returned by oid_allocate()) + * without actually allocating it. This is used by disk format plugin to save + * oid allocator state on the disk. + */ +oid_t oid_next(const struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t oid; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + oid = sbinfo->next_to_use; + spin_unlock_reiser4_super(sbinfo); + return oid; +} + +/* + * returns number of currently used oids. This is used by statfs(2) to report + * number of "inodes" and by disk format plugin to save oid allocator state on + * the disk. 
+ */ +long oids_used(const struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t used; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + used = sbinfo->oids_in_use; + spin_unlock_reiser4_super(sbinfo); + if (used < (__u64) ((long)~0) >> 1) + return (long)used; + else + return (long)-1; +} + +/* + * Count oid as allocated in atom. This is done after call to oid_allocate() + * at the point when we are irrevocably committed to creation of the new file + * (i.e., when oid allocation cannot be any longer rolled back due to some + * error). + */ +void oid_count_allocated(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked(); + atom->nr_objects_created++; + spin_unlock_atom(atom); +} + +/* + * Count oid as free in atom. This is done after call to oid_release() at the + * point when we are irrevocably committed to the deletion of the file (i.e., + * when oid release cannot be any longer rolled back due to some error). + */ +void oid_count_released(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked(); + atom->nr_objects_deleted++; + spin_unlock_atom(atom); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/page_cache.c b/fs/reiser4/page_cache.c new file mode 100644 index 000000000000..8cca578674c3 --- /dev/null +++ b/fs/reiser4/page_cache.c @@ -0,0 +1,691 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Memory pressure hooks. Fake inodes handling. */ + +/* GLOSSARY + + . Formatted and unformatted nodes. + Elements of reiser4 balanced tree to store data and metadata. + Unformatted nodes are pointed to by extent pointers. Such nodes + are used to store data of large objects. Unlike unformatted nodes, + formatted ones have associated format described by node4X plugin. + + . 
Jnode (or journal node) + The in-memory header which is used to track formatted and unformatted + nodes, bitmap nodes, etc. In particular, jnodes are used to track + transactional information associated with each block(see reiser4/jnode.c + for details). + + . Znode + The in-memory header which is used to track formatted nodes. Contains + embedded jnode (see reiser4/znode.c for details). +*/ + +/* We store all file system meta data (and data, of course) in the page cache. + + What does this mean? In stead of using bread/brelse we create special + "fake" inode (one per super block) and store content of formatted nodes + into pages bound to this inode in the page cache. In newer kernels bread() + already uses inode attached to block device (bd_inode). Advantage of having + our own fake inode is that we can install appropriate methods in its + address_space operations. Such methods are called by VM on memory pressure + (or during background page flushing) and we can use them to react + appropriately. + + In initial version we only support one block per page. Support for multiple + blocks per page is complicated by relocation. + + To each page, used by reiser4, jnode is attached. jnode is analogous to + buffer head. Difference is that jnode is bound to the page permanently: + jnode cannot be removed from memory until its backing page is. + + jnode contain pointer to page (->pg field) and page contain pointer to + jnode in ->private field. Pointer from jnode to page is protected to by + jnode's spinlock and pointer from page to jnode is protected by page lock + (PG_locked bit). Lock ordering is: first take page lock, then jnode spin + lock. To go into reverse direction use jnode_lock_page() function that uses + standard try-lock-and-release device. + + Properties: + + 1. when jnode-to-page mapping is established (by jnode_attach_page()), page + reference counter is increased. + + 2. 
when jnode-to-page mapping is destroyed (by page_clear_jnode(), page + reference counter is decreased. + + 3. on jload() reference counter on jnode page is increased, page is + kmapped and `referenced'. + + 4. on jrelse() inverse operations are performed. + + 5. kmapping/kunmapping of unformatted pages is done by read/write methods. + + DEADLOCKS RELATED TO MEMORY PRESSURE. [OUTDATED. Only interesting + historically.] + + [In the following discussion, `lock' invariably means long term lock on + znode.] (What about page locks?) + + There is some special class of deadlock possibilities related to memory + pressure. Locks acquired by other reiser4 threads are accounted for in + deadlock prevention mechanism (lock.c), but when ->vm_writeback() is + invoked additional hidden arc is added to the locking graph: thread that + tries to allocate memory waits for ->vm_writeback() to finish. If this + thread keeps lock and ->vm_writeback() tries to acquire this lock, deadlock + prevention is useless. + + Another related problem is possibility for ->vm_writeback() to run out of + memory itself. This is not a problem for ext2 and friends, because their + ->vm_writeback() don't allocate much memory, but reiser4 flush is + definitely able to allocate huge amounts of memory. + + It seems that there is no reliable way to cope with the problems above. In + stead it was decided that ->vm_writeback() (as invoked in the kswapd + context) wouldn't perform any flushing itself, but rather should just wake + up some auxiliary thread dedicated for this purpose (or, the same thread + that does periodic commit of old atoms (ktxnmgrd.c)). + + Details: + + 1. Page is called `reclaimable' against particular reiser4 mount F if this + page can be ultimately released by try_to_free_pages() under presumptions + that: + + a. ->vm_writeback() for F is no-op, and + + b. none of the threads accessing F are making any progress, and + + c. 
other reiser4 mounts obey the same memory reservation protocol as F + (described below). + + For example, clean un-pinned page, or page occupied by ext2 data are + reclaimable against any reiser4 mount. + + When there is more than one reiser4 mount in a system, condition (c) makes + reclaim-ability not easily verifiable beyond trivial cases mentioned above. + + THIS COMMENT IS VALID FOR "MANY BLOCKS ON PAGE" CASE + + Fake inode is used to bound formatted nodes and each node is indexed within + fake inode by its block number. If block size of smaller than page size, it + may so happen that block mapped to the page with formatted node is occupied + by unformatted node or is unallocated. This lead to some complications, + because flushing whole page can lead to an incorrect overwrite of + unformatted node that is moreover, can be cached in some other place as + part of the file body. To avoid this, buffers for unformatted nodes are + never marked dirty. Also pages in the fake are never marked dirty. This + rules out usage of ->writepage() as memory pressure hook. In stead + ->releasepage() is used. + + Josh is concerned that page->buffer is going to die. This should not pose + significant problem though, because we need to add some data structures to + the page anyway (jnode) and all necessary book keeping can be put there. + +*/ + +/* Life cycle of pages/nodes. + + jnode contains reference to page and page contains reference back to + jnode. This reference is counted in page ->count. Thus, page bound to jnode + cannot be released back into free pool. + + 1. Formatted nodes. + + 1. formatted node is represented by znode. When new znode is created its + ->pg pointer is NULL initially. + + 2. when node content is loaded into znode (by call to zload()) for the + first time following happens (in call to ->read_node() or + ->allocate_node()): + + 1. new page is added to the page cache. + + 2. this page is attached to znode and its ->count is increased. + + 3. 
page is kmapped. + + 3. if more calls to zload() follow (without corresponding zrelses), page + counter is left intact and in its stead ->d_count is increased in znode. + + 4. each call to zrelse decreases ->d_count. When ->d_count drops to zero + ->release_node() is called and page is kunmapped as result. + + 5. at some moment node can be captured by a transaction. Its ->x_count + is then increased by transaction manager. + + 6. if node is removed from the tree (empty node with JNODE_HEARD_BANSHEE + bit set) following will happen (also see comment at the top of znode.c): + + 1. when last lock is released, node will be uncaptured from + transaction. This released reference that transaction manager acquired + at the step 5. + + 2. when last reference is released, zput() detects that node is + actually deleted and calls ->delete_node() + operation. page_cache_delete_node() implementation detaches jnode from + page and releases page. + + 7. otherwise (node wasn't removed from the tree), last reference to + znode will be released after transaction manager committed transaction + node was in. This implies squallocing of this node (see + flush.c). Nothing special happens at this point. Znode is still in the + hash table and page is still attached to it. + + 8. znode is actually removed from the memory because of the memory + pressure, or during umount (znodes_tree_done()). Anyway, znode is + removed by the call to zdrop(). At this moment, page is detached from + znode and removed from the inode address space. 
+ +*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "entd.h" +#include "page_cache.h" +#include "ktxnmgrd.h" + +#include +#include +#include /* for struct page */ +#include /* for struct page */ +#include +#include +#include +#include + +static struct bio *page_bio(struct page *, jnode * , int rw, gfp_t gfp); + +static struct address_space_operations formatted_fake_as_ops; + +static const oid_t fake_ino = 0x1; +static const oid_t bitmap_ino = 0x2; +static const oid_t cc_ino = 0x3; + +static void +init_fake_inode(struct super_block *super, struct inode *fake, + struct inode **pfake) +{ + assert("nikita-2168", fake->i_state & I_NEW); + fake->i_mapping->a_ops = &formatted_fake_as_ops; + inode_attach_wb(fake, NULL); + *pfake = fake; + /* NOTE-NIKITA something else? */ + unlock_new_inode(fake); +} + +/** + * reiser4_init_formatted_fake - iget inodes for formatted nodes and bitmaps + * @super: super block to init fake inode for + * + * Initializes fake inode to which formatted nodes are bound in the page cache + * and inode for bitmaps. 
+ */ +int reiser4_init_formatted_fake(struct super_block *super) +{ + struct inode *fake; + struct inode *bitmap; + struct inode *cc; + reiser4_super_info_data *sinfo; + + assert("nikita-1703", super != NULL); + + sinfo = get_super_private_nocheck(super); + fake = iget_locked(super, oid_to_ino(fake_ino)); + + if (fake != NULL) { + init_fake_inode(super, fake, &sinfo->fake); + + bitmap = iget_locked(super, oid_to_ino(bitmap_ino)); + if (bitmap != NULL) { + init_fake_inode(super, bitmap, &sinfo->bitmap); + + cc = iget_locked(super, oid_to_ino(cc_ino)); + if (cc != NULL) { + init_fake_inode(super, cc, &sinfo->cc); + return 0; + } else { + iput(sinfo->fake); + iput(sinfo->bitmap); + sinfo->fake = NULL; + sinfo->bitmap = NULL; + } + } else { + iput(sinfo->fake); + sinfo->fake = NULL; + } + } + return RETERR(-ENOMEM); +} + +/** + * reiser4_done_formatted_fake - release inode used by formatted nodes and bitmaps + * @super: super block to init fake inode for + * + * Releases inodes which were used as address spaces of bitmap and formatted + * nodes. + */ +void reiser4_done_formatted_fake(struct super_block *super) +{ + reiser4_super_info_data *sinfo; + + sinfo = get_super_private_nocheck(super); + + if (sinfo->fake != NULL) { + iput(sinfo->fake); + sinfo->fake = NULL; + } + + if (sinfo->bitmap != NULL) { + iput(sinfo->bitmap); + sinfo->bitmap = NULL; + } + + if (sinfo->cc != NULL) { + iput(sinfo->cc); + sinfo->cc = NULL; + } + return; +} + +void reiser4_wait_page_writeback(struct page *page) +{ + assert("zam-783", PageLocked(page)); + + do { + unlock_page(page); + wait_on_page_writeback(page); + lock_page(page); + } while (PageWriteback(page)); +} + +/* return tree @page is in */ +reiser4_tree *reiser4_tree_by_page(const struct page *page/* page to query */) +{ + assert("nikita-2461", page != NULL); + return &get_super_private(page->mapping->host->i_sb)->tree; +} + +/* completion handler for single page bio-based read. + + mpage_end_io_read() would also do. 
But it's static. + +*/ +static void end_bio_single_page_read(struct bio *bio) +{ + struct page *page; + + page = bio->bi_io_vec[0].bv_page; + + if (!bio->bi_status) + SetPageUptodate(page); + else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + bio_put(bio); +} + +/* completion handler for single page bio-based write. + + mpage_end_io_write() would also do. But it's static. + +*/ +static void end_bio_single_page_write(struct bio *bio) +{ + struct page *page; + + page = bio->bi_io_vec[0].bv_page; + + if (bio->bi_status) + SetPageError(page); + end_page_writeback(page); + bio_put(bio); +} + +/* ->readpage() method for formatted nodes */ +static int formatted_readpage(struct file *f UNUSED_ARG, + struct page *page/* page to read */) +{ + assert("nikita-2412", PagePrivate(page) && jprivate(page)); + return reiser4_page_io(page, jprivate(page), READ, + reiser4_ctx_gfp_mask_get()); +} + +/** + * reiser4_page_io - submit single-page bio request + * @page: page to perform io for + * @node: jnode of page + * @rw: read or write + * @gfp: gfp mask for bio allocation + * + * Submits single page read or write. 
+ */ +int reiser4_page_io(struct page *page, jnode *node, int rw, gfp_t gfp) +{ + struct bio *bio; + int result; + + assert("nikita-2094", page != NULL); + assert("nikita-2226", PageLocked(page)); + assert("nikita-2634", node != NULL); + assert("nikita-2893", rw == READ || rw == WRITE); + + if (rw) { + if (unlikely(page->mapping->host->i_sb->s_flags & MS_RDONLY)) { + unlock_page(page); + return 0; + } + } + + bio = page_bio(page, node, rw, gfp); + if (!IS_ERR(bio)) { + if (rw == WRITE) { + set_page_writeback(page); + unlock_page(page); + } + bio_set_op_attrs(bio, rw, 0); + submit_bio(bio); + result = 0; + } else { + unlock_page(page); + result = PTR_ERR(bio); + } + + return result; +} + +/* helper function to construct bio for page */ +static struct bio *page_bio(struct page *page, jnode * node, int rw, gfp_t gfp) +{ + struct bio *bio; + assert("nikita-2092", page != NULL); + assert("nikita-2633", node != NULL); + + /* Simple implementation in the assumption that blocksize == pagesize. + + We only have to submit one block, but submit_bh() will allocate bio + anyway, so lets use all the bells-and-whistles of bio code. + */ + + bio = bio_alloc(gfp, 1); + if (bio != NULL) { + int blksz; + struct super_block *super; + reiser4_block_nr blocknr; + + super = page->mapping->host->i_sb; + assert("nikita-2029", super != NULL); + blksz = super->s_blocksize; + assert("nikita-2028", blksz == (int)PAGE_SIZE); + + spin_lock_jnode(node); + blocknr = *jnode_get_io_block(node); + spin_unlock_jnode(node); + + assert("nikita-2275", blocknr != (reiser4_block_nr) 0); + assert("nikita-2276", !reiser4_blocknr_is_fake(&blocknr)); + + bio_set_dev(bio, super->s_bdev); + /* fill bio->bi_iter.bi_sector before calling bio_add_page(), because + * q->merge_bvec_fn may want to inspect it (see + * drivers/md/linear.c:linear_mergeable_bvec() for example. 
*/ + bio->bi_iter.bi_sector = blocknr * (blksz >> 9); + + if (!bio_add_page(bio, page, blksz, 0)) { + warning("nikita-3452", + "Single page bio cannot be constructed"); + return ERR_PTR(RETERR(-EINVAL)); + } + + /* bio -> bi_idx is filled by bio_init() */ + bio->bi_end_io = (rw == READ) ? + end_bio_single_page_read : end_bio_single_page_write; + + return bio; + } else + return ERR_PTR(RETERR(-ENOMEM)); +} + +#if 0 +static int can_hit_entd(reiser4_context *ctx, struct super_block *s) +{ + if (ctx == NULL || ((unsigned long)ctx->magic) != context_magic) + return 1; + if (ctx->super != s) + return 1; + if (get_super_private(s)->entd.tsk == current) + return 0; + if (!lock_stack_isclean(&ctx->stack)) + return 0; + if (ctx->trans->atom != NULL) + return 0; + return 1; +} +#endif + +/** + * reiser4_writepage - writepage of struct address_space_operations + * @page: page to write + * @wbc: + * + * + */ +/* Common memory pressure notification. */ +int reiser4_writepage(struct page *page, + struct writeback_control *wbc) +{ + /* + * assert("edward-1562", + * can_hit_entd(get_current_context_check(), sb)); + */ + assert("vs-828", PageLocked(page)); + + return write_page_by_ent(page, wbc); +} + +/* ->set_page_dirty() method of formatted address_space */ +static int formatted_set_page_dirty(struct page *page) +{ + assert("nikita-2173", page != NULL); + BUG(); + return __set_page_dirty_nobuffers(page); +} + +/* writepages method of address space operations in reiser4 is used to involve + into transactions pages which are dirtied via mmap. Only regular files can + have such pages. Fake inode is used to access formatted nodes via page + cache. 
As formatted nodes can never be mmaped, fake inode's writepages has + nothing to do */ +static int +writepages_fake(struct address_space *mapping, struct writeback_control *wbc) +{ + return 0; +} + +/* address space operations for the fake inode */ +static struct address_space_operations formatted_fake_as_ops = { + /* Perform a writeback of a single page as a memory-freeing + * operation. */ + .writepage = reiser4_writepage, + /* this is called to read formatted node */ + .readpage = formatted_readpage, + /* ->sync_page() method of fake inode address space operations. Called + from wait_on_page() and lock_page(). + + This is most annoyingly misnomered method. Actually it is called + from wait_on_page_bit() and lock_page() and its purpose is to + actually start io by jabbing device drivers. + .sync_page = block_sync_page, + */ + /* Write back some dirty pages from this mapping. Called from sync. + called during sync (pdflush) */ + .writepages = writepages_fake, + /* Set a page dirty */ + .set_page_dirty = formatted_set_page_dirty, + /* used for read-ahead. Not applicable */ + .readpages = NULL, + .write_begin = NULL, + .write_end = NULL, + .bmap = NULL, + /* called just before page is being detached from inode mapping and + removed from memory. Called on truncate, cut/squeeze, and + umount. */ + .invalidatepage = reiser4_invalidatepage, + /* this is called by shrink_cache() so that file system can try to + release objects (jnodes, buffers, journal heads) attached to page + and, may be made page itself free-able. + */ + .releasepage = reiser4_releasepage, + .direct_IO = NULL, + .migratepage = reiser4_migratepage +}; + +/* called just before page is released (no longer used by reiser4). Callers: + jdelete() and extent2tail(). 
*/ +void reiser4_drop_page(struct page *page) +{ + assert("nikita-2181", PageLocked(page)); + clear_page_dirty_for_io(page); + ClearPageUptodate(page); +#if defined(PG_skipped) + ClearPageSkipped(page); +#endif + unlock_page(page); +} + +#define JNODE_GANG_SIZE (16) + +/* find all jnodes from range specified and invalidate them */ +static int +truncate_jnodes_range(struct inode *inode, pgoff_t from, pgoff_t count) +{ + reiser4_inode *info; + int truncated_jnodes; + reiser4_tree *tree; + unsigned long index; + unsigned long end; + + if (inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) + /* + * No need to get rid of jnodes here: if the single jnode of + * page cluster did not have page, then it was found and killed + * before in + * truncate_complete_page_cluster()->jput()->jput_final(), + * otherwise it will be dropped by reiser4_invalidatepage() + */ + return 0; + truncated_jnodes = 0; + + info = reiser4_inode_data(inode); + tree = reiser4_tree_by_inode(inode); + + index = from; + end = from + count; + + while (1) { + jnode *gang[JNODE_GANG_SIZE]; + int taken; + int i; + jnode *node; + + assert("nikita-3466", index <= end); + + read_lock_tree(tree); + taken = + radix_tree_gang_lookup(jnode_tree_by_reiser4_inode(info), + (void **)gang, index, + JNODE_GANG_SIZE); + for (i = 0; i < taken; ++i) { + node = gang[i]; + if (index_jnode(node) < end) + jref(node); + else + gang[i] = NULL; + } + read_unlock_tree(tree); + + for (i = 0; i < taken; ++i) { + node = gang[i]; + if (node != NULL) { + index = max(index, index_jnode(node)); + spin_lock_jnode(node); + assert("edward-1457", node->pg == NULL); + /* this is always called after + truncate_inode_pages_range(). Therefore, here + jnode can not have page. 
New pages can not be + created because truncate_jnodes_range goes + under exclusive access on file obtained, + where as new page creation requires + non-exclusive access obtained */ + JF_SET(node, JNODE_HEARD_BANSHEE); + reiser4_uncapture_jnode(node); + unhash_unformatted_jnode(node); + truncated_jnodes++; + jput(node); + } else + break; + } + if (i != taken || taken == 0) + break; + } + return truncated_jnodes; +} + +/* Truncating files in reiser4: problems and solutions. + + VFS calls fs's truncate after it has called truncate_inode_pages() + to get rid of pages corresponding to part of file being truncated. + In reiser4 it may cause existence of unallocated extents which do + not have jnodes. Flush code does not expect that. Solution of this + problem is straightforward. As vfs's truncate is implemented using + setattr operation, it seems reasonable to have ->setattr() that + will cut file body. However, flush code also does not expect dirty + pages without parent items, so it is impossible to cut all items, + then truncate all pages in two steps. We resolve this problem by + cutting items one-by-one. Each such fine-grained step performed + under longterm znode lock calls at the end ->kill_hook() method of + a killed item to remove its binded pages and jnodes. + + The following function is a common part of mentioned kill hooks. + Also, this is called before tail-to-extent conversion (to not manage + few copies of the data). 
+*/ +void reiser4_invalidate_pages(struct address_space *mapping, pgoff_t from, + unsigned long count, int even_cows) +{ + loff_t from_bytes, count_bytes; + + if (count == 0) + return; + from_bytes = ((loff_t) from) << PAGE_SHIFT; + count_bytes = ((loff_t) count) << PAGE_SHIFT; + + unmap_mapping_range(mapping, from_bytes, count_bytes, even_cows); + truncate_inode_pages_range(mapping, from_bytes, + from_bytes + count_bytes - 1); + truncate_jnodes_range(mapping->host, from, count); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/page_cache.h b/fs/reiser4/page_cache.h new file mode 100644 index 000000000000..32106f17b454 --- /dev/null +++ b/fs/reiser4/page_cache.h @@ -0,0 +1,64 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Memory pressure hooks. Fake inodes handling. See page_cache.c. */ + +#if !defined(__REISER4_PAGE_CACHE_H__) +#define __REISER4_PAGE_CACHE_H__ + +#include "forward.h" +#include "context.h" /* for reiser4_ctx_gfp_mask_get() */ + +#include /* for struct super_block, address_space */ +#include /* for struct page */ +#include /* for lock_page() */ +#include /* for __vmalloc() */ + +extern int reiser4_init_formatted_fake(struct super_block *); +extern void reiser4_done_formatted_fake(struct super_block *); + +extern reiser4_tree *reiser4_tree_by_page(const struct page *); + +extern void reiser4_wait_page_writeback(struct page *); +static inline void lock_and_wait_page_writeback(struct page *page) +{ + lock_page(page); + if (unlikely(PageWriteback(page))) + reiser4_wait_page_writeback(page); +} + +#define jprivate(page) ((jnode *)page_private(page)) + +extern int reiser4_page_io(struct page *, jnode *, int rw, gfp_t); +extern void reiser4_drop_page(struct page *); +extern void reiser4_invalidate_pages(struct address_space *, pgoff_t from, + unsigned long 
count, int even_cows); +extern void capture_reiser4_inodes(struct super_block *, + struct writeback_control *); +static inline void *reiser4_vmalloc(unsigned long size) +{ + return __vmalloc(size, + reiser4_ctx_gfp_mask_get() | __GFP_HIGHMEM, + PAGE_KERNEL); +} + +#define PAGECACHE_TAG_REISER4_MOVED PAGECACHE_TAG_DIRTY + +#if REISER4_DEBUG +extern void print_page(const char *prefix, struct page *page); +#else +#define print_page(prf, p) noop +#endif + +/* __REISER4_PAGE_CACHE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/Makefile b/fs/reiser4/plugin/Makefile new file mode 100644 index 000000000000..4b2c9f8bab6e --- /dev/null +++ b/fs/reiser4/plugin/Makefile @@ -0,0 +1,26 @@ +obj-$(CONFIG_REISER4_FS) += plugins.o + +plugins-objs := \ + plugin.o \ + plugin_set.o \ + object.o \ + inode_ops.o \ + inode_ops_rename.o \ + file_ops.o \ + file_ops_readdir.o \ + file_plugin_common.o \ + dir_plugin_common.o \ + digest.o \ + hash.o \ + fibration.o \ + tail_policy.o \ + regular.o + +obj-$(CONFIG_REISER4_FS) += item/ +obj-$(CONFIG_REISER4_FS) += file/ +obj-$(CONFIG_REISER4_FS) += dir/ +obj-$(CONFIG_REISER4_FS) += node/ +obj-$(CONFIG_REISER4_FS) += compress/ +obj-$(CONFIG_REISER4_FS) += space/ +obj-$(CONFIG_REISER4_FS) += disk_format/ +obj-$(CONFIG_REISER4_FS) += security/ diff --git a/fs/reiser4/plugin/cluster.c b/fs/reiser4/plugin/cluster.c new file mode 100644 index 000000000000..f54b19ea6887 --- /dev/null +++ b/fs/reiser4/plugin/cluster.c @@ -0,0 +1,72 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Contains reiser4 cluster plugins (see + http://www.namesys.com/cryptcompress_design.html + "Concepts of clustering" for details). 
*/ + +#include "plugin_header.h" +#include "plugin.h" +#include "../inode.h" + +static int change_cluster(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + assert("edward-1324", inode != NULL); + assert("edward-1325", plugin != NULL); + assert("edward-1326", is_reiser4_inode(inode)); + assert("edward-1327", plugin->h.type_id == REISER4_CLUSTER_PLUGIN_TYPE); + + /* Can't change the cluster plugin for already existent regular files */ + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + /* If matches, nothing to change. */ + if (inode_hash_plugin(inode) != NULL && + inode_hash_plugin(inode)->h.id == plugin->h.id) + return 0; + + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_CLUSTER, plugin); +} + +static reiser4_plugin_ops cluster_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = &change_cluster +}; + +#define SUPPORT_CLUSTER(SHIFT, ID, LABEL, DESC) \ + [CLUSTER_ ## ID ## _ID] = { \ + .h = { \ + .type_id = REISER4_CLUSTER_PLUGIN_TYPE, \ + .id = CLUSTER_ ## ID ## _ID, \ + .pops = &cluster_plugin_ops, \ + .label = LABEL, \ + .desc = DESC, \ + .linkage = {NULL, NULL} \ + }, \ + .shift = SHIFT \ + } + +cluster_plugin cluster_plugins[LAST_CLUSTER_ID] = { + SUPPORT_CLUSTER(16, 64K, "64K", "Large"), + SUPPORT_CLUSTER(15, 32K, "32K", "Big"), + SUPPORT_CLUSTER(14, 16K, "16K", "Average"), + SUPPORT_CLUSTER(13, 8K, "8K", "Small"), + SUPPORT_CLUSTER(12, 4K, "4K", "Minimal") +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/cluster.h b/fs/reiser4/plugin/cluster.h new file mode 100644 index 000000000000..6bf931609fca --- /dev/null +++ b/fs/reiser4/plugin/cluster.h @@ -0,0 +1,410 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* This file contains size/offset translators, 
modulators + and other helper functions. */ + +#if !defined(__FS_REISER4_CLUSTER_H__) +#define __FS_REISER4_CLUSTER_H__ + +#include "../inode.h" + +static inline int inode_cluster_shift(struct inode *inode) +{ + assert("edward-92", inode != NULL); + assert("edward-93", reiser4_inode_data(inode) != NULL); + + return inode_cluster_plugin(inode)->shift; +} + +static inline unsigned cluster_nrpages_shift(struct inode *inode) +{ + return inode_cluster_shift(inode) - PAGE_SHIFT; +} + +/* cluster size in page units */ +static inline unsigned cluster_nrpages(struct inode *inode) +{ + return 1U << cluster_nrpages_shift(inode); +} + +static inline size_t inode_cluster_size(struct inode *inode) +{ + assert("edward-96", inode != NULL); + + return 1U << inode_cluster_shift(inode); +} + +static inline cloff_t pg_to_clust(pgoff_t idx, struct inode *inode) +{ + return idx >> cluster_nrpages_shift(inode); +} + +static inline pgoff_t clust_to_pg(cloff_t idx, struct inode *inode) +{ + return idx << cluster_nrpages_shift(inode); +} + +static inline pgoff_t pg_to_clust_to_pg(pgoff_t idx, struct inode *inode) +{ + return clust_to_pg(pg_to_clust(idx, inode), inode); +} + +static inline pgoff_t off_to_pg(loff_t off) +{ + return (off >> PAGE_SHIFT); +} + +static inline loff_t pg_to_off(pgoff_t idx) +{ + return ((loff_t) (idx) << PAGE_SHIFT); +} + +static inline cloff_t off_to_clust(loff_t off, struct inode *inode) +{ + return off >> inode_cluster_shift(inode); +} + +static inline loff_t clust_to_off(cloff_t idx, struct inode *inode) +{ + return (loff_t) idx << inode_cluster_shift(inode); +} + +static inline loff_t off_to_clust_to_off(loff_t off, struct inode *inode) +{ + return clust_to_off(off_to_clust(off, inode), inode); +} + +static inline pgoff_t off_to_clust_to_pg(loff_t off, struct inode *inode) +{ + return clust_to_pg(off_to_clust(off, inode), inode); +} + +static inline unsigned off_to_pgoff(loff_t off) +{ + return off & (PAGE_SIZE - 1); +} + +static inline unsigned 
off_to_cloff(loff_t off, struct inode *inode) +{ + return off & ((loff_t) (inode_cluster_size(inode)) - 1); +} + +static inline pgoff_t offset_in_clust(struct page *page) +{ + assert("edward-1488", page != NULL); + assert("edward-1489", page->mapping != NULL); + + return page_index(page) & ((cluster_nrpages(page->mapping->host)) - 1); +} + +static inline int first_page_in_cluster(struct page *page) +{ + return offset_in_clust(page) == 0; +} + +static inline int last_page_in_cluster(struct page *page) +{ + return offset_in_clust(page) == + cluster_nrpages(page->mapping->host) - 1; +} + +static inline unsigned +pg_to_off_to_cloff(unsigned long idx, struct inode *inode) +{ + return off_to_cloff(pg_to_off(idx), inode); +} + +/*********************** Size translators **************************/ + +/* Translate linear size. + * New units are (1 << @blk_shift) times larger than old ones. + * In other words, calculate number of logical blocks, occupied + * by @count elements + */ +static inline unsigned long size_in_blocks(loff_t count, unsigned blkbits) +{ + return (count + (1UL << blkbits) - 1) >> blkbits; +} + +/* size in pages */ +static inline pgoff_t size_in_pages(loff_t size) +{ + return size_in_blocks(size, PAGE_SHIFT); +} + +/* size in logical clusters */ +static inline cloff_t size_in_lc(loff_t size, struct inode *inode) +{ + return size_in_blocks(size, inode_cluster_shift(inode)); +} + +/* size in pages to the size in page clusters */ +static inline cloff_t sp_to_spcl(pgoff_t size, struct inode *inode) +{ + return size_in_blocks(size, cluster_nrpages_shift(inode)); +} + +/*********************** Size modulators ***************************/ + +/* + Modulate linear size by nominated block size and offset. + + The "finite" function (which is zero almost everywhere). + How much is a height of the figure at a position @pos, + when trying to construct rectangle of height (1 << @blkbits), + and square @size. 
 + + ****** + ******* + ******* + ******* + ----------> pos +*/ +static inline unsigned __mbb(loff_t size, unsigned long pos, int blkbits) +{ + unsigned end = size >> blkbits; + if (pos < end) + return 1U << blkbits; + if (unlikely(pos > end)) + return 0; + return size & ~(~0ull << blkbits); +} + +/* the same as above, but block size is page size */ +static inline unsigned __mbp(loff_t size, pgoff_t pos) +{ + return __mbb(size, pos, PAGE_SHIFT); +} + +/* number of file's bytes in the nominated logical cluster */ +static inline unsigned lbytes(cloff_t index, struct inode *inode) +{ + return __mbb(i_size_read(inode), index, inode_cluster_shift(inode)); +} + +/* number of file's bytes in the nominated page */ +static inline unsigned pbytes(pgoff_t index, struct inode *inode) +{ + return __mbp(i_size_read(inode), index); +} + +/** + * number of pages occupied by @win->count bytes starting from + * @win->off at logical cluster defined by @win. This is exactly + * a number of pages to be modified and dirtied in any cluster operation. 
+ */ +static inline pgoff_t win_count_to_nrpages(struct reiser4_slide * win) +{ + return ((win->off + win->count + + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT) - + off_to_pg(win->off); +} + +/* return true, if logical cluster is not occupied by the file */ +static inline int new_logical_cluster(struct cluster_handle *clust, + struct inode *inode) +{ + return clust_to_off(clust->index, inode) >= i_size_read(inode); +} + +/* return true, if pages @p1 and @p2 are of the same page cluster */ +static inline int same_page_cluster(struct page *p1, struct page *p2) +{ + assert("edward-1490", p1 != NULL); + assert("edward-1491", p2 != NULL); + assert("edward-1492", p1->mapping != NULL); + assert("edward-1493", p2->mapping != NULL); + + return (pg_to_clust(page_index(p1), p1->mapping->host) == + pg_to_clust(page_index(p2), p2->mapping->host)); +} + +static inline int cluster_is_complete(struct cluster_handle *clust, + struct inode *inode) +{ + return clust->tc.lsize == inode_cluster_size(inode); +} + +static inline void reiser4_slide_init(struct reiser4_slide *win) +{ + assert("edward-1084", win != NULL); + memset(win, 0, sizeof *win); +} + +static inline tfm_action +cluster_get_tfm_act(struct tfm_cluster *tc) +{ + assert("edward-1356", tc != NULL); + return tc->act; +} + +static inline void +cluster_set_tfm_act(struct tfm_cluster *tc, tfm_action act) +{ + assert("edward-1356", tc != NULL); + tc->act = act; +} + +static inline void cluster_init_act(struct cluster_handle *clust, + tfm_action act, + struct reiser4_slide *window) +{ + assert("edward-84", clust != NULL); + memset(clust, 0, sizeof *clust); + cluster_set_tfm_act(&clust->tc, act); + clust->dstat = INVAL_DISK_CLUSTER; + clust->win = window; +} + +static inline void cluster_init_read(struct cluster_handle *clust, + struct reiser4_slide *window) +{ + cluster_init_act(clust, TFMA_READ, window); +} + +static inline void cluster_init_write(struct cluster_handle *clust, + struct reiser4_slide *window) +{ + 
cluster_init_act(clust, TFMA_WRITE, window); +} + +/* true if @p1 and @p2 are items of the same disk cluster */ +static inline int same_disk_cluster(const coord_t *p1, const coord_t *p2) +{ + /* drop this if you have other items to aggregate */ + assert("edward-1494", item_id_by_coord(p1) == CTAIL_ID); + + return item_plugin_by_coord(p1)->b.mergeable(p1, p2); +} + +static inline int dclust_get_extension_dsize(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.dsize; +} + +static inline void dclust_set_extension_dsize(hint_t *hint, int dsize) +{ + hint->ext_coord.extension.ctail.dsize = dsize; +} + +static inline int dclust_get_extension_shift(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.shift; +} + +static inline int dclust_get_extension_ncount(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.ncount; +} + +static inline void dclust_inc_extension_ncount(hint_t *hint) +{ + hint->ext_coord.extension.ctail.ncount++; +} + +static inline void dclust_init_extension(hint_t *hint) +{ + memset(&hint->ext_coord.extension.ctail, 0, + sizeof(hint->ext_coord.extension.ctail)); +} + +static inline int hint_is_unprepped_dclust(hint_t *hint) +{ + assert("edward-1451", hint_is_valid(hint)); + return dclust_get_extension_shift(hint) == (int)UCTAIL_SHIFT; +} + +static inline void coord_set_between_clusters(coord_t *coord) +{ +#if REISER4_DEBUG + int result; + result = zload(coord->node); + assert("edward-1296", !result); +#endif + if (!coord_is_between_items(coord)) { + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + } +#if REISER4_DEBUG + zrelse(coord->node); +#endif +} + +int reiser4_inflate_cluster(struct cluster_handle *, struct inode *); +int find_disk_cluster(struct cluster_handle *, struct inode *, int read, + znode_lock_mode mode); +int checkout_logical_cluster(struct cluster_handle *, jnode * , struct inode *); +int reiser4_deflate_cluster(struct cluster_handle *, struct inode *); +void truncate_complete_page_cluster(struct inode *inode, 
cloff_t start, + int even_cows); +void invalidate_hint_cluster(struct cluster_handle *clust); +int get_disk_cluster_locked(struct cluster_handle *clust, struct inode *inode, + znode_lock_mode lock_mode); +void reset_cluster_params(struct cluster_handle *clust); +int set_cluster_by_page(struct cluster_handle *clust, struct page *page, + int count); +int prepare_page_cluster(struct inode *inode, struct cluster_handle *clust, + rw_op rw); +void __put_page_cluster(int from, int count, struct page **pages, + struct inode *inode); +void put_page_cluster(struct cluster_handle *clust, + struct inode *inode, rw_op rw); +void put_cluster_handle(struct cluster_handle *clust); +int grab_tfm_stream(struct inode *inode, struct tfm_cluster *tc, + tfm_stream_id id); +int tfm_cluster_is_uptodate(struct tfm_cluster *tc); +void tfm_cluster_set_uptodate(struct tfm_cluster *tc); +void tfm_cluster_clr_uptodate(struct tfm_cluster *tc); + +/* move cluster handle to the target position + specified by the page of index @pgidx */ +static inline void move_cluster_forward(struct cluster_handle *clust, + struct inode *inode, + pgoff_t pgidx) +{ + assert("edward-1297", clust != NULL); + assert("edward-1298", inode != NULL); + + reset_cluster_params(clust); + if (clust->index_valid && + /* Hole in the indices. 
Hint became invalid and can not be + used by find_cluster_item() even if seal/node versions + will coincide */ + pg_to_clust(pgidx, inode) != clust->index + 1) { + reiser4_unset_hint(clust->hint); + invalidate_hint_cluster(clust); + } + clust->index = pg_to_clust(pgidx, inode); + clust->index_valid = 1; +} + +static inline int alloc_clust_pages(struct cluster_handle *clust, + struct inode *inode) +{ + assert("edward-791", clust != NULL); + assert("edward-792", inode != NULL); + clust->pages = + kmalloc(sizeof(*clust->pages) << inode_cluster_shift(inode), + reiser4_ctx_gfp_mask_get()); + if (!clust->pages) + return -ENOMEM; + return 0; +} + +static inline void free_clust_pages(struct cluster_handle *clust) +{ + kfree(clust->pages); +} + +#endif /* __FS_REISER4_CLUSTER_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/Makefile b/fs/reiser4/plugin/compress/Makefile new file mode 100644 index 000000000000..7fa4adb9621a --- /dev/null +++ b/fs/reiser4/plugin/compress/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += compress_plugins.o + +compress_plugins-objs := \ + compress.o \ + compress_mode.o diff --git a/fs/reiser4/plugin/compress/compress.c b/fs/reiser4/plugin/compress/compress.c new file mode 100644 index 000000000000..ef568a099090 --- /dev/null +++ b/fs/reiser4/plugin/compress/compress.c @@ -0,0 +1,521 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* reiser4 compression transform plugins */ + +#include "../../debug.h" +#include "../../inode.h" +#include "../plugin.h" + +#include +#include +#include +#include +#include + +static int change_compression(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + assert("edward-1316", inode != NULL); + assert("edward-1317", plugin != NULL); + assert("edward-1318", is_reiser4_inode(inode)); 
+ assert("edward-1319", + plugin->h.type_id == REISER4_COMPRESSION_PLUGIN_TYPE); + + /* cannot change compression plugin of already existing regular object */ + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + /* If matches, nothing to change. */ + if (inode_hash_plugin(inode) != NULL && + inode_hash_plugin(inode)->h.id == plugin->h.id) + return 0; + + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_COMPRESSION, plugin); +} + +static reiser4_plugin_ops compression_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = &change_compression +}; + +/******************************************************************************/ +/* gzip1 compression */ +/******************************************************************************/ + +#define GZIP1_DEF_LEVEL Z_BEST_SPEED +#define GZIP1_DEF_WINBITS 15 +#define GZIP1_DEF_MEMLEVEL MAX_MEM_LEVEL + +static int gzip1_init(void) +{ + return 0; +} + +static int gzip1_overrun(unsigned src_len UNUSED_ARG) +{ + return 0; +} + +static coa_t gzip1_alloc(tfm_action act) +{ + coa_t coa = NULL; + int ret = 0; + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(zlib_deflate_workspacesize(MAX_WBITS, + MAX_MEM_LEVEL)); + if (!coa) { + ret = -ENOMEM; + break; + } + break; + case TFMA_READ: /* decompress */ + coa = reiser4_vmalloc(zlib_inflate_workspacesize()); + if (!coa) { + ret = -ENOMEM; + break; + } + break; + default: + impossible("edward-767", "unknown tfm action"); + } + if (ret) + return ERR_PTR(ret); + return coa; +} + +static void gzip1_free(coa_t coa, tfm_action act) +{ + assert("edward-769", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(coa); + break; + case TFMA_READ: /* decompress */ + vfree(coa); + break; + default: + impossible("edward-770", "unknown tfm action"); + } + return; +} + +static int gzip1_min_size_deflate(void) +{ + return 64; +} + +static void 
+gzip1_compress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int ret = 0; + struct z_stream_s stream; + + assert("edward-842", coa != NULL); + assert("edward-875", src_len != 0); + + stream.workspace = coa; + ret = zlib_deflateInit2(&stream, GZIP1_DEF_LEVEL, Z_DEFLATED, + -GZIP1_DEF_WINBITS, GZIP1_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); + if (ret != Z_OK) { + warning("edward-771", "zlib_deflateInit2 returned %d\n", ret); + goto rollback; + } + ret = zlib_deflateReset(&stream); + if (ret != Z_OK) { + warning("edward-772", "zlib_deflateReset returned %d\n", ret); + goto rollback; + } + stream.next_in = src_first; + stream.avail_in = src_len; + stream.next_out = dst_first; + stream.avail_out = *dst_len; + + ret = zlib_deflate(&stream, Z_FINISH); + if (ret != Z_STREAM_END) { + if (ret != Z_OK) + warning("edward-773", + "zlib_deflate returned %d\n", ret); + goto rollback; + } + *dst_len = stream.total_out; + return; + rollback: + *dst_len = src_len; + return; +} + +static void +gzip1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int ret = 0; + struct z_stream_s stream; + + assert("edward-843", coa != NULL); + assert("edward-876", src_len != 0); + + stream.workspace = coa; + ret = zlib_inflateInit2(&stream, -GZIP1_DEF_WINBITS); + if (ret != Z_OK) { + warning("edward-774", "zlib_inflateInit2 returned %d\n", ret); + return; + } + ret = zlib_inflateReset(&stream); + if (ret != Z_OK) { + warning("edward-775", "zlib_inflateReset returned %d\n", ret); + return; + } + + stream.next_in = src_first; + stream.avail_in = src_len; + stream.next_out = dst_first; + stream.avail_out = *dst_len; + + ret = zlib_inflate(&stream, Z_SYNC_FLUSH); + /* + * Work around a bug in zlib, which sometimes wants to taste an extra + * byte when being used in the (undocumented) raw deflate mode. + * (From USAGI). 
+ */ + if (ret == Z_OK && !stream.avail_in && stream.avail_out) { + u8 zerostuff = 0; + stream.next_in = &zerostuff; + stream.avail_in = 1; + ret = zlib_inflate(&stream, Z_FINISH); + } + if (ret != Z_STREAM_END) { + warning("edward-776", "zlib_inflate returned %d\n", ret); + return; + } + *dst_len = stream.total_out; + return; +} + +/******************************************************************************/ +/* lzo1 compression */ +/******************************************************************************/ + +static int lzo1_init(void) +{ + return 0; +} + +static int lzo1_overrun(unsigned in_len) +{ + return in_len / 16 + 64 + 3; +} + +static coa_t lzo1_alloc(tfm_action act) +{ + int ret = 0; + coa_t coa = NULL; + + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(LZO1X_1_MEM_COMPRESS); + if (!coa) { + ret = -ENOMEM; + break; + } + case TFMA_READ: /* decompress */ + break; + default: + impossible("edward-877", "unknown tfm action"); + } + if (ret) + return ERR_PTR(ret); + return coa; +} + +static void lzo1_free(coa_t coa, tfm_action act) +{ + assert("edward-879", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(coa); + break; + case TFMA_READ: /* decompress */ + impossible("edward-1304", + "trying to free non-allocated workspace"); + default: + impossible("edward-880", "unknown tfm action"); + } + return; +} + +static int lzo1_min_size_deflate(void) +{ + return 256; +} + +static void +lzo1_compress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int result; + + assert("edward-846", coa != NULL); + assert("edward-847", src_len != 0); + + result = lzo1x_1_compress(src_first, src_len, dst_first, dst_len, coa); + if (unlikely(result != LZO_E_OK)) { + warning("edward-849", "lzo1x_1_compress failed\n"); + goto out; + } + if (*dst_len >= src_len) { + //warning("edward-850", "lzo1x_1_compress: incompressible data\n"); + goto out; + } + return; + out: + *dst_len = src_len; + 
return; +} + +static void +lzo1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int result; + + assert("edward-851", coa == NULL); + assert("edward-852", src_len != 0); + + result = lzo1x_decompress_safe(src_first, src_len, dst_first, dst_len); + if (result != LZO_E_OK) + warning("edward-853", "lzo1x_1_decompress failed\n"); + return; +} + +/******************************************************************************/ +/* zstd1 compression */ +/******************************************************************************/ + +typedef struct { + ZSTD_parameters params; + void* workspace; + ZSTD_CCtx* cctx; +} zstd1_coa_c; +typedef struct { + void* workspace; + ZSTD_DCtx* dctx; +} zstd1_coa_d; + +static int zstd1_init(void) +{ + return 0; +} + +static int zstd1_overrun(unsigned src_len UNUSED_ARG) +{ + return ZSTD_compressBound(src_len) - src_len; +} + +static coa_t zstd1_alloc(tfm_action act) +{ + int ret = 0; + size_t workspace_size; + coa_t coa = NULL; + + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(sizeof(zstd1_coa_c)); + if (!coa) { + ret = -ENOMEM; + break; + } + /* ZSTD benchmark use level 1 as default. Max is 22. 
*/ + ((zstd1_coa_c*)coa)->params = ZSTD_getParams(1, 0, 0); + workspace_size = ZSTD_CCtxWorkspaceBound(((zstd1_coa_c*)coa)->params.cParams); + ((zstd1_coa_c*)coa)->workspace = reiser4_vmalloc(workspace_size); + if (!(((zstd1_coa_c*)coa)->workspace)) { + ret = -ENOMEM; + vfree(coa); + break; + } + ((zstd1_coa_c*)coa)->cctx = ZSTD_initCCtx(((zstd1_coa_c*)coa)->workspace, workspace_size); + if (!(((zstd1_coa_c*)coa)->cctx)) { + ret = -ENOMEM; + vfree(((zstd1_coa_c*)coa)->workspace); + vfree(coa); + break; + } + break; + case TFMA_READ: /* decompress */ + coa = reiser4_vmalloc(sizeof(zstd1_coa_d)); + if (!coa) { + ret = -ENOMEM; + break; + } + workspace_size = ZSTD_DCtxWorkspaceBound(); + ((zstd1_coa_d*)coa)->workspace = reiser4_vmalloc(workspace_size); + if (!(((zstd1_coa_d*)coa)->workspace)) { + ret = -ENOMEM; + vfree(coa); + break; + } + ((zstd1_coa_d*)coa)->dctx = ZSTD_initDCtx(((zstd1_coa_d*)coa)->workspace, workspace_size); + if (!(((zstd1_coa_d*)coa)->dctx)) { + ret = -ENOMEM; + vfree(((zstd1_coa_d*)coa)->workspace); + vfree(coa); + break; + } + break; + default: + impossible("bsinot-1", + "trying to alloc workspace for unknown tfm action"); + } + if (ret) { + warning("bsinot-2", + "alloc workspace for zstd (tfm action = %d) failed\n", + act); + return ERR_PTR(ret); + } + return coa; +} + +static void zstd1_free(coa_t coa, tfm_action act) +{ + assert("bsinot-3", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(((zstd1_coa_c*)coa)->workspace); + vfree(coa); + //printk(KERN_WARNING "free comp memory -- %p\n", coa); + break; + case TFMA_READ: /* decompress */ + vfree(((zstd1_coa_d*)coa)->workspace); + vfree(coa); + //printk(KERN_WARNING "free decomp memory -- %p\n", coa); + break; + default: + impossible("bsinot-4", "unknown tfm action"); + } + return; +} + +static int zstd1_min_size_deflate(void) +{ + return 256; /* I'm not sure about the correct value, so took from LZO1 */ +} + +static void +zstd1_compress(coa_t coa, __u8 * src_first, 
size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + unsigned int result; + + assert("bsinot-5", coa != NULL); + assert("bsinot-6", src_len != 0); + result = ZSTD_compressCCtx(((zstd1_coa_c*)coa)->cctx, dst_first, *dst_len, src_first, src_len, ((zstd1_coa_c*)coa)->params); + if (ZSTD_isError(result)) { + warning("bsinot-7", "zstd1_compressCCtx failed\n"); + goto out; + } + *dst_len = result; + if (*dst_len >= src_len) { + //warning("bsinot-8", "zstd1_compressCCtx: incompressible data\n"); + goto out; + } + return; + out: + *dst_len = src_len; + return; +} + +static void +zstd1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + unsigned int result; + + assert("bsinot-9", coa != NULL); + assert("bsinot-10", src_len != 0); + + result = ZSTD_decompressDCtx(((zstd1_coa_d*)coa)->dctx, dst_first, *dst_len, src_first, src_len); + /* Same here. */ + if (ZSTD_isError(result)) + warning("bsinot-11", "zstd1_decompressDCtx failed\n"); + *dst_len = result; + return; +} + + +compression_plugin compression_plugins[LAST_COMPRESSION_ID] = { + [LZO1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = LZO1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "lzo1", + .desc = "lzo1 compression transform", + .linkage = {NULL, NULL} + }, + .init = lzo1_init, + .overrun = lzo1_overrun, + .alloc = lzo1_alloc, + .free = lzo1_free, + .min_size_deflate = lzo1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = lzo1_compress, + .decompress = lzo1_decompress + }, + [GZIP1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = GZIP1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "gzip1", + .desc = "gzip1 compression transform", + .linkage = {NULL, NULL} + }, + .init = gzip1_init, + .overrun = gzip1_overrun, + .alloc = gzip1_alloc, + .free = gzip1_free, + .min_size_deflate = gzip1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = 
gzip1_compress, + .decompress = gzip1_decompress + }, + [ZSTD1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = ZSTD1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "zstd1", + .desc = "zstd1 compression transform", + .linkage = {NULL, NULL} + }, + .init = zstd1_init, + .overrun = zstd1_overrun, + .alloc = zstd1_alloc, + .free = zstd1_free, + .min_size_deflate = zstd1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = zstd1_compress, + .decompress = zstd1_decompress + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/compress.h b/fs/reiser4/plugin/compress/compress.h new file mode 100644 index 000000000000..235273897071 --- /dev/null +++ b/fs/reiser4/plugin/compress/compress.h @@ -0,0 +1,44 @@ +#if !defined( __FS_REISER4_COMPRESS_H__ ) +#define __FS_REISER4_COMPRESS_H__ + +#include +#include + +/* transform direction */ +typedef enum { + TFMA_READ, /* decrypt, decompress */ + TFMA_WRITE, /* encrypt, compress */ + TFMA_LAST +} tfm_action; + +/* supported compression algorithms */ +typedef enum { + LZO1_COMPRESSION_ID, + GZIP1_COMPRESSION_ID, + ZSTD1_COMPRESSION_ID, + LAST_COMPRESSION_ID, +} reiser4_compression_id; + +/* the same as pgoff, but units are page clusters */ +typedef unsigned long cloff_t; + +/* working data of a (de)compression algorithm */ +typedef void *coa_t; + +/* table for all supported (de)compression algorithms */ +typedef coa_t coa_set[LAST_COMPRESSION_ID][TFMA_LAST]; + +__u32 reiser4_adler32(char *data, __u32 len); + +#endif /* __FS_REISER4_COMPRESS_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/compress_mode.c b/fs/reiser4/plugin/compress/compress_mode.c new file mode 100644 index 000000000000..5e318caf995c --- /dev/null +++ b/fs/reiser4/plugin/compress/compress_mode.c @@ -0,0 +1,162 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* This file contains Reiser4 compression mode plugins. + + Compression mode plugin is a set of handlers called by compressor + at flush time and represent some heuristics including the ones + which are to avoid compression of incompressible data, see + http://www.namesys.com/cryptcompress_design.html for more details. +*/ +#include "../../inode.h" +#include "../plugin.h" + +static int should_deflate_none(struct inode * inode, cloff_t index) +{ + return 0; +} + +static int should_deflate_common(struct inode * inode, cloff_t index) +{ + return compression_is_on(cryptcompress_inode_data(inode)); +} + +static int discard_hook_ultim(struct inode *inode, cloff_t index) +{ + turn_off_compression(cryptcompress_inode_data(inode)); + return 0; +} + +static int discard_hook_lattd(struct inode *inode, cloff_t index) +{ + struct cryptcompress_info * info = cryptcompress_inode_data(inode); + + assert("edward-1462", + get_lattice_factor(info) >= MIN_LATTICE_FACTOR && + get_lattice_factor(info) <= MAX_LATTICE_FACTOR); + + turn_off_compression(info); + if (get_lattice_factor(info) < MAX_LATTICE_FACTOR) + set_lattice_factor(info, get_lattice_factor(info) << 1); + return 0; +} + +static int accept_hook_lattd(struct inode *inode, cloff_t index) +{ + turn_on_compression(cryptcompress_inode_data(inode)); + set_lattice_factor(cryptcompress_inode_data(inode), MIN_LATTICE_FACTOR); + return 0; +} + +/* Check on dynamic lattice, the adaptive compression modes which + defines the following behavior: + + Compression is on: try to 
compress everything and turn + it off, whenever cluster is incompressible. + + Compression is off: try to compress clusters of indexes + k * FACTOR (k = 0, 1, 2, ...) and turn it on, if some of + them is compressible. If incompressible, then increase FACTOR */ + +/* check if @index belongs to one-dimensional lattice + of sparce factor @factor */ +static int is_on_lattice(cloff_t index, int factor) +{ + return (factor ? index % factor == 0: index == 0); +} + +static int should_deflate_lattd(struct inode * inode, cloff_t index) +{ + return should_deflate_common(inode, index) || + is_on_lattice(index, + get_lattice_factor + (cryptcompress_inode_data(inode))); +} + +/* compression mode_plugins */ +compression_mode_plugin compression_mode_plugins[LAST_COMPRESSION_MODE_ID] = { + [NONE_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = NONE_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "none", + .desc = "Compress nothing", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_none, + .accept_hook = NULL, + .discard_hook = NULL + }, + /* Check-on-dynamic-lattice adaptive compression mode */ + [LATTD_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = LATTD_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "lattd", + .desc = "Check on dynamic lattice", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_lattd, + .accept_hook = accept_hook_lattd, + .discard_hook = discard_hook_lattd + }, + /* Check-ultimately compression mode: + Turn off compression forever as soon as we meet + incompressible data */ + [ULTIM_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = ULTIM_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "ultim", + .desc = "Check ultimately", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_common, + .accept_hook = NULL, + .discard_hook = discard_hook_ultim + }, + /* Force-to-compress-everything 
compression mode */ + [FORCE_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = FORCE_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "force", + .desc = "Force to compress everything", + .linkage = {NULL, NULL} + }, + .should_deflate = NULL, + .accept_hook = NULL, + .discard_hook = NULL + }, + /* Convert-to-extent compression mode. + In this mode items will be converted to extents and management + will be passed to (classic) unix file plugin as soon as ->write() + detects that the first complete logical cluster (of index #0) is + incompressible. */ + [CONVX_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = CONVX_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "conv", + .desc = "Convert to extent", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_common, + .accept_hook = NULL, + .discard_hook = NULL + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/lzoconf.h b/fs/reiser4/plugin/compress/lzoconf.h new file mode 100644 index 000000000000..cc0fa4db25b5 --- /dev/null +++ b/fs/reiser4/plugin/compress/lzoconf.h @@ -0,0 +1,216 @@ +/* lzoconf.h -- configuration for the LZO real-time data compression library + adopted for reiser4 compression transform plugin. + + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. 
+ + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/lzo/ + */ + +#include /* for UINT_MAX, ULONG_MAX - edward */ + +#ifndef __LZOCONF_H +#define __LZOCONF_H + +#define LZO_VERSION 0x1080 +#define LZO_VERSION_STRING "1.08" +#define LZO_VERSION_DATE "Jul 12 2002" + +/* internal Autoconf configuration file - only used when building LZO */ + +/*********************************************************************** +// LZO requires a conforming +************************************************************************/ + +#define CHAR_BIT 8 +#define USHRT_MAX 0xffff + +/* workaround a cpp bug under hpux 10.20 */ +#define LZO_0xffffffffL 4294967295ul + +/*********************************************************************** +// architecture defines +************************************************************************/ + +#if !defined(__LZO_i386) +# if defined(__i386__) || defined(__386__) || defined(_M_IX86) +# define __LZO_i386 +# endif +#endif + +/* memory checkers */ +#if !defined(__LZO_CHECKER) +# if defined(__BOUNDS_CHECKING_ON) +# define __LZO_CHECKER +# elif defined(__CHECKER__) +# define __LZO_CHECKER +# elif defined(__INSURE__) +# define __LZO_CHECKER +# elif defined(__PURIFY__) +# define __LZO_CHECKER +# endif +#endif + 
+/*********************************************************************** +// integral and pointer types +************************************************************************/ + +/* Integral types with 32 bits or more */ +#if !defined(LZO_UINT32_MAX) +# if (UINT_MAX >= LZO_0xffffffffL) + typedef unsigned int lzo_uint32; + typedef int lzo_int32; +# define LZO_UINT32_MAX UINT_MAX +# define LZO_INT32_MAX INT_MAX +# define LZO_INT32_MIN INT_MIN +# elif (ULONG_MAX >= LZO_0xffffffffL) + typedef unsigned long lzo_uint32; + typedef long lzo_int32; +# define LZO_UINT32_MAX ULONG_MAX +# define LZO_INT32_MAX LONG_MAX +# define LZO_INT32_MIN LONG_MIN +# else +# error "lzo_uint32" +# endif +#endif + +/* lzo_uint is used like size_t */ +#if !defined(LZO_UINT_MAX) +# if (UINT_MAX >= LZO_0xffffffffL) + typedef unsigned int lzo_uint; + typedef int lzo_int; +# define LZO_UINT_MAX UINT_MAX +# define LZO_INT_MAX INT_MAX +# define LZO_INT_MIN INT_MIN +# elif (ULONG_MAX >= LZO_0xffffffffL) + typedef unsigned long lzo_uint; + typedef long lzo_int; +# define LZO_UINT_MAX ULONG_MAX +# define LZO_INT_MAX LONG_MAX +# define LZO_INT_MIN LONG_MIN +# else +# error "lzo_uint" +# endif +#endif + + typedef int lzo_bool; + +/*********************************************************************** +// memory models +************************************************************************/ + +/* Memory model that allows to access memory at offsets of lzo_uint. 
*/ +#if !defined(__LZO_MMODEL) +# if (LZO_UINT_MAX <= UINT_MAX) +# define __LZO_MMODEL +# else +# error "__LZO_MMODEL" +# endif +#endif + +/* no typedef here because of const-pointer issues */ +#define lzo_byte unsigned char __LZO_MMODEL +#define lzo_bytep unsigned char __LZO_MMODEL * +#define lzo_charp char __LZO_MMODEL * +#define lzo_voidp void __LZO_MMODEL * +#define lzo_shortp short __LZO_MMODEL * +#define lzo_ushortp unsigned short __LZO_MMODEL * +#define lzo_uint32p lzo_uint32 __LZO_MMODEL * +#define lzo_int32p lzo_int32 __LZO_MMODEL * +#define lzo_uintp lzo_uint __LZO_MMODEL * +#define lzo_intp lzo_int __LZO_MMODEL * +#define lzo_voidpp lzo_voidp __LZO_MMODEL * +#define lzo_bytepp lzo_bytep __LZO_MMODEL * + +#ifndef lzo_sizeof_dict_t +# define lzo_sizeof_dict_t sizeof(lzo_bytep) +#endif + +typedef int (*lzo_compress_t) (const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem); + + +/*********************************************************************** +// error codes and prototypes +************************************************************************/ + +/* Error codes for the compression/decompression functions. Negative + * values are errors, positive values will be used for special but + * normal events. + */ +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) /* not used right now */ +#define LZO_E_NOT_COMPRESSIBLE (-3) /* not used right now */ +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) + +/* lzo_init() should be the first function you call. + * Check the return code ! + * + * lzo_init() is a macro to allow checking that the library and the + * compiler's view of various types are consistent. 
+ */ +#define lzo_init() __lzo_init2(LZO_VERSION,(int)sizeof(short),(int)sizeof(int),\ + (int)sizeof(long),(int)sizeof(lzo_uint32),(int)sizeof(lzo_uint),\ + (int)lzo_sizeof_dict_t,(int)sizeof(char *),(int)sizeof(lzo_voidp),\ + (int)sizeof(lzo_compress_t)) + extern int __lzo_init2(unsigned, int, int, int, int, int, int, + int, int, int); + +/* checksum functions */ +extern lzo_uint32 lzo_crc32(lzo_uint32 _c, const lzo_byte * _buf, + lzo_uint _len); +/* misc. */ + typedef union { + lzo_bytep p; + lzo_uint u; + } __lzo_pu_u; + typedef union { + lzo_bytep p; + lzo_uint32 u32; + } __lzo_pu32_u; + typedef union { + void *vp; + lzo_bytep bp; + lzo_uint32 u32; + long l; + } lzo_align_t; + +#define LZO_PTR_ALIGN_UP(_ptr,_size) \ + ((_ptr) + (lzo_uint) __lzo_align_gap((const lzo_voidp)(_ptr),(lzo_uint)(_size))) + +/* deprecated - only for backward compatibility */ +#define LZO_ALIGN(_ptr,_size) LZO_PTR_ALIGN_UP(_ptr,_size) + +#endif /* already included */ diff --git a/fs/reiser4/plugin/compress/minilzo.c b/fs/reiser4/plugin/compress/minilzo.c new file mode 100644 index 000000000000..2dba187d9845 --- /dev/null +++ b/fs/reiser4/plugin/compress/minilzo.c @@ -0,0 +1,1967 @@ +/* minilzo.c -- mini subset of the LZO real-time data compression library + adopted for reiser4 compression transform plugin. + + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. 
+ + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/lzo/ + */ + +/* + * NOTE: + * the full LZO package can be found at + * http://www.oberhumer.com/opensource/lzo/ + */ + +#include "../../debug.h" /* for reiser4 assert macro -edward */ + +#define __LZO_IN_MINILZO +#define LZO_BUILD + +#include "minilzo.h" + +#if !defined(MINILZO_VERSION) || (MINILZO_VERSION != 0x1080) +# error "version mismatch in miniLZO source files" +#endif + +#ifndef __LZO_CONF_H +#define __LZO_CONF_H + +# define BOUNDS_CHECKING_OFF_DURING(stmt) stmt +# define BOUNDS_CHECKING_OFF_IN_EXPR(expr) (expr) + +# define HAVE_MEMCMP +# define HAVE_MEMCPY +# define HAVE_MEMMOVE +# define HAVE_MEMSET + +#undef NDEBUG +#if !defined(LZO_DEBUG) +# define NDEBUG +#endif +#if defined(LZO_DEBUG) || !defined(NDEBUG) +# if !defined(NO_STDIO_H) +# include +# endif +#endif + +#if !defined(LZO_COMPILE_TIME_ASSERT) +# define LZO_COMPILE_TIME_ASSERT(expr) \ + { typedef int __lzo_compile_time_assert_fail[1 - 2 * !(expr)]; } +#endif + +#if !defined(LZO_UNUSED) +# if 1 +# define LZO_UNUSED(var) ((void)&var) +# elif 0 +# define LZO_UNUSED(var) { typedef int __lzo_unused[sizeof(var) ? 
2 : 1]; } +# else +# define LZO_UNUSED(parm) (parm = parm) +# endif +#endif + +#if defined(NO_MEMCMP) +# undef HAVE_MEMCMP +#endif + +#if !defined(HAVE_MEMSET) +# undef memset +# define memset lzo_memset +#endif + +# define LZO_BYTE(x) ((unsigned char) ((x) & 0xff)) + +#define LZO_MAX(a,b) ((a) >= (b) ? (a) : (b)) +#define LZO_MIN(a,b) ((a) <= (b) ? (a) : (b)) +#define LZO_MAX3(a,b,c) ((a) >= (b) ? LZO_MAX(a,c) : LZO_MAX(b,c)) +#define LZO_MIN3(a,b,c) ((a) <= (b) ? LZO_MIN(a,c) : LZO_MIN(b,c)) + +#define lzo_sizeof(type) ((lzo_uint) (sizeof(type))) + +#define LZO_HIGH(array) ((lzo_uint) (sizeof(array)/sizeof(*(array)))) + +#define LZO_SIZE(bits) (1u << (bits)) +#define LZO_MASK(bits) (LZO_SIZE(bits) - 1) + +#define LZO_LSIZE(bits) (1ul << (bits)) +#define LZO_LMASK(bits) (LZO_LSIZE(bits) - 1) + +#define LZO_USIZE(bits) ((lzo_uint) 1 << (bits)) +#define LZO_UMASK(bits) (LZO_USIZE(bits) - 1) + +#define LZO_STYPE_MAX(b) (((1l << (8*(b)-2)) - 1l) + (1l << (8*(b)-2))) +#define LZO_UTYPE_MAX(b) (((1ul << (8*(b)-1)) - 1ul) + (1ul << (8*(b)-1))) + +#if !defined(SIZEOF_UNSIGNED) +# if (UINT_MAX == 0xffff) +# define SIZEOF_UNSIGNED 2 +# elif (UINT_MAX == LZO_0xffffffffL) +# define SIZEOF_UNSIGNED 4 +# elif (UINT_MAX >= LZO_0xffffffffL) +# define SIZEOF_UNSIGNED 8 +# else +# error "SIZEOF_UNSIGNED" +# endif +#endif + +#if !defined(SIZEOF_UNSIGNED_LONG) +# if (ULONG_MAX == LZO_0xffffffffL) +# define SIZEOF_UNSIGNED_LONG 4 +# elif (ULONG_MAX >= LZO_0xffffffffL) +# define SIZEOF_UNSIGNED_LONG 8 +# else +# error "SIZEOF_UNSIGNED_LONG" +# endif +#endif + +#if !defined(SIZEOF_SIZE_T) +# define SIZEOF_SIZE_T SIZEOF_UNSIGNED +#endif +#if !defined(SIZE_T_MAX) +# define SIZE_T_MAX LZO_UTYPE_MAX(SIZEOF_SIZE_T) +#endif + +#if 1 && defined(__LZO_i386) && (UINT_MAX == LZO_0xffffffffL) +# if !defined(LZO_UNALIGNED_OK_2) && (USHRT_MAX == 0xffff) +# define LZO_UNALIGNED_OK_2 +# endif +# if !defined(LZO_UNALIGNED_OK_4) && (LZO_UINT32_MAX == LZO_0xffffffffL) +# define LZO_UNALIGNED_OK_4 +# 
endif +#endif + +#if defined(LZO_UNALIGNED_OK_2) || defined(LZO_UNALIGNED_OK_4) +# if !defined(LZO_UNALIGNED_OK) +# define LZO_UNALIGNED_OK +# endif +#endif + +#if defined(__LZO_NO_UNALIGNED) +# undef LZO_UNALIGNED_OK +# undef LZO_UNALIGNED_OK_2 +# undef LZO_UNALIGNED_OK_4 +#endif + +#if defined(LZO_UNALIGNED_OK_2) && (USHRT_MAX != 0xffff) +# error "LZO_UNALIGNED_OK_2 must not be defined on this system" +#endif +#if defined(LZO_UNALIGNED_OK_4) && (LZO_UINT32_MAX != LZO_0xffffffffL) +# error "LZO_UNALIGNED_OK_4 must not be defined on this system" +#endif + +#if defined(__LZO_NO_ALIGNED) +# undef LZO_ALIGNED_OK_4 +#endif + +#if defined(LZO_ALIGNED_OK_4) && (LZO_UINT32_MAX != LZO_0xffffffffL) +# error "LZO_ALIGNED_OK_4 must not be defined on this system" +#endif + +#define LZO_LITTLE_ENDIAN 1234 +#define LZO_BIG_ENDIAN 4321 +#define LZO_PDP_ENDIAN 3412 + +#if !defined(LZO_BYTE_ORDER) +# if defined(MFX_BYTE_ORDER) +# define LZO_BYTE_ORDER MFX_BYTE_ORDER +# elif defined(__LZO_i386) +# define LZO_BYTE_ORDER LZO_LITTLE_ENDIAN +# elif defined(BYTE_ORDER) +# define LZO_BYTE_ORDER BYTE_ORDER +# elif defined(__BYTE_ORDER) +# define LZO_BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#if defined(LZO_BYTE_ORDER) +# if (LZO_BYTE_ORDER != LZO_LITTLE_ENDIAN) && \ + (LZO_BYTE_ORDER != LZO_BIG_ENDIAN) +# error "invalid LZO_BYTE_ORDER" +# endif +#endif + +#if defined(LZO_UNALIGNED_OK) && !defined(LZO_BYTE_ORDER) +# error "LZO_BYTE_ORDER is not defined" +#endif + +#define LZO_OPTIMIZE_GNUC_i386_IS_BUGGY + +#if defined(NDEBUG) && !defined(LZO_DEBUG) && !defined(__LZO_CHECKER) +# if defined(__GNUC__) && defined(__i386__) +# if !defined(LZO_OPTIMIZE_GNUC_i386_IS_BUGGY) +# define LZO_OPTIMIZE_GNUC_i386 +# endif +# endif +#endif + +extern const lzo_uint32 _lzo_crc32_table[256]; + +#define _LZO_STRINGIZE(x) #x +#define _LZO_MEXPAND(x) _LZO_STRINGIZE(x) + +#define _LZO_CONCAT2(a,b) a ## b +#define _LZO_CONCAT3(a,b,c) a ## b ## c +#define _LZO_CONCAT4(a,b,c,d) a ## b ## c ## d +#define 
_LZO_CONCAT5(a,b,c,d,e) a ## b ## c ## d ## e + +#define _LZO_ECONCAT2(a,b) _LZO_CONCAT2(a,b) +#define _LZO_ECONCAT3(a,b,c) _LZO_CONCAT3(a,b,c) +#define _LZO_ECONCAT4(a,b,c,d) _LZO_CONCAT4(a,b,c,d) +#define _LZO_ECONCAT5(a,b,c,d,e) _LZO_CONCAT5(a,b,c,d,e) + +#ifndef __LZO_PTR_H +#define __LZO_PTR_H + +#if !defined(lzo_ptrdiff_t) +# if (UINT_MAX >= LZO_0xffffffffL) +typedef ptrdiff_t lzo_ptrdiff_t; +# else +typedef long lzo_ptrdiff_t; +# endif +#endif + +#if !defined(__LZO_HAVE_PTR_T) +# if defined(lzo_ptr_t) +# define __LZO_HAVE_PTR_T +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED_LONG) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED_LONG) +typedef unsigned long lzo_ptr_t; +typedef long lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED) +typedef unsigned int lzo_ptr_t; +typedef int lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED_SHORT) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED_SHORT) +typedef unsigned short lzo_ptr_t; +typedef short lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(LZO_HAVE_CONFIG_H) || defined(SIZEOF_CHAR_P) +# error "no suitable type for lzo_ptr_t" +# else +typedef unsigned long lzo_ptr_t; +typedef long lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +#endif + +#define PTR(a) ((lzo_ptr_t) (a)) +#define PTR_LINEAR(a) PTR(a) +#define PTR_ALIGNED_4(a) ((PTR_LINEAR(a) & 3) == 0) +#define PTR_ALIGNED_8(a) ((PTR_LINEAR(a) & 7) == 0) +#define PTR_ALIGNED2_4(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 3) == 0) +#define PTR_ALIGNED2_8(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 7) == 0) + +#define PTR_LT(a,b) (PTR(a) < PTR(b)) +#define PTR_GE(a,b) (PTR(a) >= PTR(b)) +#define PTR_DIFF(a,b) ((lzo_ptrdiff_t) 
(PTR(a) - PTR(b))) +#define pd(a,b) ((lzo_uint) ((a)-(b))) + +typedef union { + char a_char; + unsigned char a_uchar; + short a_short; + unsigned short a_ushort; + int a_int; + unsigned int a_uint; + long a_long; + unsigned long a_ulong; + lzo_int a_lzo_int; + lzo_uint a_lzo_uint; + lzo_int32 a_lzo_int32; + lzo_uint32 a_lzo_uint32; + ptrdiff_t a_ptrdiff_t; + lzo_ptrdiff_t a_lzo_ptrdiff_t; + lzo_ptr_t a_lzo_ptr_t; + lzo_voidp a_lzo_voidp; + void *a_void_p; + lzo_bytep a_lzo_bytep; + lzo_bytepp a_lzo_bytepp; + lzo_uintp a_lzo_uintp; + lzo_uint *a_lzo_uint_p; + lzo_uint32p a_lzo_uint32p; + lzo_uint32 *a_lzo_uint32_p; + unsigned char *a_uchar_p; + char *a_char_p; +} lzo_full_align_t; + +#endif +#define LZO_DETERMINISTIC +#define LZO_DICT_USE_PTR +# define lzo_dict_t const lzo_bytep +# define lzo_dict_p lzo_dict_t __LZO_MMODEL * +#if !defined(lzo_moff_t) +#define lzo_moff_t lzo_uint +#endif +#endif +static lzo_ptr_t __lzo_ptr_linear(const lzo_voidp ptr) +{ + return PTR_LINEAR(ptr); +} + +static unsigned __lzo_align_gap(const lzo_voidp ptr, lzo_uint size) +{ + lzo_ptr_t p, s, n; + + assert("lzo-01", size > 0); + + p = __lzo_ptr_linear(ptr); + s = (lzo_ptr_t) (size - 1); + n = (((p + s) / size) * size) - p; + + assert("lzo-02", (long)n >= 0); + assert("lzo-03", n <= s); + + return (unsigned)n; +} + +#ifndef __LZO_UTIL_H +#define __LZO_UTIL_H + +#ifndef __LZO_CONF_H +#endif + +#if 1 && defined(HAVE_MEMCPY) +#define MEMCPY8_DS(dest,src,len) \ + memcpy(dest,src,len); \ + dest += len; \ + src += len +#endif + +#if !defined(MEMCPY8_DS) + +#define MEMCPY8_DS(dest,src,len) \ + { register lzo_uint __l = (len) / 8; \ + do { \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + } while (--__l > 0); } + +#endif + +#define MEMCPY_DS(dest,src,len) \ + do *dest++ = *src++; \ + while (--len > 0) + +#define MEMMOVE_DS(dest,src,len) \ + do *dest++ = *src++; \ + 
while (--len > 0) + +#if (LZO_UINT_MAX <= SIZE_T_MAX) && defined(HAVE_MEMSET) + +#define BZERO8_PTR(s,l,n) memset((s),0,(lzo_uint)(l)*(n)) + +#else + +#define BZERO8_PTR(s,l,n) \ + lzo_memset((lzo_voidp)(s),0,(lzo_uint)(l)*(n)) + +#endif +#endif + +/* If you use the LZO library in a product, you *must* keep this + * copyright string in the executable of your product. + */ + +static const lzo_byte __lzo_copyright[] = +#if !defined(__LZO_IN_MINLZO) + LZO_VERSION_STRING; +#else + "\n\n\n" + "LZO real-time data compression library.\n" + "Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002 Markus Franz Xaver Johannes Oberhumer\n" + "\n" + "http://www.oberhumer.com/opensource/lzo/\n" + "\n" + "LZO version: v" LZO_VERSION_STRING ", " LZO_VERSION_DATE "\n" + "LZO build date: " __DATE__ " " __TIME__ "\n\n" + "LZO special compilation options:\n" +#ifdef __cplusplus + " __cplusplus\n" +#endif +#if defined(__PIC__) + " __PIC__\n" +#elif defined(__pic__) + " __pic__\n" +#endif +#if (UINT_MAX < LZO_0xffffffffL) + " 16BIT\n" +#endif +#if defined(__LZO_STRICT_16BIT) + " __LZO_STRICT_16BIT\n" +#endif +#if (UINT_MAX > LZO_0xffffffffL) + " UINT_MAX=" _LZO_MEXPAND(UINT_MAX) "\n" +#endif +#if (ULONG_MAX > LZO_0xffffffffL) + " ULONG_MAX=" _LZO_MEXPAND(ULONG_MAX) "\n" +#endif +#if defined(LZO_BYTE_ORDER) + " LZO_BYTE_ORDER=" _LZO_MEXPAND(LZO_BYTE_ORDER) "\n" +#endif +#if defined(LZO_UNALIGNED_OK_2) + " LZO_UNALIGNED_OK_2\n" +#endif +#if defined(LZO_UNALIGNED_OK_4) + " LZO_UNALIGNED_OK_4\n" +#endif +#if defined(LZO_ALIGNED_OK_4) + " LZO_ALIGNED_OK_4\n" +#endif +#if defined(LZO_DICT_USE_PTR) + " LZO_DICT_USE_PTR\n" +#endif +#if defined(__LZO_QUERY_COMPRESS) + " __LZO_QUERY_COMPRESS\n" +#endif +#if defined(__LZO_QUERY_DECOMPRESS) + " __LZO_QUERY_DECOMPRESS\n" +#endif +#if defined(__LZO_IN_MINILZO) + " __LZO_IN_MINILZO\n" +#endif + "\n\n" "$Id: LZO " LZO_VERSION_STRING " built " __DATE__ " " __TIME__ +#if defined(__GNUC__) && defined(__VERSION__) + " by gcc " __VERSION__ +#elif 
defined(__BORLANDC__) + " by Borland C " _LZO_MEXPAND(__BORLANDC__) +#elif defined(_MSC_VER) + " by Microsoft C " _LZO_MEXPAND(_MSC_VER) +#elif defined(__PUREC__) + " by Pure C " _LZO_MEXPAND(__PUREC__) +#elif defined(__SC__) + " by Symantec C " _LZO_MEXPAND(__SC__) +#elif defined(__TURBOC__) + " by Turbo C " _LZO_MEXPAND(__TURBOC__) +#elif defined(__WATCOMC__) + " by Watcom C " _LZO_MEXPAND(__WATCOMC__) +#endif + " $\n" + "$Copyright: LZO (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002 Markus Franz Xaver Johannes Oberhumer $\n"; +#endif + +#define LZO_BASE 65521u +#define LZO_NMAX 5552 + +#define LZO_DO1(buf,i) {s1 += buf[i]; s2 += s1;} +#define LZO_DO2(buf,i) LZO_DO1(buf,i); LZO_DO1(buf,i+1); +#define LZO_DO4(buf,i) LZO_DO2(buf,i); LZO_DO2(buf,i+2); +#define LZO_DO8(buf,i) LZO_DO4(buf,i); LZO_DO4(buf,i+4); +#define LZO_DO16(buf,i) LZO_DO8(buf,i); LZO_DO8(buf,i+8); + +# define IS_SIGNED(type) (((type) (-1)) < ((type) 0)) +# define IS_UNSIGNED(type) (((type) (-1)) > ((type) 0)) + +#define IS_POWER_OF_2(x) (((x) & ((x) - 1)) == 0) + +static lzo_bool schedule_insns_bug(void); +static lzo_bool strength_reduce_bug(int *); + +# define __lzo_assert(x) ((x) ? 
1 : 0) + +#undef COMPILE_TIME_ASSERT + +# define COMPILE_TIME_ASSERT(expr) LZO_COMPILE_TIME_ASSERT(expr) + +static lzo_bool basic_integral_check(void) +{ + lzo_bool r = 1; + + COMPILE_TIME_ASSERT(CHAR_BIT == 8); + COMPILE_TIME_ASSERT(sizeof(char) == 1); + COMPILE_TIME_ASSERT(sizeof(short) >= 2); + COMPILE_TIME_ASSERT(sizeof(long) >= 4); + COMPILE_TIME_ASSERT(sizeof(int) >= sizeof(short)); + COMPILE_TIME_ASSERT(sizeof(long) >= sizeof(int)); + + COMPILE_TIME_ASSERT(sizeof(lzo_uint) == sizeof(lzo_int)); + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == sizeof(lzo_int32)); + + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) >= sizeof(unsigned)); +#if defined(__LZO_STRICT_16BIT) + COMPILE_TIME_ASSERT(sizeof(lzo_uint) == 2); +#else + COMPILE_TIME_ASSERT(sizeof(lzo_uint) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_uint) >= sizeof(unsigned)); +#endif + +#if (USHRT_MAX == 65535u) + COMPILE_TIME_ASSERT(sizeof(short) == 2); +#elif (USHRT_MAX == LZO_0xffffffffL) + COMPILE_TIME_ASSERT(sizeof(short) == 4); +#elif (USHRT_MAX >= LZO_0xffffffffL) + COMPILE_TIME_ASSERT(sizeof(short) > 4); +#endif + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned char)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned short)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned long)); + COMPILE_TIME_ASSERT(IS_SIGNED(short)); + COMPILE_TIME_ASSERT(IS_SIGNED(int)); + COMPILE_TIME_ASSERT(IS_SIGNED(long)); + + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_uint32)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_uint)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_int32)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_int)); + + COMPILE_TIME_ASSERT(INT_MAX == LZO_STYPE_MAX(sizeof(int))); + COMPILE_TIME_ASSERT(UINT_MAX == LZO_UTYPE_MAX(sizeof(unsigned))); + COMPILE_TIME_ASSERT(LONG_MAX == LZO_STYPE_MAX(sizeof(long))); + COMPILE_TIME_ASSERT(ULONG_MAX == LZO_UTYPE_MAX(sizeof(unsigned long))); + COMPILE_TIME_ASSERT(USHRT_MAX == LZO_UTYPE_MAX(sizeof(unsigned short))); + 
COMPILE_TIME_ASSERT(LZO_UINT32_MAX == + LZO_UTYPE_MAX(sizeof(lzo_uint32))); + COMPILE_TIME_ASSERT(LZO_UINT_MAX == LZO_UTYPE_MAX(sizeof(lzo_uint))); + + r &= __lzo_assert(LZO_BYTE(257) == 1); + + return r; +} + +static lzo_bool basic_ptr_check(void) +{ + lzo_bool r = 1; + + COMPILE_TIME_ASSERT(sizeof(char *) >= sizeof(int)); + COMPILE_TIME_ASSERT(sizeof(lzo_byte *) >= sizeof(char *)); + + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_byte *)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_voidpp)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_bytepp)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) >= sizeof(lzo_uint)); + + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) == sizeof(lzo_voidp)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) == sizeof(lzo_sptr_t)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) >= sizeof(lzo_uint)); + + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= sizeof(ptrdiff_t)); + + COMPILE_TIME_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= sizeof(lzo_uint)); + +#if defined(SIZEOF_CHAR_P) + COMPILE_TIME_ASSERT(SIZEOF_CHAR_P == sizeof(char *)); +#endif +#if defined(SIZEOF_PTRDIFF_T) + COMPILE_TIME_ASSERT(SIZEOF_PTRDIFF_T == sizeof(ptrdiff_t)); +#endif + + COMPILE_TIME_ASSERT(IS_SIGNED(ptrdiff_t)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(size_t)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_ptrdiff_t)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_sptr_t)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_ptr_t)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_moff_t)); + + return r; +} + +static lzo_bool ptr_check(void) +{ + lzo_bool r = 1; + int i; + char _wrkmem[10 * sizeof(lzo_byte *) + sizeof(lzo_full_align_t)]; + lzo_bytep wrkmem; + lzo_bytepp dict; + unsigned char x[4 * sizeof(lzo_full_align_t)]; + long d; + lzo_full_align_t a; + lzo_full_align_t u; + + for (i = 0; i < (int)sizeof(x); i++) + x[i] = LZO_BYTE(i); + + wrkmem = + LZO_PTR_ALIGN_UP((lzo_byte *) _wrkmem, sizeof(lzo_full_align_t)); + + 
u.a_lzo_bytep = wrkmem; + dict = u.a_lzo_bytepp; + + d = (long)((const lzo_bytep)dict - (const lzo_bytep)_wrkmem); + r &= __lzo_assert(d >= 0); + r &= __lzo_assert(d < (long)sizeof(lzo_full_align_t)); + + memset(&a, 0, sizeof(a)); + r &= __lzo_assert(a.a_lzo_voidp == NULL); + + memset(&a, 0xff, sizeof(a)); + r &= __lzo_assert(a.a_ushort == USHRT_MAX); + r &= __lzo_assert(a.a_uint == UINT_MAX); + r &= __lzo_assert(a.a_ulong == ULONG_MAX); + r &= __lzo_assert(a.a_lzo_uint == LZO_UINT_MAX); + r &= __lzo_assert(a.a_lzo_uint32 == LZO_UINT32_MAX); + + if (r == 1) { + for (i = 0; i < 8; i++) + r &= __lzo_assert((const lzo_voidp)(&dict[i]) == + (const + lzo_voidp)(&wrkmem[i * + sizeof(lzo_byte + *)])); + } + + memset(&a, 0, sizeof(a)); + r &= __lzo_assert(a.a_char_p == NULL); + r &= __lzo_assert(a.a_lzo_bytep == NULL); + r &= __lzo_assert(NULL == (void *)0); + if (r == 1) { + for (i = 0; i < 10; i++) + dict[i] = wrkmem; + BZERO8_PTR(dict + 1, sizeof(dict[0]), 8); + r &= __lzo_assert(dict[0] == wrkmem); + for (i = 1; i < 9; i++) + r &= __lzo_assert(dict[i] == NULL); + r &= __lzo_assert(dict[9] == wrkmem); + } + + if (r == 1) { + unsigned k = 1; + const unsigned n = (unsigned)sizeof(lzo_uint32); + lzo_byte *p0; + lzo_byte *p1; + + k += __lzo_align_gap(&x[k], n); + p0 = (lzo_bytep) & x[k]; +#if defined(PTR_LINEAR) + r &= __lzo_assert((PTR_LINEAR(p0) & (n - 1)) == 0); +#else + r &= __lzo_assert(n == 4); + r &= __lzo_assert(PTR_ALIGNED_4(p0)); +#endif + + r &= __lzo_assert(k >= 1); + p1 = (lzo_bytep) & x[1]; + r &= __lzo_assert(PTR_GE(p0, p1)); + + r &= __lzo_assert(k < 1 + n); + p1 = (lzo_bytep) & x[1 + n]; + r &= __lzo_assert(PTR_LT(p0, p1)); + + if (r == 1) { + lzo_uint32 v0, v1; + + u.a_uchar_p = &x[k]; + v0 = *u.a_lzo_uint32_p; + u.a_uchar_p = &x[k + n]; + v1 = *u.a_lzo_uint32_p; + + r &= __lzo_assert(v0 > 0); + r &= __lzo_assert(v1 > 0); + } + } + + return r; +} + +static int _lzo_config_check(void) +{ + lzo_bool r = 1; + int i; + union { + lzo_uint32 a; + unsigned short 
b; + lzo_uint32 aa[4]; + unsigned char x[4 * sizeof(lzo_full_align_t)]; + } u; + + COMPILE_TIME_ASSERT((int)((unsigned char)((signed char)-1)) == 255); + COMPILE_TIME_ASSERT((((unsigned char)128) << (int)(8 * sizeof(int) - 8)) + < 0); + + r &= basic_integral_check(); + r &= basic_ptr_check(); + if (r != 1) + return LZO_E_ERROR; + + u.a = 0; + u.b = 0; + for (i = 0; i < (int)sizeof(u.x); i++) + u.x[i] = LZO_BYTE(i); + +#if defined(LZO_BYTE_ORDER) + if (r == 1) { +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + lzo_uint32 a = (lzo_uint32) (u.a & LZO_0xffffffffL); + unsigned short b = (unsigned short)(u.b & 0xffff); + r &= __lzo_assert(a == 0x03020100L); + r &= __lzo_assert(b == 0x0100); +# elif (LZO_BYTE_ORDER == LZO_BIG_ENDIAN) + lzo_uint32 a = u.a >> (8 * sizeof(u.a) - 32); + unsigned short b = u.b >> (8 * sizeof(u.b) - 16); + r &= __lzo_assert(a == 0x00010203L); + r &= __lzo_assert(b == 0x0001); +# else +# error "invalid LZO_BYTE_ORDER" +# endif + } +#endif + +#if defined(LZO_UNALIGNED_OK_2) + COMPILE_TIME_ASSERT(sizeof(short) == 2); + if (r == 1) { + unsigned short b[4]; + + for (i = 0; i < 4; i++) + b[i] = *(const unsigned short *)&u.x[i]; + +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + r &= __lzo_assert(b[0] == 0x0100); + r &= __lzo_assert(b[1] == 0x0201); + r &= __lzo_assert(b[2] == 0x0302); + r &= __lzo_assert(b[3] == 0x0403); +# elif (LZO_BYTE_ORDER == LZO_BIG_ENDIAN) + r &= __lzo_assert(b[0] == 0x0001); + r &= __lzo_assert(b[1] == 0x0102); + r &= __lzo_assert(b[2] == 0x0203); + r &= __lzo_assert(b[3] == 0x0304); +# endif + } +#endif + +#if defined(LZO_UNALIGNED_OK_4) + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == 4); + if (r == 1) { + lzo_uint32 a[4]; + + for (i = 0; i < 4; i++) + a[i] = *(const lzo_uint32 *)&u.x[i]; + +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + r &= __lzo_assert(a[0] == 0x03020100L); + r &= __lzo_assert(a[1] == 0x04030201L); + r &= __lzo_assert(a[2] == 0x05040302L); + r &= __lzo_assert(a[3] == 0x06050403L); +# elif (LZO_BYTE_ORDER == 
LZO_BIG_ENDIAN) + r &= __lzo_assert(a[0] == 0x00010203L); + r &= __lzo_assert(a[1] == 0x01020304L); + r &= __lzo_assert(a[2] == 0x02030405L); + r &= __lzo_assert(a[3] == 0x03040506L); +# endif + } +#endif + +#if defined(LZO_ALIGNED_OK_4) + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == 4); +#endif + + COMPILE_TIME_ASSERT(lzo_sizeof_dict_t == sizeof(lzo_dict_t)); + + if (r == 1) { + r &= __lzo_assert(!schedule_insns_bug()); + } + + if (r == 1) { + static int x[3]; + static unsigned xn = 3; + register unsigned j; + + for (j = 0; j < xn; j++) + x[j] = (int)j - 3; + r &= __lzo_assert(!strength_reduce_bug(x)); + } + + if (r == 1) { + r &= ptr_check(); + } + + return r == 1 ? LZO_E_OK : LZO_E_ERROR; +} + +static lzo_bool schedule_insns_bug(void) +{ +#if defined(__LZO_CHECKER) + return 0; +#else + const int clone[] = { 1, 2, 0 }; + const int *q; + q = clone; + return (*q) ? 0 : 1; +#endif +} + +static lzo_bool strength_reduce_bug(int *x) +{ + return x[0] != -3 || x[1] != -2 || x[2] != -1; +} + +#undef COMPILE_TIME_ASSERT + +int __lzo_init2(unsigned v, int s1, int s2, int s3, int s4, int s5, + int s6, int s7, int s8, int s9) +{ + int r; + + if (v == 0) + return LZO_E_ERROR; + + r = (s1 == -1 || s1 == (int)sizeof(short)) && + (s2 == -1 || s2 == (int)sizeof(int)) && + (s3 == -1 || s3 == (int)sizeof(long)) && + (s4 == -1 || s4 == (int)sizeof(lzo_uint32)) && + (s5 == -1 || s5 == (int)sizeof(lzo_uint)) && + (s6 == -1 || s6 == (int)lzo_sizeof_dict_t) && + (s7 == -1 || s7 == (int)sizeof(char *)) && + (s8 == -1 || s8 == (int)sizeof(lzo_voidp)) && + (s9 == -1 || s9 == (int)sizeof(lzo_compress_t)); + if (!r) + return LZO_E_ERROR; + + r = _lzo_config_check(); + if (r != LZO_E_OK) + return r; + + return r; +} + +#define do_compress _lzo1x_1_do_compress + +#define LZO_NEED_DICT_H +#define D_BITS 14 +#define D_INDEX1(d,p) d = DM((0x21*DX3(p,5,5,6)) >> 5) +#define D_INDEX2(d,p) d = (d & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f) + +#ifndef __LZO_CONFIG1X_H +#define __LZO_CONFIG1X_H + +#if 
!defined(LZO1X) && !defined(LZO1Y) && !defined(LZO1Z) +# define LZO1X +#endif + +#define LZO_EOF_CODE +#undef LZO_DETERMINISTIC + +#define M1_MAX_OFFSET 0x0400 +#ifndef M2_MAX_OFFSET +#define M2_MAX_OFFSET 0x0800 +#endif +#define M3_MAX_OFFSET 0x4000 +#define M4_MAX_OFFSET 0xbfff + +#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET) + +#define M1_MIN_LEN 2 +#define M1_MAX_LEN 2 +#define M2_MIN_LEN 3 +#ifndef M2_MAX_LEN +#define M2_MAX_LEN 8 +#endif +#define M3_MIN_LEN 3 +#define M3_MAX_LEN 33 +#define M4_MIN_LEN 3 +#define M4_MAX_LEN 9 + +#define M1_MARKER 0 +#define M2_MARKER 64 +#define M3_MARKER 32 +#define M4_MARKER 16 + +#ifndef MIN_LOOKAHEAD +#define MIN_LOOKAHEAD (M2_MAX_LEN + 1) +#endif + +#if defined(LZO_NEED_DICT_H) + +#ifndef LZO_HASH +#define LZO_HASH LZO_HASH_LZO_INCREMENTAL_B +#endif +#define DL_MIN_LEN M2_MIN_LEN + +#ifndef __LZO_DICT_H +#define __LZO_DICT_H + +#if !defined(D_BITS) && defined(DBITS) +# define D_BITS DBITS +#endif +#if !defined(D_BITS) +# error "D_BITS is not defined" +#endif +#if (D_BITS < 16) +# define D_SIZE LZO_SIZE(D_BITS) +# define D_MASK LZO_MASK(D_BITS) +#else +# define D_SIZE LZO_USIZE(D_BITS) +# define D_MASK LZO_UMASK(D_BITS) +#endif +#define D_HIGH ((D_MASK >> 1) + 1) + +#if !defined(DD_BITS) +# define DD_BITS 0 +#endif +#define DD_SIZE LZO_SIZE(DD_BITS) +#define DD_MASK LZO_MASK(DD_BITS) + +#if !defined(DL_BITS) +# define DL_BITS (D_BITS - DD_BITS) +#endif +#if (DL_BITS < 16) +# define DL_SIZE LZO_SIZE(DL_BITS) +# define DL_MASK LZO_MASK(DL_BITS) +#else +# define DL_SIZE LZO_USIZE(DL_BITS) +# define DL_MASK LZO_UMASK(DL_BITS) +#endif + +#if (D_BITS != DL_BITS + DD_BITS) +# error "D_BITS does not match" +#endif +#if (D_BITS < 8 || D_BITS > 18) +# error "invalid D_BITS" +#endif +#if (DL_BITS < 8 || DL_BITS > 20) +# error "invalid DL_BITS" +#endif +#if (DD_BITS < 0 || DD_BITS > 6) +# error "invalid DD_BITS" +#endif + +#if !defined(DL_MIN_LEN) +# define DL_MIN_LEN 3 +#endif +#if !defined(DL_SHIFT) +# define DL_SHIFT 
((DL_BITS + (DL_MIN_LEN - 1)) / DL_MIN_LEN) +#endif + +#define LZO_HASH_GZIP 1 +#define LZO_HASH_GZIP_INCREMENTAL 2 +#define LZO_HASH_LZO_INCREMENTAL_A 3 +#define LZO_HASH_LZO_INCREMENTAL_B 4 + +#if !defined(LZO_HASH) +# error "choose a hashing strategy" +#endif + +#if (DL_MIN_LEN == 3) +# define _DV2_A(p,shift1,shift2) \ + (((( (lzo_uint32)((p)[0]) << shift1) ^ (p)[1]) << shift2) ^ (p)[2]) +# define _DV2_B(p,shift1,shift2) \ + (((( (lzo_uint32)((p)[2]) << shift1) ^ (p)[1]) << shift2) ^ (p)[0]) +# define _DV3_B(p,shift1,shift2,shift3) \ + ((_DV2_B((p)+1,shift1,shift2) << (shift3)) ^ (p)[0]) +#elif (DL_MIN_LEN == 2) +# define _DV2_A(p,shift1,shift2) \ + (( (lzo_uint32)(p[0]) << shift1) ^ p[1]) +# define _DV2_B(p,shift1,shift2) \ + (( (lzo_uint32)(p[1]) << shift1) ^ p[2]) +#else +# error "invalid DL_MIN_LEN" +#endif +#define _DV_A(p,shift) _DV2_A(p,shift,shift) +#define _DV_B(p,shift) _DV2_B(p,shift,shift) +#define DA2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) + (p)[1]) << (s1)) + (p)[0]) +#define DS2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) - (p)[1]) << (s1)) - (p)[0]) +#define DX2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) ^ (p)[1]) << (s1)) ^ (p)[0]) +#define DA3(p,s1,s2,s3) ((DA2((p)+1,s2,s3) << (s1)) + (p)[0]) +#define DS3(p,s1,s2,s3) ((DS2((p)+1,s2,s3) << (s1)) - (p)[0]) +#define DX3(p,s1,s2,s3) ((DX2((p)+1,s2,s3) << (s1)) ^ (p)[0]) +#define DMS(v,s) ((lzo_uint) (((v) & (D_MASK >> (s))) << (s))) +#define DM(v) DMS(v,0) + +#if (LZO_HASH == LZO_HASH_GZIP) +# define _DINDEX(dv,p) (_DV_A((p),DL_SHIFT)) + +#elif (LZO_HASH == LZO_HASH_GZIP_INCREMENTAL) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_A((p),DL_SHIFT) +# define DVAL_NEXT(dv,p) dv = (((dv) << DL_SHIFT) ^ p[2]) +# define _DINDEX(dv,p) (dv) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_A) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_A((p),5) +# define DVAL_NEXT(dv,p) \ + dv ^= (lzo_uint32)(p[-1]) << (2*5); dv = 
(((dv) << 5) ^ p[2]) +# define _DINDEX(dv,p) ((0x9f5f * (dv)) >> 5) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_B) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_B((p),5) +# define DVAL_NEXT(dv,p) \ + dv ^= p[-1]; dv = (((dv) >> 5) ^ ((lzo_uint32)(p[2]) << (2*5))) +# define _DINDEX(dv,p) ((0x9f5f * (dv)) >> 5) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#else +# error "choose a hashing strategy" +#endif + +#ifndef DINDEX +#define DINDEX(dv,p) ((lzo_uint)((_DINDEX(dv,p)) & DL_MASK) << DD_BITS) +#endif +#if !defined(DINDEX1) && defined(D_INDEX1) +#define DINDEX1 D_INDEX1 +#endif +#if !defined(DINDEX2) && defined(D_INDEX2) +#define DINDEX2 D_INDEX2 +#endif + +#if !defined(__LZO_HASH_INCREMENTAL) +# define DVAL_FIRST(dv,p) ((void) 0) +# define DVAL_NEXT(dv,p) ((void) 0) +# define DVAL_LOOKAHEAD 0 +#endif + +#if !defined(DVAL_ASSERT) +#if defined(__LZO_HASH_INCREMENTAL) && !defined(NDEBUG) +static void DVAL_ASSERT(lzo_uint32 dv, const lzo_byte * p) +{ + lzo_uint32 df; + DVAL_FIRST(df, (p)); + assert(DINDEX(dv, p) == DINDEX(df, p)); +} +#else +# define DVAL_ASSERT(dv,p) ((void) 0) +#endif +#endif + +# define DENTRY(p,in) (p) +# define GINDEX(m_pos,m_off,dict,dindex,in) m_pos = dict[dindex] + +#if (DD_BITS == 0) + +# define UPDATE_D(dict,drun,dv,p,in) dict[ DINDEX(dv,p) ] = DENTRY(p,in) +# define UPDATE_I(dict,drun,index,p,in) dict[index] = DENTRY(p,in) +# define UPDATE_P(ptr,drun,p,in) (ptr)[0] = DENTRY(p,in) + +#else + +# define UPDATE_D(dict,drun,dv,p,in) \ + dict[ DINDEX(dv,p) + drun++ ] = DENTRY(p,in); drun &= DD_MASK +# define UPDATE_I(dict,drun,index,p,in) \ + dict[ (index) + drun++ ] = DENTRY(p,in); drun &= DD_MASK +# define UPDATE_P(ptr,drun,p,in) \ + (ptr) [ drun++ ] = DENTRY(p,in); drun &= DD_MASK + +#endif + +#define LZO_CHECK_MPOS_DET(m_pos,m_off,in,ip,max_offset) \ + (m_pos == NULL || (m_off = (lzo_moff_t) (ip - m_pos)) > max_offset) + +#define LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,max_offset) \ + 
(BOUNDS_CHECKING_OFF_IN_EXPR( \ + (PTR_LT(m_pos,in) || \ + (m_off = (lzo_moff_t) PTR_DIFF(ip,m_pos)) <= 0 || \ + m_off > max_offset) )) + +#if defined(LZO_DETERMINISTIC) +# define LZO_CHECK_MPOS LZO_CHECK_MPOS_DET +#else +# define LZO_CHECK_MPOS LZO_CHECK_MPOS_NON_DET +#endif +#endif +#endif +#endif +#define DO_COMPRESS lzo1x_1_compress +static +lzo_uint do_compress(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem) +{ + register const lzo_byte *ip; + lzo_byte *op; + const lzo_byte *const in_end = in + in_len; + const lzo_byte *const ip_end = in + in_len - M2_MAX_LEN - 5; + const lzo_byte *ii; + lzo_dict_p const dict = (lzo_dict_p) wrkmem; + + op = out; + ip = in; + ii = ip; + + ip += 4; + for (;;) { + register const lzo_byte *m_pos; + + lzo_moff_t m_off; + lzo_uint m_len; + lzo_uint dindex; + + DINDEX1(dindex, ip); + GINDEX(m_pos, m_off, dict, dindex, in); + if (LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET)) + goto literal; +#if 1 + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + DINDEX2(dindex, ip); +#endif + GINDEX(m_pos, m_off, dict, dindex, in); + if (LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET)) + goto literal; + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + goto literal; + + try_match: +#if 1 && defined(LZO_UNALIGNED_OK_2) + if (*(const lzo_ushortp)m_pos != *(const lzo_ushortp)ip) { +#else + if (m_pos[0] != ip[0] || m_pos[1] != ip[1]) { +#endif + ; + } else { + if (m_pos[2] == ip[2]) { + goto match; + } else { + ; + } + } + + literal: + UPDATE_I(dict, 0, dindex, ip, in); + ++ip; + if (ip >= ip_end) + break; + continue; + + match: + UPDATE_I(dict, 0, dindex, ip, in); + if (pd(ip, ii) > 0) { + register lzo_uint t = pd(ip, ii); + + if (t <= 3) { + assert("lzo-04", op - 2 > out); + op[-2] |= LZO_BYTE(t); + } else if (t <= 18) + *op++ = LZO_BYTE(t - 3); + else { + register lzo_uint tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ 
= 0; + } + assert("lzo-05", tt > 0); + *op++ = LZO_BYTE(tt); + } + do + *op++ = *ii++; + while (--t > 0); + } + + assert("lzo-06", ii == ip); + ip += 3; + if (m_pos[3] != *ip++ || m_pos[4] != *ip++ || m_pos[5] != *ip++ + || m_pos[6] != *ip++ || m_pos[7] != *ip++ + || m_pos[8] != *ip++ +#ifdef LZO1Y + || m_pos[9] != *ip++ || m_pos[10] != *ip++ + || m_pos[11] != *ip++ || m_pos[12] != *ip++ + || m_pos[13] != *ip++ || m_pos[14] != *ip++ +#endif + ) { + --ip; + m_len = ip - ii; + assert("lzo-07", m_len >= 3); + assert("lzo-08", m_len <= M2_MAX_LEN); + + if (m_off <= M2_MAX_OFFSET) { + m_off -= 1; +#if defined(LZO1X) + *op++ = + LZO_BYTE(((m_len - + 1) << 5) | ((m_off & 7) << 2)); + *op++ = LZO_BYTE(m_off >> 3); +#elif defined(LZO1Y) + *op++ = + LZO_BYTE(((m_len + + 1) << 4) | ((m_off & 3) << 2)); + *op++ = LZO_BYTE(m_off >> 2); +#endif + } else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + *op++ = LZO_BYTE(M3_MARKER | (m_len - 2)); + goto m3_m4_offset; + } else +#if defined(LZO1X) + { + m_off -= 0x4000; + assert("lzo-09", m_off > 0); + assert("lzo-10", m_off <= 0x7fff); + *op++ = LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> 11) | + (m_len - 2)); + goto m3_m4_offset; + } +#elif defined(LZO1Y) + goto m4_match; +#endif + } else { + { + const lzo_byte *end = in_end; + const lzo_byte *m = m_pos + M2_MAX_LEN + 1; + while (ip < end && *m == *ip) + m++, ip++; + m_len = (ip - ii); + } + assert("lzo-11", m_len > M2_MAX_LEN); + + if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + if (m_len <= 33) + *op++ = + LZO_BYTE(M3_MARKER | (m_len - 2)); + else { + m_len -= 33; + *op++ = M3_MARKER | 0; + goto m3_m4_len; + } + } else { +#if defined(LZO1Y) + m4_match: +#endif + m_off -= 0x4000; + assert("lzo-12", m_off > 0); + assert("lzo-13", m_off <= 0x7fff); + if (m_len <= M4_MAX_LEN) + *op++ = LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> + 11) | (m_len - 2)); + else { + m_len -= M4_MAX_LEN; + *op++ = + LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> 11)); + m3_m4_len: + while (m_len > 255) { + m_len 
-= 255; + *op++ = 0; + } + assert("lzo-14", m_len > 0); + *op++ = LZO_BYTE(m_len); + } + } + + m3_m4_offset: + *op++ = LZO_BYTE((m_off & 63) << 2); + *op++ = LZO_BYTE(m_off >> 6); + } + + ii = ip; + if (ip >= ip_end) + break; + } + + *out_len = op - out; + return pd(in_end, ii); +} + +int DO_COMPRESS(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem) +{ + lzo_byte *op = out; + lzo_uint t; + +#if defined(__LZO_QUERY_COMPRESS) + if (__LZO_IS_COMPRESS_QUERY(in, in_len, out, out_len, wrkmem)) + return __LZO_QUERY_COMPRESS(in, in_len, out, out_len, wrkmem, + D_SIZE, lzo_sizeof(lzo_dict_t)); +#endif + + if (in_len <= M2_MAX_LEN + 5) + t = in_len; + else { + t = do_compress(in, in_len, op, out_len, wrkmem); + op += *out_len; + } + + if (t > 0) { + const lzo_byte *ii = in + in_len - t; + + if (op == out && t <= 238) + *op++ = LZO_BYTE(17 + t); + else if (t <= 3) + op[-2] |= LZO_BYTE(t); + else if (t <= 18) + *op++ = LZO_BYTE(t - 3); + else { + lzo_uint tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + assert("lzo-15", tt > 0); + *op++ = LZO_BYTE(tt); + } + do + *op++ = *ii++; + while (--t > 0); + } + + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; + + *out_len = op - out; + return LZO_E_OK; +} + +#undef do_compress +#undef DO_COMPRESS +#undef LZO_HASH + +#undef LZO_TEST_DECOMPRESS_OVERRUN +#undef LZO_TEST_DECOMPRESS_OVERRUN_INPUT +#undef LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT +#undef LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +#undef DO_DECOMPRESS +#define DO_DECOMPRESS lzo1x_decompress + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN) +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_INPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +# endif +#endif + +#undef TEST_IP +#undef TEST_OP +#undef 
TEST_LOOKBEHIND +#undef NEED_IP +#undef NEED_OP +#undef HAVE_TEST_IP +#undef HAVE_TEST_OP +#undef HAVE_NEED_IP +#undef HAVE_NEED_OP +#undef HAVE_ANY_IP +#undef HAVE_ANY_OP + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 1) +# define TEST_IP (ip < ip_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 2) +# define NEED_IP(x) \ + if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 1) +# define TEST_OP (op <= op_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 2) +# undef TEST_OP +# define NEED_OP(x) \ + if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define TEST_LOOKBEHIND(m_pos,out) if (m_pos < out) goto lookbehind_overrun +#else +# define TEST_LOOKBEHIND(m_pos,op) ((void) 0) +#endif + +#if !defined(LZO_EOF_CODE) && !defined(TEST_IP) +# define TEST_IP (ip < ip_end) +#endif + +#if defined(TEST_IP) +# define HAVE_TEST_IP +#else +# define TEST_IP 1 +#endif +#if defined(TEST_OP) +# define HAVE_TEST_OP +#else +# define TEST_OP 1 +#endif + +#if defined(NEED_IP) +# define HAVE_NEED_IP +#else +# define NEED_IP(x) ((void) 0) +#endif +#if defined(NEED_OP) +# define HAVE_NEED_OP +#else +# define NEED_OP(x) ((void) 0) +#endif + +#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP) +# define HAVE_ANY_IP +#endif +#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP) +# define HAVE_ANY_OP +#endif + +#undef __COPY4 +#define __COPY4(dst,src) * (lzo_uint32p)(dst) = * (const lzo_uint32p)(src) + +#undef COPY4 +#if defined(LZO_UNALIGNED_OK_4) +# define COPY4(dst,src) __COPY4(dst,src) +#elif defined(LZO_ALIGNED_OK_4) +# define COPY4(dst,src) __COPY4((lzo_ptr_t)(dst),(lzo_ptr_t)(src)) +#endif + +#if defined(DO_DECOMPRESS) +int DO_DECOMPRESS(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, 
lzo_voidp wrkmem) +#endif +{ + register lzo_byte *op; + register const lzo_byte *ip; + register lzo_uint t; +#if defined(COPY_DICT) + lzo_uint m_off; + const lzo_byte *dict_end; +#else + register const lzo_byte *m_pos; +#endif + + const lzo_byte *const ip_end = in + in_len; +#if defined(HAVE_ANY_OP) + lzo_byte *const op_end = out + *out_len; +#endif +#if defined(LZO1Z) + lzo_uint last_m_off = 0; +#endif + + LZO_UNUSED(wrkmem); + +#if defined(__LZO_QUERY_DECOMPRESS) + if (__LZO_IS_DECOMPRESS_QUERY(in, in_len, out, out_len, wrkmem)) + return __LZO_QUERY_DECOMPRESS(in, in_len, out, out_len, wrkmem, + 0, 0); +#endif + +#if defined(COPY_DICT) + if (dict) { + if (dict_len > M4_MAX_OFFSET) { + dict += dict_len - M4_MAX_OFFSET; + dict_len = M4_MAX_OFFSET; + } + dict_end = dict + dict_len; + } else { + dict_len = 0; + dict_end = NULL; + } +#endif + + *out_len = 0; + + op = out; + ip = in; + + if (*ip > 17) { + t = *ip++ - 17; + if (t < 4) + goto match_next; + assert("lzo-16", t > 0); + NEED_OP(t); + NEED_IP(t + 1); + do + *op++ = *ip++; + while (--t > 0); + goto first_literal_run; + } + + while (TEST_IP && TEST_OP) { + t = *ip++; + if (t >= 16) + goto match; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 15 + *ip++; + } + assert("lzo-17", t > 0); + NEED_OP(t + 3); + NEED_IP(t + 4); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +#if !defined(LZO_UNALIGNED_OK_4) + if (PTR_ALIGNED2_4(op, ip)) { +#endif + COPY4(op, ip); + op += 4; + ip += 4; + if (--t > 0) { + if (t >= 4) { + do { + COPY4(op, ip); + op += 4; + ip += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do + *op++ = *ip++; + while (--t > 0); + } else + do + *op++ = *ip++; + while (--t > 0); + } +#if !defined(LZO_UNALIGNED_OK_4) + } else +#endif +#endif +#if !defined(LZO_UNALIGNED_OK_4) + { + *op++ = *ip++; + *op++ = *ip++; + *op++ = *ip++; + do + *op++ = *ip++; + while (--t > 0); + } +#endif + + first_literal_run: + + t = *ip++; + if (t >= 16) + goto 
match; +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = (1 + M2_MAX_OFFSET) + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(3); + t = 3; + COPY_DICT(t, m_off) +#else +#if defined(LZO1Z) + t = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - (1 + M2_MAX_OFFSET); + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LOOKBEHIND(m_pos, out); + NEED_OP(3); + *op++ = *m_pos++; + *op++ = *m_pos++; + *op++ = *m_pos; +#endif + goto match_done; + + while (TEST_IP && TEST_OP) { + match: + if (t >= 64) { +#if defined(COPY_DICT) +#if defined(LZO1X) + m_off = 1 + ((t >> 2) & 7) + (*ip++ << 3); + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_off = 1 + ((t >> 2) & 3) + (*ip++ << 2); + t = (t >> 4) - 3; +#elif defined(LZO1Z) + m_off = t & 0x1f; + if (m_off >= 0x1c) + m_off = last_m_off; + else { + m_off = 1 + (m_off << 6) + (*ip++ >> 2); + last_m_off = m_off; + } + t = (t >> 5) - 1; +#endif +#else +#if defined(LZO1X) + m_pos = op - 1; + m_pos -= (t >> 2) & 7; + m_pos -= *ip++ << 3; + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_pos = op - 1; + m_pos -= (t >> 2) & 3; + m_pos -= *ip++ << 2; + t = (t >> 4) - 3; +#elif defined(LZO1Z) + { + lzo_uint off = t & 0x1f; + m_pos = op; + if (off >= 0x1c) { + assert(last_m_off > 0); + m_pos -= last_m_off; + } else { + off = + 1 + (off << 6) + + (*ip++ >> 2); + m_pos -= off; + last_m_off = off; + } + } + t = (t >> 5) - 1; +#endif + TEST_LOOKBEHIND(m_pos, out); + assert("lzo-18", t > 0); + NEED_OP(t + 3 - 1); + goto copy_match; +#endif + } else if (t >= 32) { + t &= 31; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 31 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (ip[0] << 6) + (ip[1] >> 2); + last_m_off = m_off; +#else + m_off = 1 + (ip[0] >> 2) + (ip[1] << 6); +#endif +#else +#if defined(LZO1Z) + { + lzo_uint off = + 1 + (ip[0] 
<< 6) + (ip[1] >> 2); + m_pos = op - off; + last_m_off = off; + } +#elif defined(LZO_UNALIGNED_OK_2) && (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + m_pos = op - 1; + m_pos -= (*(const lzo_ushortp)ip) >> 2; +#else + m_pos = op - 1; + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif +#endif + ip += 2; + } else if (t >= 16) { +#if defined(COPY_DICT) + m_off = (t & 8) << 11; +#else + m_pos = op; + m_pos -= (t & 8) << 11; +#endif + t &= 7; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 7 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off += (ip[0] << 6) + (ip[1] >> 2); +#else + m_off += (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_off == 0) + goto eof_found; + m_off += 0x4000; +#if defined(LZO1Z) + last_m_off = m_off; +#endif +#else +#if defined(LZO1Z) + m_pos -= (ip[0] << 6) + (ip[1] >> 2); +#elif defined(LZO_UNALIGNED_OK_2) && (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + m_pos -= (*(const lzo_ushortp)ip) >> 2; +#else + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; +#if defined(LZO1Z) + last_m_off = op - m_pos; +#endif +#endif + } else { +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = 1 + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(2); + t = 2; + COPY_DICT(t, m_off) +#else +#if defined(LZO1Z) + t = 1 + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - 1; + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LOOKBEHIND(m_pos, out); + NEED_OP(2); + *op++ = *m_pos++; + *op++ = *m_pos; +#endif + goto match_done; + } + +#if defined(COPY_DICT) + + NEED_OP(t + 3 - 1); + t += 3 - 1; + COPY_DICT(t, m_off) +#else + + TEST_LOOKBEHIND(m_pos, out); + assert("lzo-19", t > 0); + NEED_OP(t + 3 - 1); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +#if !defined(LZO_UNALIGNED_OK_4) + if (t >= 2 * 4 - (3 - 1) && PTR_ALIGNED2_4(op, m_pos)) { 
+ assert((op - m_pos) >= 4); +#else + if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { +#endif + COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4 - (3 - 1); + do { + COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do + *op++ = *m_pos++; + while (--t > 0); + } else +#endif + { + copy_match: + *op++ = *m_pos++; + *op++ = *m_pos++; + do + *op++ = *m_pos++; + while (--t > 0); + } + +#endif + + match_done: +#if defined(LZO1Z) + t = ip[-1] & 3; +#else + t = ip[-2] & 3; +#endif + if (t == 0) + break; + + match_next: + assert("lzo-20", t > 0); + NEED_OP(t); + NEED_IP(t + 1); + do + *op++ = *ip++; + while (--t > 0); + t = *ip++; + } + } + +#if defined(HAVE_TEST_IP) || defined(HAVE_TEST_OP) + *out_len = op - out; + return LZO_E_EOF_NOT_FOUND; +#endif + + eof_found: + assert("lzo-21", t == 1); + *out_len = op - out; + return (ip == ip_end ? LZO_E_OK : + (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); + +#if defined(HAVE_NEED_IP) + input_overrun: + *out_len = op - out; + return LZO_E_INPUT_OVERRUN; +#endif + +#if defined(HAVE_NEED_OP) + output_overrun: + *out_len = op - out; + return LZO_E_OUTPUT_OVERRUN; +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) + lookbehind_overrun: + *out_len = op - out; + return LZO_E_LOOKBEHIND_OVERRUN; +#endif +} + +#define LZO_TEST_DECOMPRESS_OVERRUN +#undef DO_DECOMPRESS +#define DO_DECOMPRESS lzo1x_decompress_safe + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN) +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_INPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +# endif +#endif + +#undef TEST_IP +#undef TEST_OP +#undef TEST_LOOKBEHIND +#undef NEED_IP +#undef NEED_OP +#undef HAVE_TEST_IP +#undef HAVE_TEST_OP +#undef HAVE_NEED_IP +#undef HAVE_NEED_OP +#undef 
HAVE_ANY_IP +#undef HAVE_ANY_OP + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 1) +# define TEST_IP (ip < ip_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 2) +# define NEED_IP(x) \ + if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 1) +# define TEST_OP (op <= op_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 2) +# undef TEST_OP +# define NEED_OP(x) \ + if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define TEST_LOOKBEHIND(m_pos,out) if (m_pos < out) goto lookbehind_overrun +#else +# define TEST_LOOKBEHIND(m_pos,op) ((void) 0) +#endif + +#if !defined(LZO_EOF_CODE) && !defined(TEST_IP) +# define TEST_IP (ip < ip_end) +#endif + +#if defined(TEST_IP) +# define HAVE_TEST_IP +#else +# define TEST_IP 1 +#endif +#if defined(TEST_OP) +# define HAVE_TEST_OP +#else +# define TEST_OP 1 +#endif + +#if defined(NEED_IP) +# define HAVE_NEED_IP +#else +# define NEED_IP(x) ((void) 0) +#endif +#if defined(NEED_OP) +# define HAVE_NEED_OP +#else +# define NEED_OP(x) ((void) 0) +#endif + +#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP) +# define HAVE_ANY_IP +#endif +#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP) +# define HAVE_ANY_OP +#endif + +#undef __COPY4 +#define __COPY4(dst,src) * (lzo_uint32p)(dst) = * (const lzo_uint32p)(src) + +#undef COPY4 +#if defined(LZO_UNALIGNED_OK_4) +# define COPY4(dst,src) __COPY4(dst,src) +#elif defined(LZO_ALIGNED_OK_4) +# define COPY4(dst,src) __COPY4((lzo_ptr_t)(dst),(lzo_ptr_t)(src)) +#endif + +/***** End of minilzo.c *****/ diff --git a/fs/reiser4/plugin/compress/minilzo.h b/fs/reiser4/plugin/compress/minilzo.h new file mode 100644 index 000000000000..6a470012db04 --- /dev/null +++ b/fs/reiser4/plugin/compress/minilzo.h @@ -0,0 +1,70 @@ +/* minilzo.h -- 
mini subset of the LZO real-time data compression library + adopted for reiser4 compression transform plugin. + + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/lzo/ + */ + +/* + * NOTE: + * the full LZO package can be found at + * http://www.oberhumer.com/opensource/lzo/ + */ + +#ifndef __MINILZO_H +#define __MINILZO_H + +#define MINILZO_VERSION 0x1080 + +#include "lzoconf.h" + +/* Memory required for the wrkmem parameter. + * When the required size is 0, you can also pass a NULL pointer. 
+ */ + +#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS +#define LZO1X_1_MEM_COMPRESS ((lzo_uint32) (16384L * lzo_sizeof_dict_t)) +#define LZO1X_MEM_DECOMPRESS (0) + +/* compression */ +extern int lzo1x_1_compress(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem); +/* decompression */ +extern int lzo1x_decompress(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem /* NOT USED */); +/* safe decompression with overrun testing */ +extern int lzo1x_decompress_safe(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem /* NOT USED */ ); + +#endif /* already included */ diff --git a/fs/reiser4/plugin/crypto/cipher.c b/fs/reiser4/plugin/crypto/cipher.c new file mode 100644 index 000000000000..e9181541ef6c --- /dev/null +++ b/fs/reiser4/plugin/crypto/cipher.c @@ -0,0 +1,37 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, + licensing governed by reiser4/README */ +/* Reiser4 cipher transform plugins */ + +#include "../../debug.h" +#include "../plugin.h" + +cipher_plugin cipher_plugins[LAST_CIPHER_ID] = { + [NONE_CIPHER_ID] = { + .h = { + .type_id = REISER4_CIPHER_PLUGIN_TYPE, + .id = NONE_CIPHER_ID, + .pops = NULL, + .label = "none", + .desc = "no cipher transform", + .linkage = {NULL, NULL} + }, + .alloc = NULL, + .free = NULL, + .scale = NULL, + .align_stream = NULL, + .setkey = NULL, + .encrypt = NULL, + .decrypt = NULL + } +}; + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/crypto/cipher.h b/fs/reiser4/plugin/crypto/cipher.h new file mode 100644 index 000000000000..a7920e0a1e95 --- /dev/null +++ b/fs/reiser4/plugin/crypto/cipher.h @@ -0,0 +1,55 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* This file contains definitions for the objects operated + by reiser4 key manager, which is something like keyring + wrapped by appropriate reiser4 plugin */ + +#if !defined( __FS_REISER4_CRYPT_H__ ) +#define __FS_REISER4_CRYPT_H__ + +#include + +/* key info imported from user space */ +struct reiser4_crypto_data { + int keysize; /* uninstantiated key size */ + __u8 * key; /* uninstantiated key */ + int keyid_size; /* size of passphrase */ + __u8 * keyid; /* passphrase */ +}; + +/* This object contains all needed infrastructure to implement + cipher transform. This is operated (allocating, inheriting, + validating, binding to host inode, etc..) by reiser4 key manager. + + This info can be allocated in two cases: + 1. importing a key from user space. + 2. reading inode from disk */ +struct reiser4_crypto_info { + struct inode * host; + struct crypto_hash * digest; + struct crypto_blkcipher * cipher; +#if 0 + cipher_key_plugin * kplug; /* key manager */ +#endif + __u8 * keyid; /* key fingerprint, created by digest plugin, + using uninstantiated key and passphrase. 
+ supposed to be stored in disk stat-data */ + int inst; /* this indicates if the cipher key is + instantiated (case 1 above) */ + int keysize; /* uninstantiated key size (bytes), supposed + to be stored in disk stat-data */ + int keyload_count; /* number of the objects which has this + crypto-stat attached */ +}; + +#endif /* __FS_REISER4_CRYPT_H__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/crypto/digest.c b/fs/reiser4/plugin/crypto/digest.c new file mode 100644 index 000000000000..7508917d4407 --- /dev/null +++ b/fs/reiser4/plugin/crypto/digest.c @@ -0,0 +1,58 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* reiser4 digest transform plugin (is used by cryptcompress object plugin) */ +/* EDWARD-FIXME-HANS: and it does what? a digest is a what? */ +#include "../../debug.h" +#include "../plugin_header.h" +#include "../plugin.h" +#include "../file/cryptcompress.h" + +#include + +extern digest_plugin digest_plugins[LAST_DIGEST_ID]; + +static struct crypto_hash * alloc_sha256 (void) +{ +#if REISER4_SHA256 + return crypto_alloc_hash ("sha256", 0, CRYPTO_ALG_ASYNC); +#else + warning("edward-1418", "sha256 unsupported"); + return ERR_PTR(-EINVAL); +#endif +} + +static void free_sha256 (struct crypto_hash * tfm) +{ +#if REISER4_SHA256 + crypto_free_hash(tfm); +#endif + return; +} + +/* digest plugins */ +digest_plugin digest_plugins[LAST_DIGEST_ID] = { + [SHA256_32_DIGEST_ID] = { + .h = { + .type_id = REISER4_DIGEST_PLUGIN_TYPE, + .id = SHA256_32_DIGEST_ID, + .pops = NULL, + .label = "sha256_32", + .desc = "sha256_32 digest transform", + .linkage = {NULL, NULL} + }, + .fipsize = sizeof(__u32), + .alloc = alloc_sha256, + .free = free_sha256 + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + 
scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/dir/Makefile b/fs/reiser4/plugin/dir/Makefile new file mode 100644 index 000000000000..ed370b1ed093 --- /dev/null +++ b/fs/reiser4/plugin/dir/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += dir_plugins.o + +dir_plugins-objs := \ + hashed_dir.o \ + seekable_dir.o diff --git a/fs/reiser4/plugin/dir/dir.h b/fs/reiser4/plugin/dir/dir.h new file mode 100644 index 000000000000..4a91ebeb3232 --- /dev/null +++ b/fs/reiser4/plugin/dir/dir.h @@ -0,0 +1,36 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* this file contains declarations of methods implementing directory plugins */ + +#if !defined( __REISER4_DIR_H__ ) +#define __REISER4_DIR_H__ + +/*#include "../../key.h" + +#include */ + +/* declarations of functions implementing HASHED_DIR_PLUGIN_ID dir plugin */ + +/* "hashed" directory methods of dir plugin */ +void build_entry_key_hashed(const struct inode *, const struct qstr *, + reiser4_key *); + +/* declarations of functions implementing SEEKABLE_HASHED_DIR_PLUGIN_ID dir plugin */ + +/* "seekable" directory methods of dir plugin */ +void build_entry_key_seekable(const struct inode *, const struct qstr *, + reiser4_key *); + +/* __REISER4_DIR_H__ */ +#endif + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/dir/hashed_dir.c b/fs/reiser4/plugin/dir/hashed_dir.c new file mode 100644 index 000000000000..0f34824dbae4 --- /dev/null +++ b/fs/reiser4/plugin/dir/hashed_dir.c @@ -0,0 +1,81 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Directory plugin using hashes (see fs/reiser4/plugin/hash.c) to map file + names to the files. */ + +/* + * Hashed directory logically consists of persistent directory + * entries. 
Directory entry is a pair of a file name and a key of stat-data of + * a file that has this name in the given directory. + * + * Directory entries are stored in the tree in the form of directory + * items. Directory item should implement dir_entry_ops portion of item plugin + * interface (see plugin/item/item.h). Hashed directory interacts with + * directory item plugin exclusively through dir_entry_ops operations. + * + * Currently there are two implementations of directory items: "simple + * directory item" (plugin/item/sde.[ch]), and "compound directory item" + * (plugin/item/cde.[ch]) with the latter being the default. + * + * There is, however some delicate way through which directory code interferes + * with item plugin: key assignment policy. A key for a directory item is + * chosen by directory code, and as described in kassign.c, this key contains + * a portion of file name. Directory item uses this knowledge to avoid storing + * this portion of file name twice: in the key and in the directory item body. 
+ * + */ + +#include "../../inode.h" + +void complete_entry_key(const struct inode *, const char *name, + int len, reiser4_key * result); + +/* this is implementation of build_entry_key method of dir + plugin for HASHED_DIR_PLUGIN_ID + */ +void build_entry_key_hashed(const struct inode *dir, /* directory where entry is + * (or will be) in.*/ + const struct qstr *qname, /* name of file referenced + * by this entry */ + reiser4_key * result /* resulting key of directory + * entry */ ) +{ + const char *name; + int len; + + assert("nikita-1139", dir != NULL); + assert("nikita-1140", qname != NULL); + assert("nikita-1141", qname->name != NULL); + assert("nikita-1142", result != NULL); + + name = qname->name; + len = qname->len; + + assert("nikita-2867", strlen(name) == len); + + reiser4_key_init(result); + /* locality of directory entry's key is objectid of parent + directory */ + set_key_locality(result, get_inode_oid(dir)); + /* minor packing locality is constant */ + set_key_type(result, KEY_FILE_NAME_MINOR); + /* dot is special case---we always want it to be first entry in + a directory. Actually, we just want to have smallest + directory entry. 
+ */ + if (len == 1 && name[0] == '.') + return; + + /* initialize part of entry key which depends on file name */ + complete_entry_key(dir, name, len, result); +} + +/* Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/dir/seekable_dir.c b/fs/reiser4/plugin/dir/seekable_dir.c new file mode 100644 index 000000000000..c1c6c4cc400f --- /dev/null +++ b/fs/reiser4/plugin/dir/seekable_dir.c @@ -0,0 +1,46 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../../inode.h" + +/* this is implementation of build_entry_key method of dir + plugin for SEEKABLE_HASHED_DIR_PLUGIN_ID + This is for directories where we want repeatable and restartable readdir() + even in case 32bit user level struct dirent (readdir(3)). +*/ +void +build_entry_key_seekable(const struct inode *dir, const struct qstr *name, + reiser4_key * result) +{ + oid_t objectid; + + assert("nikita-2283", dir != NULL); + assert("nikita-2284", name != NULL); + assert("nikita-2285", name->name != NULL); + assert("nikita-2286", result != NULL); + + reiser4_key_init(result); + /* locality of directory entry's key is objectid of parent + directory */ + set_key_locality(result, get_inode_oid(dir)); + /* minor packing locality is constant */ + set_key_type(result, KEY_FILE_NAME_MINOR); + /* dot is special case---we always want it to be first entry in + a directory. Actually, we just want to have smallest + directory entry. + */ + if ((name->len == 1) && (name->name[0] == '.')) + return; + + /* objectid of key is 31 lowest bits of hash. */ + objectid = + inode_hash_plugin(dir)->hash(name->name, + (int)name->len) & 0x7fffffff; + + assert("nikita-2303", !(objectid & ~KEY_OBJECTID_MASK)); + set_key_objectid(result, objectid); + + /* offset is always 0. 
*/ + set_key_offset(result, (__u64) 0); + return; +} diff --git a/fs/reiser4/plugin/dir_plugin_common.c b/fs/reiser4/plugin/dir_plugin_common.c new file mode 100644 index 000000000000..5e72fa670adb --- /dev/null +++ b/fs/reiser4/plugin/dir_plugin_common.c @@ -0,0 +1,865 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for most of methods of + directory plugin +*/ + +#include "../inode.h" + +int reiser4_find_entry(struct inode *dir, struct dentry *name, + lock_handle * , znode_lock_mode, reiser4_dir_entry_desc *); +int reiser4_lookup_name(struct inode *parent, struct dentry *dentry, + reiser4_key * key); +void check_light_weight(struct inode *inode, struct inode *parent); + +/* this is common implementation of get_parent method of dir plugin + this is used by NFS kernel server to "climb" up directory tree to + check permissions + */ +struct dentry *get_parent_common(struct inode *child) +{ + struct super_block *s; + struct inode *parent; + struct dentry dotdot; + struct dentry *dentry; + reiser4_key key; + int result; + + /* + * lookup dotdot entry. + */ + + s = child->i_sb; + memset(&dotdot, 0, sizeof(dotdot)); + dotdot.d_name.name = ".."; + dotdot.d_name.len = 2; + dotdot.d_op = &get_super_private(s)->ops.dentry; + + result = reiser4_lookup_name(child, &dotdot, &key); + if (result != 0) + return ERR_PTR(result); + + parent = reiser4_iget(s, &key, 1); + if (!IS_ERR(parent)) { + /* + * FIXME-NIKITA dubious: attributes are inherited from @child + * to @parent. But: + * + * (*) this is the only this we can do + * + * (*) attributes of light-weight object are inherited + * from a parent through which object was looked up first, + * so it is ambiguous anyway. 
+ * + */ + check_light_weight(parent, child); + reiser4_iget_complete(parent); + dentry = d_obtain_alias(parent); + if (!IS_ERR(dentry)) + dentry->d_op = &get_super_private(s)->ops.dentry; + } else if (PTR_ERR(parent) == -ENOENT) + dentry = ERR_PTR(RETERR(-ESTALE)); + else + dentry = (void *)parent; + return dentry; +} + +/* this is common implementation of is_name_acceptable method of dir + plugin + */ +int is_name_acceptable_common(const struct inode *inode, /* directory to check*/ + const char *name UNUSED_ARG, /* name to check */ + int len/* @name's length */) +{ + assert("nikita-733", inode != NULL); + assert("nikita-734", name != NULL); + assert("nikita-735", len > 0); + + return len <= reiser4_max_filename_len(inode); +} + +/* there is no common implementation of build_entry_key method of dir + plugin. See plugin/dir/hashed_dir.c:build_entry_key_hashed() or + plugin/dir/seekable.c:build_entry_key_seekable() for example +*/ + +/* this is common implementation of build_readdir_key method of dir + plugin + see reiser4_readdir_common for more details +*/ +int build_readdir_key_common(struct file *dir /* directory being read */ , + reiser4_key * result/* where to store key */) +{ + reiser4_file_fsdata *fdata; + struct inode *inode; + + assert("nikita-1361", dir != NULL); + assert("nikita-1362", result != NULL); + assert("nikita-1363", dir->f_path.dentry != NULL); + inode = file_inode(dir); + assert("nikita-1373", inode != NULL); + + fdata = reiser4_get_file_fsdata(dir); + if (IS_ERR(fdata)) + return PTR_ERR(fdata); + assert("nikita-1364", fdata != NULL); + return extract_key_from_de_id(get_inode_oid(inode), + &fdata->dir.readdir.position. 
+ dir_entry_key, result); + +} + +void reiser4_adjust_dir_file(struct inode *, const struct dentry *, int offset, + int adj); + +/* this is common implementation of add_entry method of dir plugin +*/ +int reiser4_add_entry_common(struct inode *object, /* directory to add new name + * in */ + struct dentry *where, /* new name */ + reiser4_object_create_data * data, /* parameters of + * new object */ + reiser4_dir_entry_desc * entry /* parameters of + * new directory + * entry */) +{ + int result; + coord_t *coord; + lock_handle lh; + struct reiser4_dentry_fsdata *fsdata; + reiser4_block_nr reserve; + + assert("nikita-1114", object != NULL); + assert("nikita-1250", where != NULL); + + fsdata = reiser4_get_dentry_fsdata(where); + if (unlikely(IS_ERR(fsdata))) + return PTR_ERR(fsdata); + + reserve = inode_dir_plugin(object)->estimate.add_entry(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + init_lh(&lh); + coord = &fsdata->dec.entry_coord; + coord_clear_iplug(coord); + + /* check for this entry in a directory. This is plugin method. */ + result = reiser4_find_entry(object, where, &lh, ZNODE_WRITE_LOCK, + entry); + if (likely(result == -ENOENT)) { + /* add new entry. Just pass control to the directory + item plugin. 
*/ + assert("nikita-1709", inode_dir_item_plugin(object)); + assert("nikita-2230", coord->node == lh.node); + reiser4_seal_done(&fsdata->dec.entry_seal); + result = + inode_dir_item_plugin(object)->s.dir.add_entry(object, + coord, &lh, + where, + entry); + if (result == 0) { + reiser4_adjust_dir_file(object, where, + fsdata->dec.pos + 1, +1); + INODE_INC_FIELD(object, i_size); + } + } else if (result == 0) { + assert("nikita-2232", coord->node == lh.node); + result = RETERR(-EEXIST); + } + done_lh(&lh); + + return result; +} + +/** + * rem_entry - remove entry from directory item + * @dir: + * @dentry: + * @entry: + * @coord: + * @lh: + * + * Checks that coordinate @coord is set properly and calls item plugin + * method to cut entry. + */ +static int +rem_entry(struct inode *dir, struct dentry *dentry, + reiser4_dir_entry_desc * entry, coord_t *coord, lock_handle * lh) +{ + item_plugin *iplug; + struct inode *child; + + iplug = inode_dir_item_plugin(dir); + child = dentry->d_inode; + assert("nikita-3399", child != NULL); + + /* check that we are really destroying an entry for @child */ + if (REISER4_DEBUG) { + int result; + reiser4_key key; + + result = iplug->s.dir.extract_key(coord, &key); + if (result != 0) + return result; + if (get_key_objectid(&key) != get_inode_oid(child)) { + warning("nikita-3397", + "rem_entry: %#llx != %#llx\n", + get_key_objectid(&key), + (unsigned long long)get_inode_oid(child)); + return RETERR(-EIO); + } + } + return iplug->s.dir.rem_entry(dir, &dentry->d_name, coord, lh, entry); +} + +/** + * reiser4_rem_entry_common - remove entry from a directory + * @dir: directory to remove entry from + * @where: name that is being removed + * @entry: description of entry being removed + * + * This is common implementation of rem_entry method of dir plugin. 
+ */ +int reiser4_rem_entry_common(struct inode *dir, + struct dentry *dentry, + reiser4_dir_entry_desc * entry) +{ + int result; + coord_t *coord; + lock_handle lh; + struct reiser4_dentry_fsdata *fsdata; + __u64 tograb; + + assert("nikita-1124", dir != NULL); + assert("nikita-1125", dentry != NULL); + + tograb = inode_dir_plugin(dir)->estimate.rem_entry(dir); + result = reiser4_grab_space(tograb, BA_CAN_COMMIT | BA_RESERVED); + if (result != 0) + return RETERR(-ENOSPC); + + init_lh(&lh); + + /* check for this entry in a directory. This is plugin method. */ + result = reiser4_find_entry(dir, dentry, &lh, ZNODE_WRITE_LOCK, entry); + fsdata = reiser4_get_dentry_fsdata(dentry); + if (IS_ERR(fsdata)) { + done_lh(&lh); + return PTR_ERR(fsdata); + } + + coord = &fsdata->dec.entry_coord; + + assert("nikita-3404", + get_inode_oid(dentry->d_inode) != get_inode_oid(dir) || + dir->i_size <= 1); + + coord_clear_iplug(coord); + if (result == 0) { + /* remove entry. Just pass control to the directory item + plugin. */ + assert("vs-542", inode_dir_item_plugin(dir)); + reiser4_seal_done(&fsdata->dec.entry_seal); + reiser4_adjust_dir_file(dir, dentry, fsdata->dec.pos, -1); + result = + WITH_COORD(coord, + rem_entry(dir, dentry, entry, coord, &lh)); + if (result == 0) { + if (dir->i_size >= 1) + INODE_DEC_FIELD(dir, i_size); + else { + warning("nikita-2509", "Dir %llu is runt", + (unsigned long long) + get_inode_oid(dir)); + result = RETERR(-EIO); + } + + assert("nikita-3405", dentry->d_inode->i_nlink != 1 || + dentry->d_inode->i_size != 2 || + inode_dir_plugin(dentry->d_inode) == NULL); + } + } + done_lh(&lh); + + return result; +} + +static reiser4_block_nr estimate_init(struct inode *parent, + struct inode *object); +static int create_dot_dotdot(struct inode *object, struct inode *parent); + +/* this is common implementation of init method of dir plugin + create "." and ".." 
entries +*/ +int reiser4_dir_init_common(struct inode *object, /* new directory */ + struct inode *parent, /* parent directory */ + reiser4_object_create_data * data /* info passed + * to us, this + * is filled by + * reiser4() + * syscall in + * particular */) +{ + reiser4_block_nr reserve; + + assert("nikita-680", object != NULL); + assert("nikita-681", S_ISDIR(object->i_mode)); + assert("nikita-682", parent != NULL); + assert("nikita-684", data != NULL); + assert("nikita-686", data->id == DIRECTORY_FILE_PLUGIN_ID); + assert("nikita-687", object->i_mode & S_IFDIR); + + reserve = estimate_init(parent, object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + return create_dot_dotdot(object, parent); +} + +/* this is common implementation of done method of dir plugin + remove "." entry +*/ +int reiser4_dir_done_common(struct inode *object/* object being deleted */) +{ + int result; + reiser4_block_nr reserve; + struct dentry goodby_dots; + reiser4_dir_entry_desc entry; + + assert("nikita-1449", object != NULL); + + if (reiser4_inode_get_flag(object, REISER4_NO_SD)) + return 0; + + /* of course, this can be rewritten to sweep everything in one + reiser4_cut_tree(). 
*/ + memset(&entry, 0, sizeof entry); + + /* FIXME: this done method is called from reiser4_delete_dir_common + * which reserved space already */ + reserve = inode_dir_plugin(object)->estimate.rem_entry(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT | BA_RESERVED)) + return RETERR(-ENOSPC); + + memset(&goodby_dots, 0, sizeof goodby_dots); + entry.obj = goodby_dots.d_inode = object; + goodby_dots.d_name.name = "."; + goodby_dots.d_name.len = 1; + result = reiser4_rem_entry_common(object, &goodby_dots, &entry); + reiser4_free_dentry_fsdata(&goodby_dots); + if (unlikely(result != 0 && result != -ENOMEM && result != -ENOENT)) + warning("nikita-2252", "Cannot remove dot of %lli: %i", + (unsigned long long)get_inode_oid(object), result); + return 0; +} + +/* this is common implementation of attach method of dir plugin +*/ +int reiser4_attach_common(struct inode *child UNUSED_ARG, + struct inode *parent UNUSED_ARG) +{ + assert("nikita-2647", child != NULL); + assert("nikita-2648", parent != NULL); + + return 0; +} + +/* this is common implementation of detach method of dir plugin + remove "..", decrease nlink on parent +*/ +int reiser4_detach_common(struct inode *object, struct inode *parent) +{ + int result; + struct dentry goodby_dots; + reiser4_dir_entry_desc entry; + + assert("nikita-2885", object != NULL); + assert("nikita-2886", !reiser4_inode_get_flag(object, REISER4_NO_SD)); + + memset(&entry, 0, sizeof entry); + + /* NOTE-NIKITA this only works if @parent is -the- parent of + @object, viz. object whose key is stored in dotdot + entry. Wouldn't work with hard-links on directories. */ + memset(&goodby_dots, 0, sizeof goodby_dots); + entry.obj = goodby_dots.d_inode = parent; + goodby_dots.d_name.name = ".."; + goodby_dots.d_name.len = 2; + result = reiser4_rem_entry_common(object, &goodby_dots, &entry); + reiser4_free_dentry_fsdata(&goodby_dots); + if (result == 0) { + /* the dot should be the only entry remaining at this time... 
*/ + assert("nikita-3400", + object->i_size == 1 && object->i_nlink <= 2); +#if 0 + /* and, together with the only name directory can have, they + * provides for the last 2 remaining references. If we get + * here as part of error handling during mkdir, @object + * possibly has no name yet, so its nlink == 1. If we get here + * from rename (targeting empty directory), it has no name + * already, so its nlink == 1. */ + assert("nikita-3401", + object->i_nlink == 2 || object->i_nlink == 1); +#endif + + /* decrement nlink of directory removed ".." pointed + to */ + reiser4_del_nlink(parent, NULL, 0); + } + return result; +} + +/* this is common implementation of estimate.add_entry method of + dir plugin + estimation of adding entry which supposes that entry is inserting a + unit into item +*/ +reiser4_block_nr estimate_add_entry_common(const struct inode *inode) +{ + return estimate_one_insert_into_item(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.rem_entry method of dir + plugin +*/ +reiser4_block_nr estimate_rem_entry_common(const struct inode *inode) +{ + return estimate_one_item_removal(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.unlink method of dir + plugin +*/ +reiser4_block_nr +dir_estimate_unlink_common(const struct inode *parent, + const struct inode *object) +{ + reiser4_block_nr res; + + /* hashed_rem_entry(object) */ + res = inode_dir_plugin(object)->estimate.rem_entry(object); + /* del_nlink(parent) */ + res += 2 * inode_file_plugin(parent)->estimate.update(parent); + + return res; +} + +/* + * helper for inode_ops ->lookup() and dir plugin's ->get_parent() + * methods: if @inode is a light-weight file, setup its credentials + * that are not stored in the stat-data in this case + */ +void check_light_weight(struct inode *inode, struct inode *parent) +{ + if (reiser4_inode_get_flag(inode, REISER4_LIGHT_WEIGHT)) { + inode->i_uid = parent->i_uid; + inode->i_gid = parent->i_gid; + /* 
clear light-weight flag. If inode would be read by any + other name, [ug]id wouldn't change. */ + reiser4_inode_clr_flag(inode, REISER4_LIGHT_WEIGHT); + } +} + +/* looks for name specified in @dentry in directory @parent and if name is + found - key of object found entry points to is stored in @entry->key */ +int reiser4_lookup_name(struct inode *parent, /* inode of directory to lookup + * for name in */ + struct dentry *dentry, /* name to look for */ + reiser4_key * key/* place to store key */) +{ + int result; + coord_t *coord; + lock_handle lh; + const char *name; + int len; + reiser4_dir_entry_desc entry; + struct reiser4_dentry_fsdata *fsdata; + + assert("nikita-1247", parent != NULL); + assert("nikita-1248", dentry != NULL); + assert("nikita-1123", dentry->d_name.name != NULL); + assert("vs-1486", + dentry->d_op == &get_super_private(parent->i_sb)->ops.dentry); + + name = dentry->d_name.name; + len = dentry->d_name.len; + + if (!inode_dir_plugin(parent)->is_name_acceptable(parent, name, len)) + /* some arbitrary error code to return */ + return RETERR(-ENAMETOOLONG); + + fsdata = reiser4_get_dentry_fsdata(dentry); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + coord = &fsdata->dec.entry_coord; + coord_clear_iplug(coord); + init_lh(&lh); + + /* find entry in a directory. This is plugin method. */ + result = reiser4_find_entry(parent, dentry, &lh, ZNODE_READ_LOCK, + &entry); + if (result == 0) { + /* entry was found, extract object key from it. */ + result = + WITH_COORD(coord, + item_plugin_by_coord(coord)->s.dir. 
+ extract_key(coord, key)); + } + done_lh(&lh); + return result; + +} + +/* helper for reiser4_dir_init_common(): estimate number of blocks to reserve */ +static reiser4_block_nr +estimate_init(struct inode *parent, struct inode *object) +{ + reiser4_block_nr res = 0; + + assert("vpf-321", parent != NULL); + assert("vpf-322", object != NULL); + + /* hashed_add_entry(object) */ + res += inode_dir_plugin(object)->estimate.add_entry(object); + /* reiser4_add_nlink(object) */ + res += inode_file_plugin(object)->estimate.update(object); + /* hashed_add_entry(object) */ + res += inode_dir_plugin(object)->estimate.add_entry(object); + /* reiser4_add_nlink(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + + return 0; +} + +/* helper function for reiser4_dir_init_common(). Create "." and ".." */ +static int create_dot_dotdot(struct inode *object/* object to create dot and + * dotdot for */ , + struct inode *parent/* parent of @object */) +{ + int result; + struct dentry dots_entry; + reiser4_dir_entry_desc entry; + + assert("nikita-688", object != NULL); + assert("nikita-689", S_ISDIR(object->i_mode)); + assert("nikita-691", parent != NULL); + + /* We store dot and dotdot as normal directory entries. This is + not necessary, because almost all information stored in them + is already in the stat-data of directory, the only thing + being missed is objectid of grand-parent directory that can + easily be added there as extension. + + But it is done the way it is done, because not storing dot + and dotdot will lead to the following complications: + + . special case handling in ->lookup(). + . addition of another extension to the sd. + . dependency on key allocation policy for stat data. 
+ + */ + + memset(&entry, 0, sizeof entry); + memset(&dots_entry, 0, sizeof dots_entry); + entry.obj = dots_entry.d_inode = object; + dots_entry.d_name.name = "."; + dots_entry.d_name.len = 1; + result = reiser4_add_entry_common(object, &dots_entry, NULL, &entry); + reiser4_free_dentry_fsdata(&dots_entry); + + if (result == 0) { + result = reiser4_add_nlink(object, object, 0); + if (result == 0) { + entry.obj = dots_entry.d_inode = parent; + dots_entry.d_name.name = ".."; + dots_entry.d_name.len = 2; + result = reiser4_add_entry_common(object, + &dots_entry, NULL, &entry); + reiser4_free_dentry_fsdata(&dots_entry); + /* if creation of ".." failed, iput() will delete + object with ".". */ + if (result == 0) { + result = reiser4_add_nlink(parent, object, 0); + if (result != 0) + /* + * if we failed to bump i_nlink, try + * to remove ".." + */ + reiser4_detach_common(object, parent); + } + } + } + + if (result != 0) { + /* + * in the case of error, at least update stat-data so that, + * ->i_nlink updates are not lingering. + */ + reiser4_update_sd(object); + reiser4_update_sd(parent); + } + + return result; +} + +/* + * return 0 iff @coord contains a directory entry for the file with the name + * @name. + */ +static int +check_item(const struct inode *dir, const coord_t *coord, const char *name) +{ + item_plugin *iplug; + char buf[DE_NAME_BUF_LEN]; + + iplug = item_plugin_by_coord(coord); + if (iplug == NULL) { + warning("nikita-1135", "Cannot get item plugin"); + print_coord("coord", coord, 1); + return RETERR(-EIO); + } else if (item_id_by_coord(coord) != + item_id_by_plugin(inode_dir_item_plugin(dir))) { + /* item id of current item does not match to id of items a + directory is built of */ + warning("nikita-1136", "Wrong item plugin"); + print_coord("coord", coord, 1); + return RETERR(-EIO); + } + assert("nikita-1137", iplug->s.dir.extract_name); + + /* Compare name stored in this entry with name we are looking for. 
+ + NOTE-NIKITA Here should go code for support of something like + unicode, code tables, etc. + */ + return !!strcmp(name, iplug->s.dir.extract_name(coord, buf)); +} + +static int +check_entry(const struct inode *dir, coord_t *coord, const struct qstr *name) +{ + return WITH_COORD(coord, check_item(dir, coord, name->name)); +} + +/* + * argument package used by entry_actor to scan entries with identical keys. + */ +struct entry_actor_args { + /* name we are looking for */ + const char *name; + /* key of directory entry. entry_actor() scans through sequence of + * items/units having the same key */ + reiser4_key *key; + /* how many entries with duplicate key was scanned so far. */ + int non_uniq; +#if REISER4_USE_COLLISION_LIMIT + /* scan limit */ + int max_non_uniq; +#endif + /* return parameter: set to true, if ->name wasn't found */ + int not_found; + /* what type of lock to take when moving to the next node during + * scan */ + znode_lock_mode mode; + + /* last coord that was visited during scan */ + coord_t last_coord; + /* last node locked during scan */ + lock_handle last_lh; + /* inode of directory */ + const struct inode *inode; +}; + +/* Function called by reiser4_find_entry() to look for given name + in the directory. */ +static int entry_actor(reiser4_tree * tree UNUSED_ARG /* tree being scanned */ , + coord_t *coord /* current coord */ , + lock_handle * lh /* current lock handle */ , + void *entry_actor_arg/* argument to scan */) +{ + reiser4_key unit_key; + struct entry_actor_args *args; + + assert("nikita-1131", tree != NULL); + assert("nikita-1132", coord != NULL); + assert("nikita-1133", entry_actor_arg != NULL); + + args = entry_actor_arg; + ++args->non_uniq; +#if REISER4_USE_COLLISION_LIMIT + if (args->non_uniq > args->max_non_uniq) { + args->not_found = 1; + /* hash collision overflow. */ + return RETERR(-EBUSY); + } +#endif + + /* + * did we just reach the end of the sequence of items/units with + * identical keys? 
+ */ + if (!keyeq(args->key, unit_key_by_coord(coord, &unit_key))) { + assert("nikita-1791", + keylt(args->key, unit_key_by_coord(coord, &unit_key))); + args->not_found = 1; + args->last_coord.between = AFTER_UNIT; + return 0; + } + + coord_dup(&args->last_coord, coord); + /* + * did scan just moved to the next node? + */ + if (args->last_lh.node != lh->node) { + int lock_result; + + /* + * if so, lock new node with the mode requested by the caller + */ + done_lh(&args->last_lh); + assert("nikita-1896", znode_is_any_locked(lh->node)); + lock_result = longterm_lock_znode(&args->last_lh, lh->node, + args->mode, ZNODE_LOCK_HIPRI); + if (lock_result != 0) + return lock_result; + } + return check_item(args->inode, coord, args->name); +} + +/* Look for given @name within directory @dir. + + This is called during lookup, creation and removal of directory + entries and on reiser4_rename_common + + First calculate key that directory entry for @name would have. Search + for this key in the tree. If such key is found, scan all items with + the same key, checking name in each directory entry along the way. +*/ +int reiser4_find_entry(struct inode *dir, /* directory to scan */ + struct dentry *de, /* name to search for */ + lock_handle * lh, /* resulting lock handle */ + znode_lock_mode mode, /* required lock mode */ + reiser4_dir_entry_desc * entry /* parameters of found + directory entry */) +{ + const struct qstr *name; + seal_t *seal; + coord_t *coord; + int result; + __u32 flags; + struct de_location *dec; + struct reiser4_dentry_fsdata *fsdata; + + assert("nikita-1130", lh != NULL); + assert("nikita-1128", dir != NULL); + + name = &de->d_name; + assert("nikita-1129", name != NULL); + + /* dentry private data don't require lock, because dentry + manipulations are protected by i_mutex on parent. + + This is not so for inodes, because there is no -the- parent in + inode case. 
+ */ + fsdata = reiser4_get_dentry_fsdata(de); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + dec = &fsdata->dec; + + coord = &dec->entry_coord; + coord_clear_iplug(coord); + seal = &dec->entry_seal; + /* compose key of directory entry for @name */ + inode_dir_plugin(dir)->build_entry_key(dir, name, &entry->key); + + if (reiser4_seal_is_set(seal)) { + /* check seal */ + result = reiser4_seal_validate(seal, coord, &entry->key, + lh, mode, ZNODE_LOCK_LOPRI); + if (result == 0) { + /* key was found. Check that it is really item we are + looking for. */ + result = check_entry(dir, coord, name); + if (result == 0) + return 0; + } + } + flags = (mode == ZNODE_WRITE_LOCK) ? CBK_FOR_INSERT : 0; + /* + * find place in the tree where directory item should be located. + */ + result = reiser4_object_lookup(dir, &entry->key, coord, lh, mode, + FIND_EXACT, LEAF_LEVEL, LEAF_LEVEL, + flags, NULL/*ra_info */); + if (result == CBK_COORD_FOUND) { + struct entry_actor_args arg; + + /* fast path: no hash collisions */ + result = check_entry(dir, coord, name); + if (result == 0) { + reiser4_seal_init(seal, coord, &entry->key); + dec->pos = 0; + } else if (result > 0) { + /* Iterate through all units with the same keys. */ + arg.name = name->name; + arg.key = &entry->key; + arg.not_found = 0; + arg.non_uniq = 0; +#if REISER4_USE_COLLISION_LIMIT + arg.max_non_uniq = max_hash_collisions(dir); + assert("nikita-2851", arg.max_non_uniq > 1); +#endif + arg.mode = mode; + arg.inode = dir; + coord_init_zero(&arg.last_coord); + init_lh(&arg.last_lh); + + result = reiser4_iterate_tree + (reiser4_tree_by_inode(dir), + coord, lh, + entry_actor, &arg, mode, 1); + /* if end of the tree or extent was reached during + scanning. 
*/ + if (arg.not_found || (result == -E_NO_NEIGHBOR)) { + /* step back */ + done_lh(lh); + + result = zload(arg.last_coord.node); + if (result == 0) { + coord_clear_iplug(&arg.last_coord); + coord_dup(coord, &arg.last_coord); + move_lh(lh, &arg.last_lh); + result = RETERR(-ENOENT); + zrelse(arg.last_coord.node); + --arg.non_uniq; + } + } + + done_lh(&arg.last_lh); + if (result == 0) + reiser4_seal_init(seal, coord, &entry->key); + + if (result == 0 || result == -ENOENT) { + assert("nikita-2580", arg.non_uniq > 0); + dec->pos = arg.non_uniq - 1; + } + } + } else + dec->pos = -1; + return result; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/Makefile b/fs/reiser4/plugin/disk_format/Makefile new file mode 100644 index 000000000000..e4e9e54f278d --- /dev/null +++ b/fs/reiser4/plugin/disk_format/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += df_plugins.o + +df_plugins-objs := \ + disk_format40.o \ + disk_format.o diff --git a/fs/reiser4/plugin/disk_format/disk_format.c b/fs/reiser4/plugin/disk_format/disk_format.c new file mode 100644 index 000000000000..d7851063c821 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format.c @@ -0,0 +1,38 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../plugin_header.h" +#include "disk_format40.h" +#include "disk_format.h" +#include "../plugin.h" + +/* initialization of disk layout plugins */ +disk_format_plugin format_plugins[LAST_FORMAT_ID] = { + [FORMAT40_ID] = { + .h = { + .type_id = REISER4_FORMAT_PLUGIN_TYPE, + .id = FORMAT40_ID, + .pops = NULL, + .label = "reiser40", + .desc = "standard disk layout for reiser40", + .linkage = {NULL, NULL} + }, + .init_format = init_format_format40, + .root_dir_key = root_dir_key_format40, + .release = release_format40, + .log_super = log_super_format40, + 
.check_open = check_open_format40, + .version_update = version_update_format40 + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format.h b/fs/reiser4/plugin/disk_format/disk_format.h new file mode 100644 index 000000000000..b9c53acede72 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format.h @@ -0,0 +1,27 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* identifiers for disk layouts, they are also used as indexes in array of disk + plugins */ + +#if !defined( __REISER4_DISK_FORMAT_H__ ) +#define __REISER4_DISK_FORMAT_H__ + +typedef enum { + /* standard reiser4 disk layout plugin id */ + FORMAT40_ID, + LAST_FORMAT_ID +} disk_format_id; + +/* __REISER4_DISK_FORMAT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format40.c b/fs/reiser4/plugin/disk_format/disk_format40.c new file mode 100644 index 000000000000..b572f14d69a8 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format40.c @@ -0,0 +1,664 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../dformat.h" +#include "../../key.h" +#include "../node/node.h" +#include "../space/space_allocator.h" +#include "disk_format40.h" +#include "../plugin.h" +#include "../../txnmgr.h" +#include "../../jnode.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../wander.h" +#include "../../inode.h" +#include "../../ktxnmgrd.h" +#include "../../status_flags.h" + +#include /* for __u?? 
*/ +#include /* for struct super_block */ +#include + +/* reiser 4.0 default disk layout */ + +/* Amount of free blocks needed to perform release_format40 when fs gets + mounted RW: 1 for SB, 1 for non-leaves in overwrite set, 2 for tx header + & tx record. */ +#define RELEASE_RESERVED 4 + +/* This flag indicates that backup should be updated + (the update is performed by fsck) */ +#define FORMAT40_UPDATE_BACKUP (1 << 31) + +/* functions to access fields of format40_disk_super_block */ +static __u64 get_format40_block_count(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->block_count)); +} + +static __u64 get_format40_free_blocks(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->free_blocks)); +} + +static __u64 get_format40_root_block(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->root_block)); +} + +static __u16 get_format40_tree_height(const format40_disk_super_block * sb) +{ + return le16_to_cpu(get_unaligned(&sb->tree_height)); +} + +static __u64 get_format40_file_count(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->file_count)); +} + +static __u64 get_format40_oid(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->oid)); +} + +static __u32 get_format40_mkfs_id(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->mkfs_id)); +} + +static __u32 get_format40_node_plugin_id(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->node_pid)); +} + +static __u64 get_format40_flags(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->flags)); +} + +static __u32 get_format40_version(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->version)) & + ~FORMAT40_UPDATE_BACKUP; +} + +static int update_backup_version(const format40_disk_super_block * sb) +{ + return (le32_to_cpu(get_unaligned(&sb->version)) & 
+ FORMAT40_UPDATE_BACKUP); +} + +static int update_disk_version_minor(const format40_disk_super_block * sb) +{ + return (get_format40_version(sb) < get_release_number_minor()); +} + +static int incomplete_compatibility(const format40_disk_super_block * sb) +{ + return (get_format40_version(sb) > get_release_number_minor()); +} + +static format40_super_info *get_sb_info(struct super_block *super) +{ + return &get_super_private(super)->u.format40; +} + +static int consult_diskmap(struct super_block *s) +{ + format40_super_info *info; + journal_location *jloc; + + info = get_sb_info(s); + jloc = &get_super_private(s)->jloc; + /* Default format-specific locations, if there is nothing in + * diskmap */ + jloc->footer = FORMAT40_JOURNAL_FOOTER_BLOCKNR; + jloc->header = FORMAT40_JOURNAL_HEADER_BLOCKNR; + info->loc.super = FORMAT40_OFFSET / s->s_blocksize; +#ifdef CONFIG_REISER4_BADBLOCKS + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_JF, + &jloc->footer); + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_JH, + &jloc->header); + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_SUPER, + &info->loc.super); +#endif + return 0; +} + +/* find any valid super block of disk_format40 (even if the first + super block is destroyed), will change block numbers of actual journal header/footer (jf/jh) + if needed */ +static struct buffer_head *find_a_disk_format40_super_block(struct super_block + *s) +{ + struct buffer_head *super_bh; + format40_disk_super_block *disk_sb; + format40_super_info *info; + + assert("umka-487", s != NULL); + + info = get_sb_info(s); + + super_bh = sb_bread(s, info->loc.super); + if (super_bh == NULL) + return ERR_PTR(RETERR(-EIO)); + + disk_sb = (format40_disk_super_block *) super_bh->b_data; + if (strncmp(disk_sb->magic, FORMAT40_MAGIC, sizeof(FORMAT40_MAGIC))) { + brelse(super_bh); + return ERR_PTR(RETERR(-EINVAL)); + } + + reiser4_set_block_count(s, le64_to_cpu(get_unaligned(&disk_sb->block_count))); + 
reiser4_set_data_blocks(s, le64_to_cpu(get_unaligned(&disk_sb->block_count)) - + le64_to_cpu(get_unaligned(&disk_sb->free_blocks))); + reiser4_set_free_blocks(s, le64_to_cpu(get_unaligned(&disk_sb->free_blocks))); + + return super_bh; +} + +/* find the most recent version of super block. This is called after journal is + replayed */ +static struct buffer_head *read_super_block(struct super_block *s UNUSED_ARG) +{ + /* Here the most recent superblock copy has to be read. However, as + journal replay isn't complete, we are using + find_a_disk_format40_super_block() function. */ + return find_a_disk_format40_super_block(s); +} + +static int get_super_jnode(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *sb_jnode; + int ret; + + sb_jnode = reiser4_alloc_io_head(&get_sb_info(s)->loc.super); + + ret = jload(sb_jnode); + + if (ret) { + reiser4_drop_io_head(sb_jnode); + return ret; + } + + pin_jnode_data(sb_jnode); + jrelse(sb_jnode); + + sbinfo->u.format40.sb_jnode = sb_jnode; + + return 0; +} + +static void done_super_jnode(struct super_block *s) +{ + jnode *sb_jnode = get_super_private(s)->u.format40.sb_jnode; + + if (sb_jnode) { + unpin_jnode_data(sb_jnode); + reiser4_drop_io_head(sb_jnode); + } +} + +typedef enum format40_init_stage { + NONE_DONE = 0, + CONSULT_DISKMAP, + FIND_A_SUPER, + INIT_JOURNAL_INFO, + INIT_STATUS, + JOURNAL_REPLAY, + READ_SUPER, + KEY_CHECK, + INIT_OID, + INIT_TREE, + JOURNAL_RECOVER, + INIT_SA, + INIT_JNODE, + ALL_DONE +} format40_init_stage; + +static format40_disk_super_block *copy_sb(const struct buffer_head *super_bh) +{ + format40_disk_super_block *sb_copy; + + sb_copy = kmalloc(sizeof(format40_disk_super_block), + reiser4_ctx_gfp_mask_get()); + if (sb_copy == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + memcpy(sb_copy, ((format40_disk_super_block *) super_bh->b_data), + sizeof(format40_disk_super_block)); + return sb_copy; +} + +static int check_key_format(const format40_disk_super_block 
*sb_copy) +{ + if (!equi(REISER4_LARGE_KEY, + get_format40_flags(sb_copy) & (1 << FORMAT40_LARGE_KEYS))) { + warning("nikita-3228", "Key format mismatch. " + "Only %s keys are supported.", + REISER4_LARGE_KEY ? "large" : "small"); + return RETERR(-EINVAL); + } + return 0; +} + +/** + * try_init_format40 + * @super: + * @stage: + * + */ +static int try_init_format40(struct super_block *super, + format40_init_stage *stage) +{ + int result; + struct buffer_head *super_bh; + reiser4_super_info_data *sbinfo; + format40_disk_super_block *sb_copy; + tree_level height; + reiser4_block_nr root_block; + node_plugin *nplug; + + assert("vs-475", super != NULL); + assert("vs-474", get_super_private(super)); + + *stage = NONE_DONE; + + result = consult_diskmap(super); + if (result) + return result; + *stage = CONSULT_DISKMAP; + + super_bh = find_a_disk_format40_super_block(super); + if (IS_ERR(super_bh)) + return PTR_ERR(super_bh); + brelse(super_bh); + *stage = FIND_A_SUPER; + + /* ok, we are sure that filesystem format is a format40 format */ + + /* map jnodes for journal control blocks (header, footer) to disk */ + result = reiser4_init_journal_info(super); + if (result) + return result; + *stage = INIT_JOURNAL_INFO; + + /* ok, we are sure that filesystem format is a format40 format */ + /* Now check it's state */ + result = reiser4_status_init(FORMAT40_STATUS_BLOCKNR); + if (result != 0 && result != -EINVAL) + /* -EINVAL means there is no magic, so probably just old + * fs. 
*/ + return result; + *stage = INIT_STATUS; + + result = reiser4_status_query(NULL, NULL); + if (result == REISER4_STATUS_MOUNT_WARN) + notice("vpf-1363", "Warning: mounting %s with errors.", + super->s_id); + if (result == REISER4_STATUS_MOUNT_RO) { + notice("vpf-1364", "Warning: mounting %s with fatal errors," + " forcing read-only mount.", super->s_id); + super->s_flags |= MS_RDONLY; + } + result = reiser4_journal_replay(super); + if (result) + return result; + *stage = JOURNAL_REPLAY; + + super_bh = read_super_block(super); + if (IS_ERR(super_bh)) + return PTR_ERR(super_bh); + *stage = READ_SUPER; + + /* allocate and make a copy of format40_disk_super_block */ + sb_copy = copy_sb(super_bh); + brelse(super_bh); + + if (IS_ERR(sb_copy)) + return PTR_ERR(sb_copy); + printk("reiser4: %s: found disk format 4.0.%u.\n", + super->s_id, + get_format40_version(sb_copy)); + if (incomplete_compatibility(sb_copy)) + printk("reiser4: %s: format version number (4.0.%u) is " + "greater than release number (4.%u.%u) of reiser4 " + "kernel module. 
Some objects of the volume can be " + "inaccessible.\n", + super->s_id, + get_format40_version(sb_copy), + get_release_number_major(), + get_release_number_minor()); + /* make sure that key format of kernel and filesystem match */ + result = check_key_format(sb_copy); + if (result) { + kfree(sb_copy); + return result; + } + *stage = KEY_CHECK; + + result = oid_init_allocator(super, get_format40_file_count(sb_copy), + get_format40_oid(sb_copy)); + if (result) { + kfree(sb_copy); + return result; + } + *stage = INIT_OID; + + /* get things necessary to init reiser4_tree */ + root_block = get_format40_root_block(sb_copy); + height = get_format40_tree_height(sb_copy); + nplug = node_plugin_by_id(get_format40_node_plugin_id(sb_copy)); + + /* initialize reiser4_super_info_data */ + sbinfo = get_super_private(super); + assert("", sbinfo->tree.super == super); + /* init reiser4_tree for the filesystem */ + result = reiser4_init_tree(&sbinfo->tree, &root_block, height, nplug); + if (result) { + kfree(sb_copy); + return result; + } + *stage = INIT_TREE; + + /* + * initialize reiser4_super_info_data with data from format40 super + * block + */ + sbinfo->default_uid = 0; + sbinfo->default_gid = 0; + sbinfo->mkfs_id = get_format40_mkfs_id(sb_copy); + /* number of blocks in filesystem and reserved space */ + reiser4_set_block_count(super, get_format40_block_count(sb_copy)); + sbinfo->blocks_free = get_format40_free_blocks(sb_copy); + sbinfo->version = get_format40_version(sb_copy); + + if (update_backup_version(sb_copy)) + printk("reiser4: %s: use 'fsck.reiser4 --fix' " + "to complete disk format upgrade.\n", super->s_id); + kfree(sb_copy); + + sbinfo->fsuid = 0; + sbinfo->fs_flags |= (1 << REISER4_ADG); /* hard links for directories + * are not supported */ + sbinfo->fs_flags |= (1 << REISER4_ONE_NODE_PLUGIN); /* all nodes in + * layout 40 are + * of one + * plugin */ + /* sbinfo->tmgr is initialized already */ + + /* recover sb data which were logged separately from sb block */ 
+ + /* NOTE-NIKITA: reiser4_journal_recover_sb_data() calls + * oid_init_allocator() and reiser4_set_free_blocks() with new + * data. What's the reason to call them above? */ + result = reiser4_journal_recover_sb_data(super); + if (result != 0) + return result; + *stage = JOURNAL_RECOVER; + + /* + * Set number of used blocks. The number of used blocks is not stored + * neither in on-disk super block nor in the journal footer blocks. At + * this moment actual values of total blocks and free block counters + * are set in the reiser4 super block (in-memory structure) and we can + * calculate number of used blocks from them. + */ + reiser4_set_data_blocks(super, + reiser4_block_count(super) - + reiser4_free_blocks(super)); + +#if REISER4_DEBUG + sbinfo->min_blocks_used = 16 /* reserved area */ + + 2 /* super blocks */ + + 2 /* journal footer and header */ ; +#endif + + /* init disk space allocator */ + result = sa_init_allocator(reiser4_get_space_allocator(super), + super, NULL); + if (result) + return result; + *stage = INIT_SA; + + result = get_super_jnode(super); + if (result == 0) + *stage = ALL_DONE; + return result; +} + +/* plugin->u.format.get_ready */ +int init_format_format40(struct super_block *s, void *data UNUSED_ARG) +{ + int result; + format40_init_stage stage; + + result = try_init_format40(s, &stage); + switch (stage) { + case ALL_DONE: + assert("nikita-3458", result == 0); + break; + case INIT_JNODE: + done_super_jnode(s); + case INIT_SA: + sa_destroy_allocator(reiser4_get_space_allocator(s), s); + case JOURNAL_RECOVER: + case INIT_TREE: + reiser4_done_tree(&get_super_private(s)->tree); + case INIT_OID: + case KEY_CHECK: + case READ_SUPER: + case JOURNAL_REPLAY: + case INIT_STATUS: + reiser4_status_finish(); + case INIT_JOURNAL_INFO: + reiser4_done_journal_info(s); + case FIND_A_SUPER: + case CONSULT_DISKMAP: + case NONE_DONE: + break; + default: + impossible("nikita-3457", "init stage: %i", stage); + } + + if (!rofs_super(s) && reiser4_free_blocks(s) 
< RELEASE_RESERVED) + return RETERR(-ENOSPC); + + return result; +} + +static void pack_format40_super(const struct super_block *s, char *data) +{ + format40_disk_super_block *super_data = + (format40_disk_super_block *) data; + + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("zam-591", data != NULL); + + put_unaligned(cpu_to_le64(reiser4_free_committed_blocks(s)), + &super_data->free_blocks); + + put_unaligned(cpu_to_le64(sbinfo->tree.root_block), + &super_data->root_block); + + put_unaligned(cpu_to_le64(oid_next(s)), + &super_data->oid); + + put_unaligned(cpu_to_le64(oids_used(s)), + &super_data->file_count); + + put_unaligned(cpu_to_le16(sbinfo->tree.height), + &super_data->tree_height); + + if (update_disk_version_minor(super_data)) { + __u32 version = PLUGIN_LIBRARY_VERSION | FORMAT40_UPDATE_BACKUP; + + put_unaligned(cpu_to_le32(version), &super_data->version); + } +} + +/* plugin->u.format.log_super + return a jnode which should be added to transaction when the super block + gets logged */ +jnode *log_super_format40(struct super_block *s) +{ + jnode *sb_jnode; + + sb_jnode = get_super_private(s)->u.format40.sb_jnode; + + jload(sb_jnode); + + pack_format40_super(s, jdata(sb_jnode)); + + jrelse(sb_jnode); + + return sb_jnode; +} + +/* plugin->u.format.release */ +int release_format40(struct super_block *s) +{ + int ret; + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(s); + assert("zam-579", sbinfo != NULL); + + if (!rofs_super(s)) { + ret = reiser4_capture_super_block(s); + if (ret != 0) + warning("vs-898", + "reiser4_capture_super_block failed: %d", + ret); + + ret = txnmgr_force_commit_all(s, 1); + if (ret != 0) + warning("jmacd-74438", "txn_force failed: %d", ret); + + all_grabbed2free(); + } + + sa_destroy_allocator(&sbinfo->space_allocator, s); + reiser4_done_journal_info(s); + done_super_jnode(s); + + rcu_barrier(); + reiser4_done_tree(&sbinfo->tree); + /* call finish_rcu(), because some znode were "released" in + * 
reiser4_done_tree(). */ + rcu_barrier(); + + return 0; +} + +#define FORMAT40_ROOT_LOCALITY 41 +#define FORMAT40_ROOT_OBJECTID 42 + +/* plugin->u.format.root_dir_key */ +const reiser4_key *root_dir_key_format40(const struct super_block *super + UNUSED_ARG) +{ + static const reiser4_key FORMAT40_ROOT_DIR_KEY = { + .el = { + __constant_cpu_to_le64((FORMAT40_ROOT_LOCALITY << 4) | KEY_SD_MINOR), +#if REISER4_LARGE_KEY + ON_LARGE_KEY(0ull,) +#endif + __constant_cpu_to_le64(FORMAT40_ROOT_OBJECTID), + 0ull + } + }; + + return &FORMAT40_ROOT_DIR_KEY; +} + +/* plugin->u.format.check_open. + Check the opened object for validness. For now it checks for the valid oid & + locality only, can be improved later and it its work may depend on the mount + options. */ +int check_open_format40(const struct inode *object) +{ + oid_t max, oid; + + max = oid_next(object->i_sb) - 1; + + /* Check the oid. */ + oid = get_inode_oid(object); + if (oid > max) { + warning("vpf-1360", "The object with the oid %llu " + "greater then the max used oid %llu found.", + (unsigned long long)oid, (unsigned long long)max); + + return RETERR(-EIO); + } + + /* Check the locality. */ + oid = reiser4_inode_data(object)->locality_id; + if (oid > max) { + warning("vpf-1361", "The object with the locality %llu " + "greater then the max used oid %llu found.", + (unsigned long long)oid, (unsigned long long)max); + + return RETERR(-EIO); + } + + return 0; +} + +/* + * plugin->u.format.version_update + * Upgrade minor disk format version number + */ +int version_update_format40(struct super_block *super) { + txn_handle * trans; + lock_handle lh; + txn_atom *atom; + int ret; + + /* Nothing to do if RO mount or the on-disk version is not less. 
*/ + if (super->s_flags & MS_RDONLY) + return 0; + + if (get_super_private(super)->version >= get_release_number_minor()) + return 0; + + printk("reiser4: %s: upgrading disk format to 4.0.%u.\n", + super->s_id, + get_release_number_minor()); + printk("reiser4: %s: use 'fsck.reiser4 --fix' " + "to complete disk format upgrade.\n", super->s_id); + + /* Mark the uber znode dirty to call log_super on write_logs. */ + init_lh(&lh); + ret = get_uber_znode(reiser4_get_tree(super), ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI, &lh); + if (ret != 0) + return ret; + + znode_make_dirty(lh.node); + done_lh(&lh); + + /* Update the backup blocks. */ + + /* Force write_logs immediately. */ + trans = get_current_context()->trans; + atom = get_current_atom_locked(); + assert("vpf-1906", atom != NULL); + + spin_lock_txnh(trans); + return force_commit_atom(trans); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format40.h b/fs/reiser4/plugin/disk_format/disk_format40.h new file mode 100644 index 000000000000..f91f6c4327d8 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format40.h @@ -0,0 +1,111 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* this file contains: + - definition of ondisk super block of standart disk layout for + reiser 4.0 (layout 40) + - definition of layout 40 specific portion of in-core super block + - declarations of functions implementing methods of layout plugin + for layout 40 + - declarations of functions used to get/set fields in layout 40 super block +*/ + +#ifndef __DISK_FORMAT40_H__ +#define __DISK_FORMAT40_H__ + +/* magic for default reiser4 layout */ +#define FORMAT40_MAGIC "ReIsEr40FoRmAt" +#define FORMAT40_OFFSET (REISER4_MASTER_OFFSET + PAGE_SIZE) + +#include "../../dformat.h" + +#include /* for struct super_block */ + +typedef enum { + 
FORMAT40_LARGE_KEYS +} format40_flags; + +/* ondisk super block for format 40. It is 512 bytes long */ +typedef struct format40_disk_super_block { + /* 0 */ d64 block_count; + /* number of block in a filesystem */ + /* 8 */ d64 free_blocks; + /* number of free blocks */ + /* 16 */ d64 root_block; + /* filesystem tree root block */ + /* 24 */ d64 oid; + /* smallest free objectid */ + /* 32 */ d64 file_count; + /* number of files in a filesystem */ + /* 40 */ d64 flushes; + /* number of times super block was + flushed. Needed if format 40 + will have few super blocks */ + /* 48 */ d32 mkfs_id; + /* unique identifier of fs */ + /* 52 */ char magic[16]; + /* magic string ReIsEr40FoRmAt */ + /* 68 */ d16 tree_height; + /* height of filesystem tree */ + /* 70 */ d16 formatting_policy; + /* not used anymore */ + /* 72 */ d64 flags; + /* 80 */ d32 version; + /* on-disk format version number + initially assigned by mkfs as the greatest format40 + version number supported by reiser4progs and updated + in mount time in accordance with the greatest format40 + version number supported by kernel. + Is used by fsck to catch possible corruption and + for various compatibility issues */ + /* 84 */ d32 node_pid; + /* node plugin id */ + /* 88 */ char not_used[424]; +} format40_disk_super_block; + +/* format 40 specific part of reiser4_super_info_data */ +typedef struct format40_super_info { +/* format40_disk_super_block actual_sb; */ + jnode *sb_jnode; + struct { + reiser4_block_nr super; + } loc; +} format40_super_info; + +/* Defines for journal header and footer respectively. 
*/ +#define FORMAT40_JOURNAL_HEADER_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 3) + +#define FORMAT40_JOURNAL_FOOTER_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 4) + +#define FORMAT40_STATUS_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 5) + +/* Diskmap declarations */ +#define FORMAT40_PLUGIN_DISKMAP_ID ((REISER4_FORMAT_PLUGIN_TYPE<<16) | (FORMAT40_ID)) +#define FORMAT40_SUPER 1 +#define FORMAT40_JH 2 +#define FORMAT40_JF 3 + +/* declarations of functions implementing methods of layout plugin for + format 40. The functions theirself are in disk_format40.c */ +extern int init_format_format40(struct super_block *, void *data); +extern const reiser4_key *root_dir_key_format40(const struct super_block *); +extern int release_format40(struct super_block *s); +extern jnode *log_super_format40(struct super_block *s); +extern int check_open_format40(const struct inode *object); +extern int version_update_format40(struct super_block *super); + +/* __DISK_FORMAT40_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/fibration.c b/fs/reiser4/plugin/fibration.c new file mode 100644 index 000000000000..690dac4b83a1 --- /dev/null +++ b/fs/reiser4/plugin/fibration.c @@ -0,0 +1,175 @@ +/* Copyright 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Directory fibrations */ + +/* + * Suppose we have a directory tree with sources of some project. During + * compilation .o files are created within this tree. This makes access + * to the original source files less efficient, because source files are + * now "diluted" by object files: default directory plugin uses prefix + * of a file name as a part of the key for directory entry (and this + * part is also inherited by the key of file body). This means that + * foo.o will be located close to foo.c and foo.h in the tree. 
+ * + * To avoid this effect directory plugin fill highest 7 (unused + * originally) bits of the second component of the directory entry key + * by bit-pattern depending on the file name (see + * fs/reiser4/kassign.c:build_entry_key_common()). These bits are called + * "fibre". Fibre of the file name key is inherited by key of stat data + * and keys of file body (in the case of REISER4_LARGE_KEY). + * + * Fibre for a given file is chosen by per-directory fibration + * plugin. Names within given fibre are ordered lexicographically. + */ + +#include "../debug.h" +#include "plugin_header.h" +#include "plugin.h" +#include "../super.h" +#include "../inode.h" + +#include + +static const int fibre_shift = 57; + +#define FIBRE_NO(n) (((__u64)(n)) << fibre_shift) + +/* + * Trivial fibration: all files of directory are just ordered + * lexicographically. + */ +static __u64 fibre_trivial(const struct inode *dir, const char *name, int len) +{ + return FIBRE_NO(0); +} + +/* + * dot-o fibration: place .o files after all others. + */ +static __u64 fibre_dot_o(const struct inode *dir, const char *name, int len) +{ + /* special treatment for .*\.o */ + if (len > 2 && name[len - 1] == 'o' && name[len - 2] == '.') + return FIBRE_NO(1); + else + return FIBRE_NO(0); +} + +/* + * ext.1 fibration: subdivide directory into 128 fibrations one for each + * 7bit extension character (file "foo.h" goes into fibre "h"), plus + * default fibre for the rest. + */ +static __u64 fibre_ext_1(const struct inode *dir, const char *name, int len) +{ + if (len > 2 && name[len - 2] == '.') + return FIBRE_NO(name[len - 1]); + else + return FIBRE_NO(0); +} + +/* + * ext.3 fibration: try to separate files with different 3-character + * extensions from each other. 
+ */ +static __u64 fibre_ext_3(const struct inode *dir, const char *name, int len) +{ + if (len > 4 && name[len - 4] == '.') + return FIBRE_NO(name[len - 3] + name[len - 2] + name[len - 1]); + else + return FIBRE_NO(0); +} + +static int change_fibration(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + int result; + + assert("nikita-3503", inode != NULL); + assert("nikita-3504", plugin != NULL); + + assert("nikita-3505", is_reiser4_inode(inode)); + assert("nikita-3506", inode_dir_plugin(inode) != NULL); + assert("nikita-3507", + plugin->h.type_id == REISER4_FIBRATION_PLUGIN_TYPE); + + result = 0; + if (inode_fibration_plugin(inode) == NULL || + inode_fibration_plugin(inode)->h.id != plugin->h.id) { + if (is_dir_empty(inode) == 0) + result = aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_FIBRATION, plugin); + else + result = RETERR(-ENOTEMPTY); + + } + return result; +} + +static reiser4_plugin_ops fibration_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_fibration +}; + +/* fibration plugins */ +fibration_plugin fibration_plugins[LAST_FIBRATION_ID] = { + [FIBRATION_LEXICOGRAPHIC] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_LEXICOGRAPHIC, + .pops = &fibration_plugin_ops, + .label = "lexicographic", + .desc = "no fibration", + .linkage = {NULL, NULL} + }, + .fibre = fibre_trivial + }, + [FIBRATION_DOT_O] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_DOT_O, + .pops = &fibration_plugin_ops, + .label = "dot-o", + .desc = "fibrate .o files separately", + .linkage = {NULL, NULL} + }, + .fibre = fibre_dot_o + }, + [FIBRATION_EXT_1] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_EXT_1, + .pops = &fibration_plugin_ops, + .label = "ext-1", + .desc = "fibrate file by single character extension", + .linkage = {NULL, NULL} + }, + .fibre = fibre_ext_1 + }, + [FIBRATION_EXT_3] = { + .h = { + .type_id = 
REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_EXT_3, + .pops = &fibration_plugin_ops, + .label = "ext-3", + .desc = "fibrate file by three character extension", + .linkage = {NULL, NULL} + }, + .fibre = fibre_ext_3 + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/fibration.h b/fs/reiser4/plugin/fibration.h new file mode 100644 index 000000000000..5ff1800ef705 --- /dev/null +++ b/fs/reiser4/plugin/fibration.h @@ -0,0 +1,37 @@ +/* Copyright 2004 by Hans Reiser, licensing governed by reiser4/README */ + +/* Fibration plugin used by hashed directory plugin to segment content + * of directory. See fs/reiser4/plugin/fibration.c for more on this. */ + +#if !defined(__FS_REISER4_PLUGIN_FIBRATION_H__) +#define __FS_REISER4_PLUGIN_FIBRATION_H__ + +#include "plugin_header.h" + +typedef struct fibration_plugin { + /* generic fields */ + plugin_header h; + + __u64(*fibre) (const struct inode *dir, const char *name, int len); +} fibration_plugin; + +typedef enum { + FIBRATION_LEXICOGRAPHIC, + FIBRATION_DOT_O, + FIBRATION_EXT_1, + FIBRATION_EXT_3, + LAST_FIBRATION_ID +} reiser4_fibration_id; + +/* __FS_REISER4_PLUGIN_FIBRATION_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/file/Makefile b/fs/reiser4/plugin/file/Makefile new file mode 100644 index 000000000000..134fa7aa20b9 --- /dev/null +++ b/fs/reiser4/plugin/file/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_REISER4_FS) += file_plugins.o + +file_plugins-objs := \ + file.o \ + tail_conversion.o \ + symlink.o \ + cryptcompress.o diff --git a/fs/reiser4/plugin/file/cryptcompress.c b/fs/reiser4/plugin/file/cryptcompress.c new file mode 100644 index 000000000000..440277cfd011 --- /dev/null +++ b/fs/reiser4/plugin/file/cryptcompress.c @@ -0,0 +1,3797 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ +/* + * Written by Edward Shishkin. + * + * Implementations of inode/file/address_space operations + * specific for cryptcompress file plugin which manages + * regular files built of compressed and(or) encrypted bodies. + * See http://dev.namesys.com/CryptcompressPlugin for details. + */ + +#include "../../inode.h" +#include "../cluster.h" +#include "../object.h" +#include "../../tree_walk.h" +#include "cryptcompress.h" + +#include +#include +#include +#include +#include +#include + +/* + Managing primary and secondary caches by Reiser4 + cryptcompress file plugin. Synchronization scheme. 
+ + + +------------------+ + +------------------->| tfm stream | + | | (compressed data)| + flush | +------------------+ + +-----------------+ | + |(->)longterm lock| V +--+ writepages() | | +-***-+ reiser4 +---+ + | | +--+ | *** | storage tree | | + | | | +-***-+ (primary cache)| | +u | write() (secondary| cache) V / | \ | | +s | ----> +----+ +----+ +----+ +----+ +-***** ******* **----+ ----> | d | +e | | | |page cluster | | | **disk cluster** | | i | +r | <---- +----+ +----+ +----+ +----+ +-***** **********----+ <---- | s | + | read() ^ ^ | | k | + | | (->)longterm lock| | page_io()| | + | | +------+ | | +--+ readpages() | | +---+ + | V + | +------------------+ + +--------------------| tfm stream | + | (plain text) | + +------------------+ +*/ + +/* get cryptcompress specific portion of inode */ +struct cryptcompress_info *cryptcompress_inode_data(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->file_plugin_data.cryptcompress_info; +} + +/* plugin->u.file.init_inode_data */ +void init_inode_data_cryptcompress(struct inode *inode, + reiser4_object_create_data * crd, + int create) +{ + struct cryptcompress_info *data; + + data = cryptcompress_inode_data(inode); + assert("edward-685", data != NULL); + + memset(data, 0, sizeof(*data)); + + mutex_init(&data->checkin_mutex); + data->trunc_index = ULONG_MAX; + turn_on_compression(data); + set_lattice_factor(data, MIN_LATTICE_FACTOR); + init_inode_ordering(inode, crd, create); +} + +/* The following is a part of reiser4 cipher key manager + which is called when opening/creating a cryptcompress file */ + +/* get/set cipher key info */ +struct reiser4_crypto_info * inode_crypto_info (struct inode * inode) +{ + assert("edward-90", inode != NULL); + assert("edward-91", reiser4_inode_data(inode) != NULL); + return cryptcompress_inode_data(inode)->crypt; +} + +static void set_inode_crypto_info (struct inode * inode, + struct reiser4_crypto_info * info) +{ + cryptcompress_inode_data(inode)->crypt = info; +} + 
+/* allocate a cipher key info */ +struct reiser4_crypto_info * reiser4_alloc_crypto_info (struct inode * inode) +{ + struct reiser4_crypto_info *info; + int fipsize; + + info = kzalloc(sizeof(*info), reiser4_ctx_gfp_mask_get()); + if (!info) + return ERR_PTR(-ENOMEM); + + fipsize = inode_digest_plugin(inode)->fipsize; + info->keyid = kmalloc(fipsize, reiser4_ctx_gfp_mask_get()); + if (!info->keyid) { + kfree(info); + return ERR_PTR(-ENOMEM); + } + info->host = inode; + return info; +} + +#if 0 +/* allocate/free low-level info for cipher and digest + transforms */ +static int alloc_crypto_tfms(struct reiser4_crypto_info * info) +{ + struct crypto_blkcipher * ctfm = NULL; + struct crypto_hash * dtfm = NULL; + cipher_plugin * cplug = inode_cipher_plugin(info->host); + digest_plugin * dplug = inode_digest_plugin(info->host); + + if (cplug->alloc) { + ctfm = cplug->alloc(); + if (IS_ERR(ctfm)) { + warning("edward-1364", + "Can not allocate info for %s\n", + cplug->h.desc); + return RETERR(PTR_ERR(ctfm)); + } + } + info_set_cipher(info, ctfm); + if (dplug->alloc) { + dtfm = dplug->alloc(); + if (IS_ERR(dtfm)) { + warning("edward-1365", + "Can not allocate info for %s\n", + dplug->h.desc); + goto unhappy_with_digest; + } + } + info_set_digest(info, dtfm); + return 0; + unhappy_with_digest: + if (cplug->free) { + cplug->free(ctfm); + info_set_cipher(info, NULL); + } + return RETERR(PTR_ERR(dtfm)); +} +#endif + +static void +free_crypto_tfms(struct reiser4_crypto_info * info) +{ + assert("edward-1366", info != NULL); + if (!info_get_cipher(info)) { + assert("edward-1601", !info_get_digest(info)); + return; + } + inode_cipher_plugin(info->host)->free(info_get_cipher(info)); + info_set_cipher(info, NULL); + inode_digest_plugin(info->host)->free(info_get_digest(info)); + info_set_digest(info, NULL); + return; +} + +#if 0 +/* create a key fingerprint for disk stat-data */ +static int create_keyid (struct reiser4_crypto_info * info, + struct reiser4_crypto_data * data) +{ + int 
ret = -ENOMEM; + size_t blk, pad; + __u8 * dmem; + __u8 * cmem; + struct hash_desc ddesc; + struct blkcipher_desc cdesc; + struct scatterlist sg; + + assert("edward-1367", info != NULL); + assert("edward-1368", info->keyid != NULL); + + ddesc.tfm = info_get_digest(info); + ddesc.flags = 0; + cdesc.tfm = info_get_cipher(info); + cdesc.flags = 0; + + dmem = kmalloc((size_t)crypto_hash_digestsize(ddesc.tfm), + reiser4_ctx_gfp_mask_get()); + if (!dmem) + goto exit1; + + blk = crypto_blkcipher_blocksize(cdesc.tfm); + + pad = data->keyid_size % blk; + pad = (pad ? blk - pad : 0); + + cmem = kmalloc((size_t)data->keyid_size + pad, + reiser4_ctx_gfp_mask_get()); + if (!cmem) + goto exit2; + memcpy(cmem, data->keyid, data->keyid_size); + memset(cmem + data->keyid_size, 0, pad); + + sg_init_one(&sg, cmem, data->keyid_size + pad); + + ret = crypto_blkcipher_encrypt(&cdesc, &sg, &sg, + data->keyid_size + pad); + if (ret) { + warning("edward-1369", + "encryption failed flags=%x\n", cdesc.flags); + goto exit3; + } + ret = crypto_hash_digest(&ddesc, &sg, sg.length, dmem); + if (ret) { + warning("edward-1602", + "digest failed flags=%x\n", ddesc.flags); + goto exit3; + } + memcpy(info->keyid, dmem, inode_digest_plugin(info->host)->fipsize); + exit3: + kfree(cmem); + exit2: + kfree(dmem); + exit1: + return ret; +} +#endif + +static void destroy_keyid(struct reiser4_crypto_info * info) +{ + assert("edward-1370", info != NULL); + assert("edward-1371", info->keyid != NULL); + kfree(info->keyid); + return; +} + +static void __free_crypto_info (struct inode * inode) +{ + struct reiser4_crypto_info * info = inode_crypto_info(inode); + assert("edward-1372", info != NULL); + + free_crypto_tfms(info); + destroy_keyid(info); + kfree(info); +} + +#if 0 +static void instantiate_crypto_info(struct reiser4_crypto_info * info) +{ + assert("edward-1373", info != NULL); + assert("edward-1374", info->inst == 0); + info->inst = 1; +} +#endif + +static void uninstantiate_crypto_info(struct 
reiser4_crypto_info * info) +{ + assert("edward-1375", info != NULL); + info->inst = 0; +} + +#if 0 +static int is_crypto_info_instantiated(struct reiser4_crypto_info * info) +{ + return info->inst; +} + +static int inode_has_cipher_key(struct inode * inode) +{ + assert("edward-1376", inode != NULL); + return inode_crypto_info(inode) && + is_crypto_info_instantiated(inode_crypto_info(inode)); +} +#endif + +static void free_crypto_info (struct inode * inode) +{ + uninstantiate_crypto_info(inode_crypto_info(inode)); + __free_crypto_info(inode); +} + +static int need_cipher(struct inode * inode) +{ + return inode_cipher_plugin(inode) != + cipher_plugin_by_id(NONE_CIPHER_ID); +} + +/* Parse @data which contains a (uninstantiated) cipher key imported + from user space, create a low-level cipher info and attach it to + the @object. If success, then info contains an instantiated key */ +#if 0 +struct reiser4_crypto_info * create_crypto_info(struct inode * object, + struct reiser4_crypto_data * data) +{ + int ret; + struct reiser4_crypto_info * info; + + assert("edward-1377", data != NULL); + assert("edward-1378", need_cipher(object)); + + if (inode_file_plugin(object) != + file_plugin_by_id(DIRECTORY_FILE_PLUGIN_ID)) + return ERR_PTR(-EINVAL); + + info = reiser4_alloc_crypto_info(object); + if (IS_ERR(info)) + return info; + ret = alloc_crypto_tfms(info); + if (ret) + goto err; + /* instantiating a key */ + ret = crypto_blkcipher_setkey(info_get_cipher(info), + data->key, + data->keysize); + if (ret) { + warning("edward-1379", + "setkey failed flags=%x", + crypto_blkcipher_get_flags(info_get_cipher(info))); + goto err; + } + info->keysize = data->keysize; + ret = create_keyid(info, data); + if (ret) + goto err; + instantiate_crypto_info(info); + return info; + err: + __free_crypto_info(object); + return ERR_PTR(ret); +} +#endif + +/* increment/decrement a load counter when + attaching/detaching the crypto-stat to any object */ +static void load_crypto_info(struct 
reiser4_crypto_info * info) +{ + assert("edward-1380", info != NULL); + inc_keyload_count(info); +} + +static void unload_crypto_info(struct inode * inode) +{ + struct reiser4_crypto_info * info = inode_crypto_info(inode); + assert("edward-1381", info->keyload_count > 0); + + dec_keyload_count(inode_crypto_info(inode)); + if (info->keyload_count == 0) + /* final release */ + free_crypto_info(inode); +} + +/* attach/detach an existing crypto-stat */ +void reiser4_attach_crypto_info(struct inode * inode, + struct reiser4_crypto_info * info) +{ + assert("edward-1382", inode != NULL); + assert("edward-1383", info != NULL); + assert("edward-1384", inode_crypto_info(inode) == NULL); + + set_inode_crypto_info(inode, info); + load_crypto_info(info); +} + +/* returns true, if crypto stat can be attached to the @host */ +#if REISER4_DEBUG +static int host_allows_crypto_info(struct inode * host) +{ + int ret; + file_plugin * fplug = inode_file_plugin(host); + + switch (fplug->h.id) { + case CRYPTCOMPRESS_FILE_PLUGIN_ID: + ret = 1; + break; + default: + ret = 0; + } + return ret; +} +#endif /* REISER4_DEBUG */ + +static void reiser4_detach_crypto_info(struct inode * inode) +{ + assert("edward-1385", inode != NULL); + assert("edward-1386", host_allows_crypto_info(inode)); + + if (inode_crypto_info(inode)) + unload_crypto_info(inode); + set_inode_crypto_info(inode, NULL); +} + +#if 0 + +/* compare fingerprints of @child and @parent */ +static int keyid_eq(struct reiser4_crypto_info * child, + struct reiser4_crypto_info * parent) +{ + return !memcmp(child->keyid, + parent->keyid, + info_digest_plugin(parent)->fipsize); +} + +/* check if a crypto-stat (which is bound to @parent) can be inherited */ +int can_inherit_crypto_cryptcompress(struct inode *child, struct inode *parent) +{ + if (!need_cipher(child)) + return 0; + /* the child is created */ + if (!inode_crypto_info(child)) + return 1; + /* the child is looked up */ + if (!inode_crypto_info(parent)) + return 0; + return 
(inode_cipher_plugin(child) == inode_cipher_plugin(parent) && + inode_digest_plugin(child) == inode_digest_plugin(parent) && + inode_crypto_info(child)->keysize == + inode_crypto_info(parent)->keysize && + keyid_eq(inode_crypto_info(child), inode_crypto_info(parent))); +} +#endif + +/* helper functions for ->create() method of the cryptcompress plugin */ +static int inode_set_crypto(struct inode * object) +{ + reiser4_inode * info; + if (!inode_crypto_info(object)) { + if (need_cipher(object)) + return RETERR(-EINVAL); + /* the file is not to be encrypted */ + return 0; + } + info = reiser4_inode_data(object); + info->extmask |= (1 << CRYPTO_STAT); + return 0; +} + +static int inode_init_compression(struct inode * object) +{ + int result = 0; + assert("edward-1461", object != NULL); + if (inode_compression_plugin(object)->init) + result = inode_compression_plugin(object)->init(); + return result; +} + +static int inode_check_cluster(struct inode * object) +{ + assert("edward-696", object != NULL); + + if (unlikely(inode_cluster_size(object) < PAGE_SIZE)) { + warning("edward-1320", "Can not support '%s' " + "logical clusters (less then page size)", + inode_cluster_plugin(object)->h.label); + return RETERR(-EINVAL); + } + if (unlikely(inode_cluster_shift(object)) >= BITS_PER_BYTE*sizeof(int)){ + warning("edward-1463", "Can not support '%s' " + "logical clusters (too big for transform)", + inode_cluster_plugin(object)->h.label); + return RETERR(-EINVAL); + } + return 0; +} + +/* plugin->destroy_inode() */ +void destroy_inode_cryptcompress(struct inode * inode) +{ + assert("edward-1464", INODE_PGCOUNT(inode) == 0); + reiser4_detach_crypto_info(inode); + return; +} + +/* plugin->create_object(): +. install plugins +. attach crypto info if specified +. attach compression info if specified +. 
attach cluster info +*/ +int create_object_cryptcompress(struct inode *object, struct inode *parent, + reiser4_object_create_data * data) +{ + int result; + reiser4_inode *info; + + assert("edward-23", object != NULL); + assert("edward-24", parent != NULL); + assert("edward-30", data != NULL); + assert("edward-26", reiser4_inode_get_flag(object, REISER4_NO_SD)); + assert("edward-27", data->id == CRYPTCOMPRESS_FILE_PLUGIN_ID); + + info = reiser4_inode_data(object); + + assert("edward-29", info != NULL); + + /* set file bit */ + info->plugin_mask |= (1 << PSET_FILE); + + /* set crypto */ + result = inode_set_crypto(object); + if (result) + goto error; + /* set compression */ + result = inode_init_compression(object); + if (result) + goto error; + /* set cluster */ + result = inode_check_cluster(object); + if (result) + goto error; + + /* save everything in disk stat-data */ + result = write_sd_by_inode_common(object); + if (!result) + return 0; + error: + reiser4_detach_crypto_info(object); + return result; +} + +/* plugin->open() */ +int open_cryptcompress(struct inode * inode, struct file * file) +{ + return 0; +} + +/* returns a blocksize, the attribute of a cipher algorithm */ +static unsigned int +cipher_blocksize(struct inode * inode) +{ + assert("edward-758", need_cipher(inode)); + assert("edward-1400", inode_crypto_info(inode) != NULL); + return crypto_blkcipher_blocksize + (info_get_cipher(inode_crypto_info(inode))); +} + +/* returns offset translated by scale factor of the crypto-algorithm */ +static loff_t inode_scaled_offset (struct inode * inode, + const loff_t src_off /* input offset */) +{ + assert("edward-97", inode != NULL); + + if (!need_cipher(inode) || + src_off == get_key_offset(reiser4_min_key()) || + src_off == get_key_offset(reiser4_max_key())) + return src_off; + + return inode_cipher_plugin(inode)->scale(inode, + cipher_blocksize(inode), + src_off); +} + +/* returns disk cluster size */ +size_t inode_scaled_cluster_size(struct inode * inode) 
+{ + assert("edward-110", inode != NULL); + + return inode_scaled_offset(inode, inode_cluster_size(inode)); +} + +/* set number of cluster pages */ +static void set_cluster_nrpages(struct cluster_handle * clust, + struct inode *inode) +{ + struct reiser4_slide * win; + + assert("edward-180", clust != NULL); + assert("edward-1040", inode != NULL); + + clust->old_nrpages = size_in_pages(lbytes(clust->index, inode)); + win = clust->win; + if (!win) { + clust->nr_pages = size_in_pages(lbytes(clust->index, inode)); + return; + } + assert("edward-1176", clust->op != LC_INVAL); + assert("edward-1064", win->off + win->count + win->delta != 0); + + if (win->stat == HOLE_WINDOW && + win->off == 0 && win->count == inode_cluster_size(inode)) { + /* special case: writing a "fake" logical cluster */ + clust->nr_pages = 0; + return; + } + clust->nr_pages = size_in_pages(max(win->off + win->count + win->delta, + lbytes(clust->index, inode))); + return; +} + +/* plugin->key_by_inode() + build key of a disk cluster */ +int key_by_inode_cryptcompress(struct inode *inode, loff_t off, + reiser4_key * key) +{ + assert("edward-64", inode != 0); + + if (likely(off != get_key_offset(reiser4_max_key()))) + off = off_to_clust_to_off(off, inode); + if (inode_crypto_info(inode)) + off = inode_scaled_offset(inode, off); + + key_by_inode_and_offset_common(inode, 0, key); + set_key_offset(key, (__u64)off); + return 0; +} + +/* plugin->flow_by_inode() */ +/* flow is used to read/write disk clusters */ +int flow_by_inode_cryptcompress(struct inode *inode, const char __user * buf, + int user, /* 1: @buf is of user space, + 0: kernel space */ + loff_t size, /* @buf size */ + loff_t off, /* offset to start io from */ + rw_op op, /* READ or WRITE */ + flow_t * f /* resulting flow */) +{ + assert("edward-436", f != NULL); + assert("edward-149", inode != NULL); + assert("edward-150", inode_file_plugin(inode) != NULL); + assert("edward-1465", user == 0); /* we use flow to read/write + disk clusters 
located in + kernel space */ + f->length = size; + memcpy(&f->data, &buf, sizeof(buf)); + f->user = user; + f->op = op; + + return key_by_inode_cryptcompress(inode, off, &f->key); +} + +static int +cryptcompress_hint_validate(hint_t * hint, const reiser4_key * key, + znode_lock_mode lock_mode) +{ + coord_t *coord; + + assert("edward-704", hint != NULL); + assert("edward-1089", !hint_is_valid(hint)); + assert("edward-706", hint->lh.owner == NULL); + + coord = &hint->ext_coord.coord; + + if (!hint || !hint_is_set(hint) || hint->mode != lock_mode) + /* hint either not set or set by different operation */ + return RETERR(-E_REPEAT); + + if (get_key_offset(key) != hint->offset) + /* hint is set for different key */ + return RETERR(-E_REPEAT); + + assert("edward-707", reiser4_schedulable()); + + return reiser4_seal_validate(&hint->seal, &hint->ext_coord.coord, + key, &hint->lh, lock_mode, + ZNODE_LOCK_LOPRI); +} + +/* reserve disk space when writing a logical cluster */ +static int reserve4cluster(struct inode *inode, struct cluster_handle *clust) +{ + int result = 0; + + assert("edward-965", reiser4_schedulable()); + assert("edward-439", inode != NULL); + assert("edward-440", clust != NULL); + assert("edward-441", clust->pages != NULL); + + if (clust->nr_pages == 0) { + assert("edward-1152", clust->win != NULL); + assert("edward-1153", clust->win->stat == HOLE_WINDOW); + /* don't reserve disk space for fake logical cluster */ + return 0; + } + assert("edward-442", jprivate(clust->pages[0]) != NULL); + + result = reiser4_grab_space_force(estimate_insert_cluster(inode) + + estimate_update_cluster(inode), + BA_CAN_COMMIT); + if (result) + return result; + clust->reserved = 1; + grabbed2cluster_reserved(estimate_insert_cluster(inode) + + estimate_update_cluster(inode)); +#if REISER4_DEBUG + clust->reserved_prepped = estimate_update_cluster(inode); + clust->reserved_unprepped = estimate_insert_cluster(inode); +#endif + /* there can be space grabbed by txnmgr_force_commit_all 
*/ + return 0; +} + +/* free reserved disk space if writing a logical cluster fails */ +static void free_reserved4cluster(struct inode *inode, + struct cluster_handle *ch, int count) +{ + assert("edward-967", ch->reserved == 1); + + cluster_reserved2free(count); + ch->reserved = 0; +} + +/* + * The core search procedure of the cryptcompress plugin. + * If returned value is not cbk_errored, then current position + * is locked. + */ +static int find_cluster_item(hint_t * hint, + const reiser4_key * key, /* key of the item we are + looking for */ + znode_lock_mode lock_mode /* which lock */ , + ra_info_t * ra_info, lookup_bias bias, __u32 flags) +{ + int result; + reiser4_key ikey; + coord_t *coord = &hint->ext_coord.coord; + coord_t orig = *coord; + + assert("edward-152", hint != NULL); + + if (!hint_is_valid(hint)) { + result = cryptcompress_hint_validate(hint, key, lock_mode); + if (result == -E_REPEAT) + goto traverse_tree; + else if (result) { + assert("edward-1216", 0); + return result; + } + hint_set_valid(hint); + } + assert("edward-709", znode_is_any_locked(coord->node)); + /* + * Hint is valid, so we perform in-place lookup. + * It means we just need to check if the next item in + * the tree (relative to the current position @coord) + * has key @key. + * + * Valid hint means in particular, that node is not + * empty and at least one its item has been processed + */ + if (equal_to_rdk(coord->node, key)) { + /* + * Look for the item in the right neighbor + */ + lock_handle lh_right; + + init_lh(&lh_right); + result = reiser4_get_right_neighbor(&lh_right, coord->node, + znode_is_wlocked(coord->node) ? 
+ ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result) { + done_lh(&lh_right); + reiser4_unset_hint(hint); + if (result == -E_NO_NEIGHBOR) + return RETERR(-EIO); + return result; + } + assert("edward-1218", + equal_to_ldk(lh_right.node, key)); + result = zload(lh_right.node); + if (result) { + done_lh(&lh_right); + reiser4_unset_hint(hint); + return result; + } + coord_init_first_unit_nocheck(coord, lh_right.node); + + if (!coord_is_existing_item(coord)) { + zrelse(lh_right.node); + done_lh(&lh_right); + goto traverse_tree; + } + item_key_by_coord(coord, &ikey); + zrelse(coord->node); + if (unlikely(!keyeq(key, &ikey))) { + warning("edward-1608", + "Expected item not found. Fsck?"); + done_lh(&lh_right); + goto not_found; + } + /* + * item has been found in the right neighbor; + * move lock to the right + */ + done_lh(&hint->lh); + move_lh(&hint->lh, &lh_right); + + dclust_inc_extension_ncount(hint); + + return CBK_COORD_FOUND; + } else { + /* + * Look for the item in the current node + */ + coord->item_pos++; + coord->unit_pos = 0; + coord->between = AT_UNIT; + + result = zload(coord->node); + if (result) { + done_lh(&hint->lh); + return result; + } + if (!coord_is_existing_item(coord)) { + zrelse(coord->node); + goto not_found; + } + item_key_by_coord(coord, &ikey); + zrelse(coord->node); + if (!keyeq(key, &ikey)) + goto not_found; + /* + * item has been found in the current node + */ + dclust_inc_extension_ncount(hint); + + return CBK_COORD_FOUND; + } + not_found: + /* + * The tree doesn't contain an item with @key; + * roll back the coord + */ + *coord = orig; + ON_DEBUG(coord_update_v(coord)); + return CBK_COORD_NOTFOUND; + + traverse_tree: + + reiser4_unset_hint(hint); + dclust_init_extension(hint); + coord_init_zero(coord); + + assert("edward-713", hint->lh.owner == NULL); + assert("edward-714", reiser4_schedulable()); + + result = coord_by_key(current_tree, key, coord, &hint->lh, + lock_mode, bias, LEAF_LEVEL, LEAF_LEVEL, + 
CBK_UNIQUE | flags, ra_info); + if (cbk_errored(result)) + return result; + if(result == CBK_COORD_FOUND) + dclust_inc_extension_ncount(hint); + hint_set_valid(hint); + return result; +} + +/* This function is called by deflate[inflate] manager when + creating a transformed/plain stream to check if we should + create/cut some overhead. If this returns true, then @oh + contains the size of this overhead. + */ +static int need_cut_or_align(struct inode * inode, + struct cluster_handle * ch, rw_op rw, int * oh) +{ + struct tfm_cluster * tc = &ch->tc; + switch (rw) { + case WRITE_OP: /* estimate align */ + *oh = tc->len % cipher_blocksize(inode); + if (*oh != 0) + return 1; + break; + case READ_OP: /* estimate cut */ + *oh = *(tfm_output_data(ch) + tc->len - 1); + break; + default: + impossible("edward-1401", "bad option"); + } + return (tc->len != tc->lsize); +} + +/* create/cut an overhead of transformed/plain stream */ +static void align_or_cut_overhead(struct inode * inode, + struct cluster_handle * ch, rw_op rw) +{ + unsigned int oh; + cipher_plugin * cplug = inode_cipher_plugin(inode); + + assert("edward-1402", need_cipher(inode)); + + if (!need_cut_or_align(inode, ch, rw, &oh)) + return; + switch (rw) { + case WRITE_OP: /* do align */ + ch->tc.len += + cplug->align_stream(tfm_input_data(ch) + + ch->tc.len, ch->tc.len, + cipher_blocksize(inode)); + *(tfm_input_data(ch) + ch->tc.len - 1) = + cipher_blocksize(inode) - oh; + break; + case READ_OP: /* do cut */ + assert("edward-1403", oh <= cipher_blocksize(inode)); + ch->tc.len -= oh; + break; + default: + impossible("edward-1404", "bad option"); + } + return; +} + +static unsigned max_cipher_overhead(struct inode * inode) +{ + if (!need_cipher(inode) || !inode_cipher_plugin(inode)->align_stream) + return 0; + return cipher_blocksize(inode); +} + +static int deflate_overhead(struct inode *inode) +{ + return (inode_compression_plugin(inode)-> + checksum ? 
DC_CHECKSUM_SIZE : 0); +} + +static unsigned deflate_overrun(struct inode * inode, int ilen) +{ + return coa_overrun(inode_compression_plugin(inode), ilen); +} + +static bool is_all_zero(char const* mem, size_t size) +{ + while (size-- > 0) + if (*mem++) + return false; + return true; +} + +static inline bool should_punch_hole(struct tfm_cluster *tc) +{ + if (0 && + !reiser4_is_set(reiser4_get_current_sb(), REISER4_DONT_PUNCH_HOLES) + && is_all_zero(tfm_stream_data(tc, INPUT_STREAM), tc->lsize)) { + + tc->hole = 1; + return true; + } + return false; +} + +/* Estimating compressibility of a logical cluster by various + policies represented by compression mode plugin. + If this returns false, then compressor won't be called for + the cluster of index @index. +*/ +static int should_compress(struct tfm_cluster *tc, cloff_t index, + struct inode *inode) +{ + compression_plugin *cplug = inode_compression_plugin(inode); + compression_mode_plugin *mplug = inode_compression_mode_plugin(inode); + + assert("edward-1321", tc->len != 0); + assert("edward-1322", cplug != NULL); + assert("edward-1323", mplug != NULL); + + if (should_punch_hole(tc)) + /* + * we are about to punch a hole, + * so don't compress data + */ + return 0; + return /* estimate by size */ + (cplug->min_size_deflate ? + tc->len >= cplug->min_size_deflate() : + 1) && + /* estimate by compression mode plugin */ + (mplug->should_deflate ? + mplug->should_deflate(inode, index) : + 1); +} + +/* Evaluating results of compression transform. 
+ Returns true, if we need to accept this results */ +static int save_compressed(int size_before, int size_after, struct inode *inode) +{ + return (size_after + deflate_overhead(inode) + + max_cipher_overhead(inode) < size_before); +} + +/* Guess result of the evaluation above */ +static int need_inflate(struct cluster_handle * ch, struct inode * inode, + int encrypted /* is cluster encrypted */ ) +{ + struct tfm_cluster * tc = &ch->tc; + + assert("edward-142", tc != 0); + assert("edward-143", inode != NULL); + + return tc->len < + (encrypted ? + inode_scaled_offset(inode, tc->lsize) : + tc->lsize); +} + +/* If results of compression were accepted, then we add + a checksum to catch possible disk cluster corruption. + The following is a format of the data stored in disk clusters: + + data This is (transformed) logical cluster. + cipher_overhead This is created by ->align() method + of cipher plugin. May be absent. + checksum (4) This is created by ->checksum method + of compression plugin to check + integrity. May be absent. + + Crypto overhead format: + + data + control_byte (1) contains aligned overhead size: + 1 <= overhead <= cipher_blksize +*/ +/* Append a checksum at the end of a transformed stream */ +static void dc_set_checksum(compression_plugin * cplug, struct tfm_cluster * tc) +{ + __u32 checksum; + + assert("edward-1309", tc != NULL); + assert("edward-1310", tc->len > 0); + assert("edward-1311", cplug->checksum != NULL); + + checksum = cplug->checksum(tfm_stream_data(tc, OUTPUT_STREAM), tc->len); + put_unaligned(cpu_to_le32(checksum), + (d32 *)(tfm_stream_data(tc, OUTPUT_STREAM) + tc->len)); + tc->len += (int)DC_CHECKSUM_SIZE; +} + +/* Check a disk cluster checksum. 
+ Returns 0 if checksum is correct, otherwise returns 1 */ +static int dc_check_checksum(compression_plugin * cplug, struct tfm_cluster * tc) +{ + assert("edward-1312", tc != NULL); + assert("edward-1313", tc->len > (int)DC_CHECKSUM_SIZE); + assert("edward-1314", cplug->checksum != NULL); + + if (cplug->checksum(tfm_stream_data(tc, INPUT_STREAM), + tc->len - (int)DC_CHECKSUM_SIZE) != + le32_to_cpu(get_unaligned((d32 *) + (tfm_stream_data(tc, INPUT_STREAM) + + tc->len - (int)DC_CHECKSUM_SIZE)))) { + warning("edward-156", + "Bad disk cluster checksum %d, (should be %d) Fsck?\n", + (int)le32_to_cpu + (get_unaligned((d32 *) + (tfm_stream_data(tc, INPUT_STREAM) + + tc->len - (int)DC_CHECKSUM_SIZE))), + (int)cplug->checksum + (tfm_stream_data(tc, INPUT_STREAM), + tc->len - (int)DC_CHECKSUM_SIZE)); + return 1; + } + tc->len -= (int)DC_CHECKSUM_SIZE; + return 0; +} + +/* get input/output stream for some transform action */ +int grab_tfm_stream(struct inode * inode, struct tfm_cluster * tc, + tfm_stream_id id) +{ + size_t size = inode_scaled_cluster_size(inode); + + assert("edward-901", tc != NULL); + assert("edward-1027", inode_compression_plugin(inode) != NULL); + + if (cluster_get_tfm_act(tc) == TFMA_WRITE) + size += deflate_overrun(inode, inode_cluster_size(inode)); + + if (!get_tfm_stream(tc, id) && id == INPUT_STREAM) + alternate_streams(tc); + if (!get_tfm_stream(tc, id)) + return alloc_tfm_stream(tc, size, id); + + assert("edward-902", tfm_stream_is_set(tc, id)); + + if (tfm_stream_size(tc, id) < size) + return realloc_tfm_stream(tc, size, id); + return 0; +} + +/* Common deflate manager */ +int reiser4_deflate_cluster(struct cluster_handle * clust, struct inode * inode) +{ + int result = 0; + int compressed = 0; + int encrypted = 0; + struct tfm_cluster * tc = &clust->tc; + compression_plugin * coplug; + + assert("edward-401", inode != NULL); + assert("edward-903", tfm_stream_is_set(tc, INPUT_STREAM)); + assert("edward-1348", cluster_get_tfm_act(tc) == TFMA_WRITE); 
+ assert("edward-498", !tfm_cluster_is_uptodate(tc)); + + coplug = inode_compression_plugin(inode); + if (should_compress(tc, clust->index, inode)) { + /* try to compress, discard bad results */ + size_t dst_len; + compression_mode_plugin * mplug = + inode_compression_mode_plugin(inode); + assert("edward-602", coplug != NULL); + assert("edward-1423", coplug->compress != NULL); + + result = grab_coa(tc, coplug); + if (result) + /* + * can not allocate memory to perform + * compression, leave data uncompressed + */ + goto cipher; + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) { + warning("edward-1425", + "alloc stream failed with ret=%d, skipped compression", + result); + goto cipher; + } + dst_len = tfm_stream_size(tc, OUTPUT_STREAM); + coplug->compress(get_coa(tc, coplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + /* make sure we didn't overwrite extra bytes */ + assert("edward-603", + dst_len <= tfm_stream_size(tc, OUTPUT_STREAM)); + + /* evaluate results of compression transform */ + if (save_compressed(tc->len, dst_len, inode)) { + /* good result, accept */ + tc->len = dst_len; + if (mplug->accept_hook != NULL) { + result = mplug->accept_hook(inode, clust->index); + if (result) + warning("edward-1426", + "accept_hook failed with ret=%d", + result); + } + compressed = 1; + } + else { + /* bad result, discard */ +#if 0 + if (cluster_is_complete(clust, inode)) + warning("edward-1496", + "incompressible cluster %lu (inode %llu)", + clust->index, + (unsigned long long)get_inode_oid(inode)); +#endif + if (mplug->discard_hook != NULL && + cluster_is_complete(clust, inode)) { + result = mplug->discard_hook(inode, + clust->index); + if (result) + warning("edward-1427", + "discard_hook failed with ret=%d", + result); + } + } + } + cipher: + if (need_cipher(inode)) { + cipher_plugin * ciplug; + struct blkcipher_desc desc; + struct scatterlist src; + struct scatterlist dst; + + ciplug = 
inode_cipher_plugin(inode); + desc.tfm = info_get_cipher(inode_crypto_info(inode)); + desc.flags = 0; + if (compressed) + alternate_streams(tc); + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + + align_or_cut_overhead(inode, clust, WRITE_OP); + sg_init_one(&src, tfm_input_data(clust), tc->len); + sg_init_one(&dst, tfm_output_data(clust), tc->len); + + result = crypto_blkcipher_encrypt(&desc, &dst, &src, tc->len); + if (result) { + warning("edward-1405", + "encryption failed flags=%x\n", desc.flags); + return result; + } + encrypted = 1; + } + if (compressed && coplug->checksum != NULL) + dc_set_checksum(coplug, tc); + if (!compressed && !encrypted) + alternate_streams(tc); + return result; +} + +/* Common inflate manager. */ +int reiser4_inflate_cluster(struct cluster_handle * clust, struct inode * inode) +{ + int result = 0; + int transformed = 0; + struct tfm_cluster * tc = &clust->tc; + compression_plugin * coplug; + + assert("edward-905", inode != NULL); + assert("edward-1178", clust->dstat == PREP_DISK_CLUSTER); + assert("edward-906", tfm_stream_is_set(&clust->tc, INPUT_STREAM)); + assert("edward-1349", tc->act == TFMA_READ); + assert("edward-907", !tfm_cluster_is_uptodate(tc)); + + /* Handle a checksum (if any) */ + coplug = inode_compression_plugin(inode); + if (need_inflate(clust, inode, need_cipher(inode)) && + coplug->checksum != NULL) { + result = dc_check_checksum(coplug, tc); + if (unlikely(result)) { + warning("edward-1460", + "Inode %llu: disk cluster %lu looks corrupted", + (unsigned long long)get_inode_oid(inode), + clust->index); + return RETERR(-EIO); + } + } + if (need_cipher(inode)) { + cipher_plugin * ciplug; + struct blkcipher_desc desc; + struct scatterlist src; + struct scatterlist dst; + + ciplug = inode_cipher_plugin(inode); + desc.tfm = info_get_cipher(inode_crypto_info(inode)); + desc.flags = 0; + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + 
assert("edward-909", tfm_cluster_is_set(tc)); + + sg_init_one(&src, tfm_input_data(clust), tc->len); + sg_init_one(&dst, tfm_output_data(clust), tc->len); + + result = crypto_blkcipher_decrypt(&desc, &dst, &src, tc->len); + if (result) { + warning("edward-1600", "decrypt failed flags=%x\n", + desc.flags); + return result; + } + align_or_cut_overhead(inode, clust, READ_OP); + transformed = 1; + } + if (need_inflate(clust, inode, 0)) { + size_t dst_len = inode_cluster_size(inode); + if(transformed) + alternate_streams(tc); + + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + assert("edward-1305", coplug->decompress != NULL); + assert("edward-910", tfm_cluster_is_set(tc)); + + coplug->decompress(get_coa(tc, coplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + /* check length */ + tc->len = dst_len; + assert("edward-157", dst_len == tc->lsize); + transformed = 1; + } + if (!transformed) + alternate_streams(tc); + return result; +} + +/* This is implementation of readpage method of struct + address_space_operations for cryptcompress plugin. 
*/ +int readpage_cryptcompress(struct file *file, struct page *page) +{ + reiser4_context *ctx; + struct cluster_handle clust; + item_plugin *iplug; + int result; + + assert("edward-88", PageLocked(page)); + assert("vs-976", !PageUptodate(page)); + assert("edward-89", page->mapping && page->mapping->host); + + ctx = reiser4_init_context(page->mapping->host->i_sb); + if (IS_ERR(ctx)) { + unlock_page(page); + return PTR_ERR(ctx); + } + assert("edward-113", + ergo(file != NULL, + page->mapping == file_inode(file)->i_mapping)); + + if (PageUptodate(page)) { + warning("edward-1338", "page is already uptodate\n"); + unlock_page(page); + reiser4_exit_context(ctx); + return 0; + } + cluster_init_read(&clust, NULL); + clust.file = file; + iplug = item_plugin_by_id(CTAIL_ID); + if (!iplug->s.file.readpage) { + unlock_page(page); + put_cluster_handle(&clust); + reiser4_exit_context(ctx); + return -EINVAL; + } + result = iplug->s.file.readpage(&clust, page); + + put_cluster_handle(&clust); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* number of pages to check in */ +static int get_new_nrpages(struct cluster_handle * clust) +{ + switch (clust->op) { + case LC_APPOV: + case LC_EXPAND: + return clust->nr_pages; + case LC_SHRINK: + assert("edward-1179", clust->win != NULL); + return size_in_pages(clust->win->off + clust->win->count); + default: + impossible("edward-1180", "bad page cluster option"); + return 0; + } +} + +static void set_cluster_pages_dirty(struct cluster_handle * clust, + struct inode * inode) +{ + int i; + struct page *pg; + int nrpages = get_new_nrpages(clust); + + for (i = 0; i < nrpages; i++) { + + pg = clust->pages[i]; + assert("edward-968", pg != NULL); + lock_page(pg); + assert("edward-1065", PageUptodate(pg)); + set_page_dirty_notag(pg); + unlock_page(pg); + mark_page_accessed(pg); + } +} + +/* Grab a page cluster for read/write operations. 
+ Attach a jnode for write operations (when preparing for modifications, which + are supposed to be committed). + + We allocate only one jnode per page cluster; this jnode is binded to the + first page of this cluster, so we have an extra-reference that will be put + as soon as jnode is evicted from memory), other references will be cleaned + up in flush time (assume that check in page cluster was successful). +*/ +int grab_page_cluster(struct inode * inode, + struct cluster_handle * clust, rw_op rw) +{ + int i; + int result = 0; + jnode *node = NULL; + + assert("edward-182", clust != NULL); + assert("edward-183", clust->pages != NULL); + assert("edward-1466", clust->node == NULL); + assert("edward-1428", inode != NULL); + assert("edward-1429", inode->i_mapping != NULL); + assert("edward-184", clust->nr_pages <= cluster_nrpages(inode)); + + if (clust->nr_pages == 0) + return 0; + + for (i = 0; i < clust->nr_pages; i++) { + + assert("edward-1044", clust->pages[i] == NULL); + + clust->pages[i] = + find_or_create_page(inode->i_mapping, + clust_to_pg(clust->index, inode) + i, + reiser4_ctx_gfp_mask_get()); + if (!clust->pages[i]) { + result = RETERR(-ENOMEM); + break; + } + if (i == 0 && rw == WRITE_OP) { + node = jnode_of_page(clust->pages[i]); + if (IS_ERR(node)) { + result = PTR_ERR(node); + unlock_page(clust->pages[i]); + break; + } + JF_SET(node, JNODE_CLUSTER_PAGE); + assert("edward-920", jprivate(clust->pages[0])); + } + INODE_PGCOUNT_INC(inode); + unlock_page(clust->pages[i]); + } + if (unlikely(result)) { + while (i) { + put_cluster_page(clust->pages[--i]); + INODE_PGCOUNT_DEC(inode); + } + if (node && !IS_ERR(node)) + jput(node); + return result; + } + clust->node = node; + return 0; +} + +static void truncate_page_cluster_range(struct inode * inode, + struct page ** pages, + cloff_t index, + int from, int count, + int even_cows) +{ + assert("edward-1467", count > 0); + reiser4_invalidate_pages(inode->i_mapping, + clust_to_pg(index, inode) + from, + count, 
even_cows); +} + +/* Put @count pages starting from @from offset */ +void __put_page_cluster(int from, int count, + struct page ** pages, struct inode * inode) +{ + int i; + assert("edward-1468", pages != NULL); + assert("edward-1469", inode != NULL); + assert("edward-1470", from >= 0 && count >= 0); + + for (i = 0; i < count; i++) { + assert("edward-1471", pages[from + i] != NULL); + assert("edward-1472", + pages[from + i]->index == pages[from]->index + i); + + put_cluster_page(pages[from + i]); + INODE_PGCOUNT_DEC(inode); + } +} + +/* + * This is dual to grab_page_cluster, + * however if @rw == WRITE_OP, then we call this function + * only if something is failed before checkin page cluster. + */ +void put_page_cluster(struct cluster_handle * clust, + struct inode * inode, rw_op rw) +{ + assert("edward-445", clust != NULL); + assert("edward-922", clust->pages != NULL); + assert("edward-446", + ergo(clust->nr_pages != 0, clust->pages[0] != NULL)); + + __put_page_cluster(0, clust->nr_pages, clust->pages, inode); + if (rw == WRITE_OP) { + if (unlikely(clust->node)) { + assert("edward-447", + clust->node == jprivate(clust->pages[0])); + jput(clust->node); + clust->node = NULL; + } + } +} + +#if REISER4_DEBUG +int cryptcompress_inode_ok(struct inode *inode) +{ + if (!(reiser4_inode_data(inode)->plugin_mask & (1 << PSET_FILE))) + return 0; + if (!cluster_shift_ok(inode_cluster_shift(inode))) + return 0; + return 1; +} + +static int window_ok(struct reiser4_slide * win, struct inode *inode) +{ + assert("edward-1115", win != NULL); + assert("edward-1116", ergo(win->delta, win->stat == HOLE_WINDOW)); + + return (win->off != inode_cluster_size(inode)) && + (win->off + win->count + win->delta <= inode_cluster_size(inode)); +} + +static int cluster_ok(struct cluster_handle * clust, struct inode *inode) +{ + assert("edward-279", clust != NULL); + + if (!clust->pages) + return 0; + return (clust->win ? 
window_ok(clust->win, inode) : 1); +} +#if 0 +static int pages_truncate_ok(struct inode *inode, pgoff_t start) +{ + int found; + struct page * page; + + + found = find_get_pages(inode->i_mapping, &start, 1, &page); + if (found) + put_cluster_page(page); + return !found; +} +#else +#define pages_truncate_ok(inode, start) 1 +#endif + +static int jnode_truncate_ok(struct inode *inode, cloff_t index) +{ + jnode *node; + node = jlookup(current_tree, get_inode_oid(inode), + clust_to_pg(index, inode)); + if (likely(!node)) + return 1; + jput(node); + return 0; +} +#endif + +/* guess next window stat */ +static inline window_stat next_window_stat(struct reiser4_slide * win) +{ + assert("edward-1130", win != NULL); + return ((win->stat == HOLE_WINDOW && win->delta == 0) ? + HOLE_WINDOW : DATA_WINDOW); +} + +/* guess and set next cluster index and window params */ +static void move_update_window(struct inode * inode, + struct cluster_handle * clust, + loff_t file_off, loff_t to_file) +{ + struct reiser4_slide * win; + + assert("edward-185", clust != NULL); + assert("edward-438", clust->pages != NULL); + assert("edward-281", cluster_ok(clust, inode)); + + win = clust->win; + if (!win) + return; + + switch (win->stat) { + case DATA_WINDOW: + /* increment */ + clust->index++; + win->stat = DATA_WINDOW; + win->off = 0; + win->count = min((loff_t)inode_cluster_size(inode), to_file); + break; + case HOLE_WINDOW: + switch (next_window_stat(win)) { + case HOLE_WINDOW: + /* skip */ + clust->index = off_to_clust(file_off, inode); + win->stat = HOLE_WINDOW; + win->off = 0; + win->count = off_to_cloff(file_off, inode); + win->delta = min((loff_t)(inode_cluster_size(inode) - + win->count), to_file); + break; + case DATA_WINDOW: + /* stay */ + win->stat = DATA_WINDOW; + /* off+count+delta=inv */ + win->off = win->off + win->count; + win->count = win->delta; + win->delta = 0; + break; + default: + impossible("edward-282", "wrong next window state"); + } + break; + default: + 
impossible("edward-283", "wrong current window state"); + } + assert("edward-1068", cluster_ok(clust, inode)); +} + +static int update_sd_cryptcompress(struct inode *inode) +{ + int result = 0; + + assert("edward-978", reiser4_schedulable()); + + result = reiser4_grab_space_force(/* one for stat data update */ + estimate_update_common(inode), + BA_CAN_COMMIT); + if (result) + return result; + if (!IS_NOCMTIME(inode)) + inode->i_ctime = inode->i_mtime = current_time(inode); + + result = reiser4_update_sd(inode); + + if (unlikely(result != 0)) + warning("edward-1573", + "Can not update stat-data: %i. FSCK?", + result); + return result; +} + +static void uncapture_cluster_jnode(jnode * node) +{ + txn_atom *atom; + + assert_spin_locked(&(node->guard)); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +static void put_found_pages(struct page **pages, int nr) +{ + int i; + for (i = 0; i < nr; i++) { + assert("edward-1045", pages[i] != NULL); + put_cluster_page(pages[i]); + } +} + +/* Lifecycle of a logical cluster in the system. + * + * + * Logical cluster of a cryptcompress file is represented in the system by + * . page cluster (in memory, primary cache, contains plain text); + * . disk cluster (in memory, secondary cache, contains transformed text). + * Primary cache is to reduce number of transform operations (compression, + * encryption), i.e. to implement transform-caching strategy. + * Secondary cache is to reduce number of I/O operations, i.e. for usual + * write-caching strategy. Page cluster is a set of pages, i.e. mapping of + * a logical cluster to the primary cache. Disk cluster is a set of items + * of the same type defined by some reiser4 item plugin id. + * + * 1. 
Performing modifications + * + * Every modification of a cryptcompress file is considered as a set of + * operations performed on file's logical clusters. Every such "atomic" + * modification is truncate, append and(or) overwrite some bytes of a + * logical cluster performed in the primary cache with the following + * synchronization with the secondary cache (in flush time). Disk clusters, + * which live in the secondary cache, are supposed to be synchronized with + * disk. The mechanism of synchronization of primary and secondary caches + * includes so-called checkin/checkout technique described below. + * + * 2. Submitting modifications + * + * Each page cluster has associated jnode (a special in-memory header to + * keep a track of transactions in reiser4), which is attached to its first + * page when grabbing page cluster for modifications (see grab_page_cluster). + * Submitting modifications (see checkin_logical_cluster) is going per logical + * cluster and includes: + * . checkin_cluster_size; + * . checkin_page_cluster. + * checkin_cluster_size() is resolved to file size update (which completely + * defines new size of logical cluster (number of file's bytes in a logical + * cluster). + * checkin_page_cluster() captures jnode of a page cluster and installs + * jnode's dirty flag (if needed) to indicate that modifications are + * successfully checked in. + * + * 3. Checking out modifications + * + * Is going per logical cluster in flush time (see checkout_logical_cluster). + * This is the time of synchronizing primary and secondary caches. + * checkout_logical_cluster() includes: + * . checkout_page_cluster (retrieving checked in pages). + * . uncapture jnode (including clear dirty flag and unlock) + * + * 4. Committing modifications + * + * Proceeding a synchronization of primary and secondary caches. 
When checking + * out page cluster (the phase above) pages are locked/flushed/unlocked + * one-by-one in ascending order of their indexes to contiguous stream, which + * is supposed to be transformed (compressed, encrypted), chopped up into items + * and committed to disk as a disk cluster. + * + * 5. Managing page references + * + * Every checked in page have a special additional "control" reference, + * which is dropped at checkout. We need this to avoid unexpected evicting + * pages from memory before checkout. Control references are managed so + * they are not accumulated with every checkin: + * + * 0 + * checkin -> 1 + * 0 -> checkout + * checkin -> 1 + * checkin -> 1 + * checkin -> 1 + * 0 -> checkout + * ... + * + * Every page cluster has its own unique "cluster lock". Update/drop + * references are serialized via this lock. Number of checked in cluster + * pages is calculated by i_size under cluster lock. File size is updated + * at every checkin action also under cluster lock (except cases of + * appending/truncating fake logical clusters). + * + * Proof of correctness: + * + * Since we update file size under cluster lock, in the case of non-fake + * logical cluster with its lock held we do have expected number of checked + * in pages. On the other hand, append/truncate of fake logical clusters + * doesn't change number of checked in pages of any cluster. + * + * NOTE-EDWARD: As cluster lock we use guard (spinlock_t) of its jnode. + * Currently, I don't see any reason to create a special lock for those + * needs. + */ + +static inline void lock_cluster(jnode * node) +{ + spin_lock_jnode(node); +} + +static inline void unlock_cluster(jnode * node) +{ + spin_unlock_jnode(node); +} + +static inline void unlock_cluster_uncapture(jnode * node) +{ + uncapture_cluster_jnode(node); +} + +/* Set new file size by window. Cluster lock is required. 
*/ +static void checkin_file_size(struct cluster_handle * clust, + struct inode * inode) +{ + loff_t new_size; + struct reiser4_slide * win; + + assert("edward-1181", clust != NULL); + assert("edward-1182", inode != NULL); + assert("edward-1473", clust->pages != NULL); + assert("edward-1474", clust->pages[0] != NULL); + assert("edward-1475", jprivate(clust->pages[0]) != NULL); + assert_spin_locked(&(jprivate(clust->pages[0])->guard)); + + + win = clust->win; + assert("edward-1183", win != NULL); + + new_size = clust_to_off(clust->index, inode) + win->off; + + switch (clust->op) { + case LC_APPOV: + case LC_EXPAND: + if (new_size + win->count <= i_size_read(inode)) + /* overwrite only */ + return; + new_size += win->count; + break; + case LC_SHRINK: + break; + default: + impossible("edward-1184", "bad page cluster option"); + break; + } + inode_check_scale_nolock(inode, i_size_read(inode), new_size); + i_size_write(inode, new_size); + return; +} + +static inline void checkin_cluster_size(struct cluster_handle * clust, + struct inode * inode) +{ + if (clust->win) + checkin_file_size(clust, inode); +} + +static int checkin_page_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + int result; + jnode * node; + int old_nrpages = clust->old_nrpages; + int new_nrpages = get_new_nrpages(clust); + + node = clust->node; + + assert("edward-221", node != NULL); + assert("edward-971", clust->reserved == 1); + assert("edward-1263", + clust->reserved_prepped == estimate_update_cluster(inode)); + assert("edward-1264", clust->reserved_unprepped == 0); + + if (JF_ISSET(node, JNODE_DIRTY)) { + /* + * page cluster was checked in, but not yet + * checked out, so release related resources + */ + free_reserved4cluster(inode, clust, + estimate_update_cluster(inode)); + __put_page_cluster(0, clust->old_nrpages, + clust->pages, inode); + } else { + result = capture_cluster_jnode(node); + if (unlikely(result)) { + unlock_cluster(node); + return result; + } + 
jnode_make_dirty_locked(node); + clust->reserved = 0; + } + unlock_cluster(node); + + if (new_nrpages < old_nrpages) { + /* truncate >= 1 complete pages */ + __put_page_cluster(new_nrpages, + old_nrpages - new_nrpages, + clust->pages, inode); + truncate_page_cluster_range(inode, + clust->pages, clust->index, + new_nrpages, + old_nrpages - new_nrpages, + 0); + } +#if REISER4_DEBUG + clust->reserved_prepped -= estimate_update_cluster(inode); +#endif + return 0; +} + +/* Submit modifications of a logical cluster */ +static int checkin_logical_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int result = 0; + jnode * node; + + node = clust->node; + + assert("edward-1035", node != NULL); + assert("edward-1029", clust != NULL); + assert("edward-1030", clust->reserved == 1); + assert("edward-1031", clust->nr_pages != 0); + assert("edward-1032", clust->pages != NULL); + assert("edward-1033", clust->pages[0] != NULL); + assert("edward-1446", jnode_is_cluster_page(node)); + assert("edward-1476", node == jprivate(clust->pages[0])); + + lock_cluster(node); + checkin_cluster_size(clust, inode); + /* + * this will unlock the cluster + */ + result = checkin_page_cluster(clust, inode); + jput(node); + clust->node = NULL; + return result; +} + +/* + * Retrieve size of logical cluster that was checked in at + * the latest modifying session (cluster lock is required) + */ +static inline void checkout_cluster_size(struct cluster_handle * clust, + struct inode * inode) +{ + struct tfm_cluster *tc = &clust->tc; + + tc->len = lbytes(clust->index, inode); + assert("edward-1478", tc->len != 0); +} + +/* + * Retrieve a page cluster with the latest submitted modifications + * and flush its pages to previously allocated contiguous stream. 
+ */ +static void checkout_page_cluster(struct cluster_handle * clust, + jnode * node, struct inode * inode) +{ + int i; + int found; + int to_put; + pgoff_t page_index = clust_to_pg(clust->index, inode); + struct tfm_cluster *tc = &clust->tc; + + /* find and put checked in pages: cluster is locked, + * so we must get expected number (to_put) of pages + */ + to_put = size_in_pages(lbytes(clust->index, inode)); + found = find_get_pages(inode->i_mapping, &page_index, + to_put, clust->pages); + BUG_ON(found != to_put); + + __put_page_cluster(0, to_put, clust->pages, inode); + unlock_cluster_uncapture(node); + + /* Flush found pages. + * + * Note, that we don't disable modifications while flushing, + * moreover, some found pages can be truncated, as we have + * released cluster lock. + */ + for (i = 0; i < found; i++) { + int in_page; + char * data; + assert("edward-1479", + clust->pages[i]->index == clust->pages[0]->index + i); + + lock_page(clust->pages[i]); + if (!PageUptodate(clust->pages[i])) { + /* page was truncated */ + assert("edward-1480", + i_size_read(inode) <= page_offset(clust->pages[i])); + assert("edward-1481", + clust->pages[i]->mapping != inode->i_mapping); + unlock_page(clust->pages[i]); + break; + } + /* Update the number of bytes in the logical cluster, + * as it could be partially truncated. Note, that only + * partial truncate is possible (complete truncate can + * not go here, as it is performed via ->kill_hook() + * called by cut_file_items(), and the last one must + * wait for znode locked with parent coord). + */ + checkout_cluster_size(clust, inode); + + /* this can be zero, as new file size is + checked in before truncating pages */ + in_page = __mbp(tc->len, i); + + data = kmap_atomic(clust->pages[i]); + memcpy(tfm_stream_data(tc, INPUT_STREAM) + pg_to_off(i), + data, in_page); + kunmap_atomic(data); + /* + * modifications have been checked out and will be + * committed later. Anyway, the dirty status of the + * page is no longer relevant. 
However, the uptodate + * status of the page is still relevant! + */ + if (PageDirty(clust->pages[i])) + cancel_dirty_page(clust->pages[i]); + + unlock_page(clust->pages[i]); + + if (in_page < PAGE_SIZE) + /* end of the file */ + break; + } + put_found_pages(clust->pages, found); /* find_get_pages */ + tc->lsize = tc->len; + return; +} + +/* Check out modifications of a logical cluster */ +int checkout_logical_cluster(struct cluster_handle * clust, + jnode * node, struct inode *inode) +{ + int result; + struct tfm_cluster *tc = &clust->tc; + + assert("edward-980", node != NULL); + assert("edward-236", inode != NULL); + assert("edward-237", clust != NULL); + assert("edward-240", !clust->win); + assert("edward-241", reiser4_schedulable()); + assert("edward-718", cryptcompress_inode_ok(inode)); + + result = grab_tfm_stream(inode, tc, INPUT_STREAM); + if (result) { + warning("edward-1430", "alloc stream failed with ret=%d", + result); + return RETERR(-E_REPEAT); + } + lock_cluster(node); + + if (unlikely(!JF_ISSET(node, JNODE_DIRTY))) { + /* race with another flush */ + warning("edward-982", + "checking out logical cluster %lu of inode %llu: " + "jnode is not dirty", clust->index, + (unsigned long long)get_inode_oid(inode)); + unlock_cluster(node); + return RETERR(-E_REPEAT); + } + cluster_reserved2grabbed(estimate_update_cluster(inode)); + + /* this will unlock cluster */ + checkout_page_cluster(clust, node, inode); + return 0; +} + +/* set hint for the cluster of the index @index */ +static void set_hint_cluster(struct inode *inode, hint_t * hint, + cloff_t index, znode_lock_mode mode) +{ + reiser4_key key; + assert("edward-722", cryptcompress_inode_ok(inode)); + assert("edward-723", + inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + + inode_file_plugin(inode)->key_by_inode(inode, + clust_to_off(index, inode), + &key); + + reiser4_seal_init(&hint->seal, &hint->ext_coord.coord, &key); + hint->offset = get_key_offset(&key); + hint->mode 
= mode; +} + +void invalidate_hint_cluster(struct cluster_handle * clust) +{ + assert("edward-1291", clust != NULL); + assert("edward-1292", clust->hint != NULL); + + done_lh(&clust->hint->lh); + hint_clr_valid(clust->hint); +} + +static void put_hint_cluster(struct cluster_handle * clust, + struct inode *inode, znode_lock_mode mode) +{ + assert("edward-1286", clust != NULL); + assert("edward-1287", clust->hint != NULL); + + set_hint_cluster(inode, clust->hint, clust->index + 1, mode); + invalidate_hint_cluster(clust); +} + +static int balance_dirty_page_cluster(struct cluster_handle * clust, + struct inode *inode, loff_t off, + loff_t to_file, + int nr_dirtied) +{ + int result; + struct cryptcompress_info * info; + + assert("edward-724", inode != NULL); + assert("edward-725", cryptcompress_inode_ok(inode)); + assert("edward-1547", nr_dirtied <= cluster_nrpages(inode)); + + /* set next window params */ + move_update_window(inode, clust, off, to_file); + + result = update_sd_cryptcompress(inode); + if (result) + return result; + assert("edward-726", clust->hint->lh.owner == NULL); + info = cryptcompress_inode_data(inode); + + if (nr_dirtied == 0) + return 0; + mutex_unlock(&info->checkin_mutex); + reiser4_throttle_write(inode); + mutex_lock(&info->checkin_mutex); + return 0; +} + +/* + * Check in part of a hole within a logical cluster + */ +static int write_hole(struct inode *inode, struct cluster_handle * clust, + loff_t file_off, loff_t to_file) +{ + int result = 0; + unsigned cl_off, cl_count = 0; + unsigned to_pg, pg_off; + struct reiser4_slide * win; + + assert("edward-190", clust != NULL); + assert("edward-1069", clust->win != NULL); + assert("edward-191", inode != NULL); + assert("edward-727", cryptcompress_inode_ok(inode)); + assert("edward-1171", clust->dstat != INVAL_DISK_CLUSTER); + assert("edward-1154", + ergo(clust->dstat != FAKE_DISK_CLUSTER, clust->reserved == 1)); + + win = clust->win; + + assert("edward-1070", win != NULL); + assert("edward-201", 
win->stat == HOLE_WINDOW); + assert("edward-192", cluster_ok(clust, inode)); + + if (win->off == 0 && win->count == inode_cluster_size(inode)) { + /* + * This part of the hole occupies the whole logical + * cluster, so it won't be represented by any items. + * Nothing to submit. + */ + move_update_window(inode, clust, file_off, to_file); + return 0; + } + /* + * This part of the hole starts not at logical cluster + * boundary, so it has to be converted to zeros and written to disk + */ + cl_count = win->count; /* number of zeroes to write */ + cl_off = win->off; + pg_off = off_to_pgoff(win->off); + + while (cl_count) { + struct page *page; + page = clust->pages[off_to_pg(cl_off)]; + + assert("edward-284", page != NULL); + + to_pg = min((typeof(pg_off))PAGE_SIZE - pg_off, cl_count); + lock_page(page); + zero_user(page, pg_off, to_pg); + SetPageUptodate(page); + set_page_dirty_notag(page); + mark_page_accessed(page); + unlock_page(page); + + cl_off += to_pg; + cl_count -= to_pg; + pg_off = 0; + } + if (win->delta == 0) { + /* only zeroes in this window, try to capture + */ + result = checkin_logical_cluster(clust, inode); + if (result) + return result; + put_hint_cluster(clust, inode, ZNODE_WRITE_LOCK); + result = balance_dirty_page_cluster(clust, + inode, file_off, to_file, + win_count_to_nrpages(win)); + } else + move_update_window(inode, clust, file_off, to_file); + return result; +} + +/* + The main disk search procedure for cryptcompress plugin, which + . scans all items of disk cluster with the lock mode @mode + . maybe reads each one (if @read) + . 
maybe makes its znode dirty (if write lock mode was specified) + + NOTE-EDWARD: Callers should handle the case when disk cluster + is incomplete (-EIO) +*/ +int find_disk_cluster(struct cluster_handle * clust, + struct inode *inode, int read, znode_lock_mode mode) +{ + flow_t f; + hint_t *hint; + int result = 0; + int was_grabbed; + ra_info_t ra_info; + file_plugin *fplug; + item_plugin *iplug; + struct tfm_cluster *tc; + struct cryptcompress_info * info; + + assert("edward-138", clust != NULL); + assert("edward-728", clust->hint != NULL); + assert("edward-226", reiser4_schedulable()); + assert("edward-137", inode != NULL); + assert("edward-729", cryptcompress_inode_ok(inode)); + + hint = clust->hint; + fplug = inode_file_plugin(inode); + was_grabbed = get_current_context()->grabbed_blocks; + info = cryptcompress_inode_data(inode); + tc = &clust->tc; + + assert("edward-462", !tfm_cluster_is_uptodate(tc)); + assert("edward-461", ergo(read, tfm_stream_is_set(tc, INPUT_STREAM))); + + dclust_init_extension(hint); + + /* set key of the first disk cluster item */ + fplug->flow_by_inode(inode, + (read ? (char __user *)tfm_stream_data(tc, INPUT_STREAM) : NULL), + 0 /* kernel space */ , + inode_scaled_cluster_size(inode), + clust_to_off(clust->index, inode), READ_OP, &f); + if (mode == ZNODE_WRITE_LOCK) { + /* reserve for flush to make dirty all the leaf nodes + which contain disk cluster */ + result = + reiser4_grab_space_force(estimate_dirty_cluster(inode), + BA_CAN_COMMIT); + if (result) + goto out; + } + + ra_info.key_to_stop = f.key; + set_key_offset(&ra_info.key_to_stop, get_key_offset(reiser4_max_key())); + + while (f.length) { + result = find_cluster_item(hint, &f.key, mode, + NULL, FIND_EXACT, + (mode == ZNODE_WRITE_LOCK ? 
+ CBK_FOR_INSERT : 0)); + switch (result) { + case CBK_COORD_NOTFOUND: + result = 0; + if (inode_scaled_offset + (inode, clust_to_off(clust->index, inode)) == + get_key_offset(&f.key)) { + /* first item not found, this is treated + as disk cluster is absent */ + clust->dstat = FAKE_DISK_CLUSTER; + goto out; + } + /* we are outside the cluster, stop search here */ + assert("edward-146", + f.length != inode_scaled_cluster_size(inode)); + goto ok; + case CBK_COORD_FOUND: + assert("edward-148", + hint->ext_coord.coord.between == AT_UNIT); + assert("edward-460", + hint->ext_coord.coord.unit_pos == 0); + + coord_clear_iplug(&hint->ext_coord.coord); + result = zload_ra(hint->ext_coord.coord.node, &ra_info); + if (unlikely(result)) + goto out; + iplug = item_plugin_by_coord(&hint->ext_coord.coord); + assert("edward-147", + item_id_by_coord(&hint->ext_coord.coord) == + CTAIL_ID); + + result = iplug->s.file.read(NULL, &f, hint); + if (result) { + zrelse(hint->ext_coord.coord.node); + goto out; + } + if (mode == ZNODE_WRITE_LOCK) { + /* Don't make dirty more nodes then it was + estimated (see comments before + estimate_dirty_cluster). Missed nodes will be + read up in flush time if they are evicted from + memory */ + if (dclust_get_extension_ncount(hint) <= + estimate_dirty_cluster(inode)) + znode_make_dirty(hint->ext_coord.coord.node); + + znode_set_convertible(hint->ext_coord.coord. 
+ node); + } + zrelse(hint->ext_coord.coord.node); + break; + default: + goto out; + } + } + ok: + /* at least one item was found */ + /* NOTE-EDWARD: Callers should handle the case + when disk cluster is incomplete (-EIO) */ + tc->len = inode_scaled_cluster_size(inode) - f.length; + tc->lsize = lbytes(clust->index, inode); + assert("edward-1196", tc->len > 0); + assert("edward-1406", tc->lsize > 0); + + if (hint_is_unprepped_dclust(clust->hint)) { + clust->dstat = UNPR_DISK_CLUSTER; + } else if (clust->index == info->trunc_index) { + clust->dstat = TRNC_DISK_CLUSTER; + } else { + clust->dstat = PREP_DISK_CLUSTER; + dclust_set_extension_dsize(clust->hint, tc->len); + } + out: + assert("edward-1339", + get_current_context()->grabbed_blocks >= was_grabbed); + grabbed2free(get_current_context(), + get_current_super_private(), + get_current_context()->grabbed_blocks - was_grabbed); + return result; +} + +int get_disk_cluster_locked(struct cluster_handle * clust, struct inode *inode, + znode_lock_mode lock_mode) +{ + reiser4_key key; + ra_info_t ra_info; + + assert("edward-730", reiser4_schedulable()); + assert("edward-731", clust != NULL); + assert("edward-732", inode != NULL); + + if (hint_is_valid(clust->hint)) { + assert("edward-1293", clust->dstat != INVAL_DISK_CLUSTER); + assert("edward-1294", + znode_is_write_locked(clust->hint->lh.node)); + /* already have a valid locked position */ + return (clust->dstat == + FAKE_DISK_CLUSTER ? CBK_COORD_NOTFOUND : + CBK_COORD_FOUND); + } + key_by_inode_cryptcompress(inode, clust_to_off(clust->index, inode), + &key); + ra_info.key_to_stop = key; + set_key_offset(&ra_info.key_to_stop, get_key_offset(reiser4_max_key())); + + return find_cluster_item(clust->hint, &key, lock_mode, NULL, FIND_EXACT, + CBK_FOR_INSERT); +} + +/* Read needed cluster pages before modifying. + If success, @clust->hint contains locked position in the tree. + Also: + . find and set disk cluster state + . 
make disk cluster dirty if its state is not FAKE_DISK_CLUSTER. +*/ +static int read_some_cluster_pages(struct inode * inode, + struct cluster_handle * clust) +{ + int i; + int result = 0; + item_plugin *iplug; + struct reiser4_slide * win = clust->win; + znode_lock_mode mode = ZNODE_WRITE_LOCK; + + iplug = item_plugin_by_id(CTAIL_ID); + + assert("edward-924", !tfm_cluster_is_uptodate(&clust->tc)); + +#if REISER4_DEBUG + if (clust->nr_pages == 0) { + /* start write hole from fake disk cluster */ + assert("edward-1117", win != NULL); + assert("edward-1118", win->stat == HOLE_WINDOW); + assert("edward-1119", new_logical_cluster(clust, inode)); + } +#endif + if (new_logical_cluster(clust, inode)) { + /* + new page cluster is about to be written, nothing to read, + */ + assert("edward-734", reiser4_schedulable()); + assert("edward-735", clust->hint->lh.owner == NULL); + + if (clust->nr_pages) { + int off; + struct page * pg; + assert("edward-1419", clust->pages != NULL); + pg = clust->pages[clust->nr_pages - 1]; + assert("edward-1420", pg != NULL); + off = off_to_pgoff(win->off+win->count+win->delta); + if (off) { + lock_page(pg); + zero_user_segment(pg, off, PAGE_SIZE); + unlock_page(pg); + } + } + clust->dstat = FAKE_DISK_CLUSTER; + return 0; + } + /* + Here we should search for disk cluster to figure out its real state. + Also there is one more important reason to do disk search: we need + to make disk cluster _dirty_ if it exists + */ + + /* if windows is specified, read the only pages + that will be modified partially */ + + for (i = 0; i < clust->nr_pages; i++) { + struct page *pg = clust->pages[i]; + + lock_page(pg); + if (PageUptodate(pg)) { + unlock_page(pg); + continue; + } + unlock_page(pg); + + if (win && + i >= size_in_pages(win->off) && + i < off_to_pg(win->off + win->count + win->delta)) + /* page will be completely overwritten */ + continue; + + if (win && (i == clust->nr_pages - 1) && + /* the last page is + partially modified, + not uptodate .. 
*/ + (size_in_pages(i_size_read(inode)) <= pg->index)) { + /* .. and appended, + so set zeroes to the rest */ + int offset; + lock_page(pg); + assert("edward-1260", + size_in_pages(win->off + win->count + + win->delta) - 1 == i); + + offset = + off_to_pgoff(win->off + win->count + win->delta); + zero_user_segment(pg, offset, PAGE_SIZE); + unlock_page(pg); + /* still not uptodate */ + break; + } + lock_page(pg); + result = do_readpage_ctail(inode, clust, pg, mode); + + assert("edward-1526", ergo(!result, PageUptodate(pg))); + unlock_page(pg); + if (result) { + warning("edward-219", "do_readpage_ctail failed"); + goto out; + } + } + if (!tfm_cluster_is_uptodate(&clust->tc)) { + /* disk cluster unclaimed, but we need to make its znodes dirty + * to make flush update convert its content + */ + result = find_disk_cluster(clust, inode, + 0 /* do not read items */, + mode); + } + out: + tfm_cluster_clr_uptodate(&clust->tc); + return result; +} + +static int should_create_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + assert("edward-737", clust != NULL); + + switch (clust->dstat) { + case PREP_DISK_CLUSTER: + case UNPR_DISK_CLUSTER: + return 0; + case FAKE_DISK_CLUSTER: + if (clust->win && + clust->win->stat == HOLE_WINDOW && clust->nr_pages == 0) { + assert("edward-1172", + new_logical_cluster(clust, inode)); + return 0; + } + return 1; + default: + impossible("edward-1173", "bad disk cluster state"); + return 0; + } +} + +static int cryptcompress_make_unprepped_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int result; + + assert("edward-1123", reiser4_schedulable()); + assert("edward-737", clust != NULL); + assert("edward-738", inode != NULL); + assert("edward-739", cryptcompress_inode_ok(inode)); + assert("edward-1053", clust->hint != NULL); + + if (!should_create_unprepped_cluster(clust, inode)) { + if (clust->reserved) { + cluster_reserved2free(estimate_insert_cluster(inode)); +#if REISER4_DEBUG + 
assert("edward-1267", + clust->reserved_unprepped == + estimate_insert_cluster(inode)); + clust->reserved_unprepped -= + estimate_insert_cluster(inode); +#endif + } + return 0; + } + assert("edward-1268", clust->reserved); + cluster_reserved2grabbed(estimate_insert_cluster(inode)); +#if REISER4_DEBUG + assert("edward-1441", + clust->reserved_unprepped == estimate_insert_cluster(inode)); + clust->reserved_unprepped -= estimate_insert_cluster(inode); +#endif + result = ctail_insert_unprepped_cluster(clust, inode); + if (result) + return result; + + inode_add_bytes(inode, inode_cluster_size(inode)); + + assert("edward-743", cryptcompress_inode_ok(inode)); + assert("edward-744", znode_is_write_locked(clust->hint->lh.node)); + + clust->dstat = UNPR_DISK_CLUSTER; + return 0; +} + +/* . Grab page cluster for read, write, setattr, etc. operations; + * . Truncate its complete pages, if needed; + */ +int prepare_page_cluster(struct inode * inode, struct cluster_handle * clust, + rw_op rw) +{ + assert("edward-177", inode != NULL); + assert("edward-741", cryptcompress_inode_ok(inode)); + assert("edward-740", clust->pages != NULL); + + set_cluster_nrpages(clust, inode); + reset_cluster_pgset(clust, cluster_nrpages(inode)); + return grab_page_cluster(inode, clust, rw); +} + +/* Truncate complete page cluster of index @index. + * This is called by ->kill_hook() method of item + * plugin when deleting a disk cluster of such index. 
+ */ +void truncate_complete_page_cluster(struct inode *inode, cloff_t index, + int even_cows) +{ + int found; + int nr_pages; + jnode *node; + pgoff_t page_index = clust_to_pg(index, inode); + struct page *pages[MAX_CLUSTER_NRPAGES]; + + node = jlookup(current_tree, get_inode_oid(inode), + clust_to_pg(index, inode)); + nr_pages = size_in_pages(lbytes(index, inode)); + assert("edward-1483", nr_pages != 0); + if (!node) + goto truncate; + found = find_get_pages(inode->i_mapping, &page_index, + cluster_nrpages(inode), pages); + if (!found) { + assert("edward-1484", jnode_truncate_ok(inode, index)); + return; + } + lock_cluster(node); + + if (reiser4_inode_get_flag(inode, REISER4_FILE_CONV_IN_PROGRESS) + && index == 0) + /* converting to unix_file is in progress */ + JF_CLR(node, JNODE_CLUSTER_PAGE); + if (JF_ISSET(node, JNODE_DIRTY)) { + /* + * @nr_pages were checked in, but not yet checked out - + * we need to release them. (also there can be pages + * attached to page cache by read(), etc. - don't take + * them into account). 
+ */ + assert("edward-1198", found >= nr_pages); + + /* free disk space grabbed for disk cluster converting */ + cluster_reserved2grabbed(estimate_update_cluster(inode)); + grabbed2free(get_current_context(), + get_current_super_private(), + estimate_update_cluster(inode)); + __put_page_cluster(0, nr_pages, pages, inode); + + /* This will clear dirty bit, uncapture and unlock jnode */ + unlock_cluster_uncapture(node); + } else + unlock_cluster(node); + jput(node); /* jlookup */ + put_found_pages(pages, found); /* find_get_pages */ + truncate: + if (reiser4_inode_get_flag(inode, REISER4_FILE_CONV_IN_PROGRESS) && + index == 0) + return; + truncate_page_cluster_range(inode, pages, index, 0, + cluster_nrpages(inode), + even_cows); + assert("edward-1201", + ergo(!reiser4_inode_get_flag(inode, + REISER4_FILE_CONV_IN_PROGRESS), + jnode_truncate_ok(inode, index))); + return; +} + +/* + * Set cluster handle @clust of a logical cluster before + * modifications which are supposed to be committed. + * + * . grab cluster pages; + * . reserve disk space; + * . maybe read pages from disk and set the disk cluster dirty; + * . maybe write hole and check in (partially zeroed) logical cluster; + * . create 'unprepped' disk cluster for new or fake logical one. 
+ */ +static int prepare_logical_cluster(struct inode *inode, + loff_t file_off, /* write position + in the file */ + loff_t to_file, /* bytes of users data + to write to the file */ + struct cluster_handle * clust, + logical_cluster_op op) +{ + int result = 0; + struct reiser4_slide * win = clust->win; + + reset_cluster_params(clust); + cluster_set_tfm_act(&clust->tc, TFMA_READ); +#if REISER4_DEBUG + clust->ctx = get_current_context(); +#endif + assert("edward-1190", op != LC_INVAL); + + clust->op = op; + + result = prepare_page_cluster(inode, clust, WRITE_OP); + if (result) + return result; + assert("edward-1447", + ergo(clust->nr_pages != 0, jprivate(clust->pages[0]))); + assert("edward-1448", + ergo(clust->nr_pages != 0, + jnode_is_cluster_page(jprivate(clust->pages[0])))); + + result = reserve4cluster(inode, clust); + if (result) + goto out; + + result = read_some_cluster_pages(inode, clust); + + if (result || + /* + * don't submit data modifications + * when expanding or shrinking holes + */ + (op == LC_SHRINK && clust->dstat == FAKE_DISK_CLUSTER) || + (op == LC_EXPAND && clust->dstat == FAKE_DISK_CLUSTER)){ + free_reserved4cluster(inode, + clust, + estimate_update_cluster(inode) + + estimate_insert_cluster(inode)); + goto out; + } + assert("edward-1124", clust->dstat != INVAL_DISK_CLUSTER); + + result = cryptcompress_make_unprepped_cluster(clust, inode); + if (result) + goto error; + if (win && win->stat == HOLE_WINDOW) { + result = write_hole(inode, clust, file_off, to_file); + if (result) + goto error; + } + return 0; + error: + free_reserved4cluster(inode, clust, + estimate_update_cluster(inode)); + out: + put_page_cluster(clust, inode, WRITE_OP); + return result; +} + +/* set window by two offsets */ +static void set_window(struct cluster_handle * clust, + struct reiser4_slide * win, struct inode *inode, + loff_t o1, loff_t o2) +{ + assert("edward-295", clust != NULL); + assert("edward-296", inode != NULL); + assert("edward-1071", win != NULL); + 
assert("edward-297", o1 <= o2); + + clust->index = off_to_clust(o1, inode); + + win->off = off_to_cloff(o1, inode); + win->count = min((loff_t)(inode_cluster_size(inode) - win->off), + o2 - o1); + win->delta = 0; + + clust->win = win; +} + +static int set_window_and_cluster(struct inode *inode, + struct cluster_handle * clust, + struct reiser4_slide * win, size_t length, + loff_t file_off) +{ + int result; + + assert("edward-197", clust != NULL); + assert("edward-1072", win != NULL); + assert("edward-198", inode != NULL); + + result = alloc_cluster_pgset(clust, cluster_nrpages(inode)); + if (result) + return result; + + if (file_off > i_size_read(inode)) { + /* Uhmm, hole in cryptcompress file... */ + loff_t hole_size; + hole_size = file_off - inode->i_size; + + set_window(clust, win, inode, inode->i_size, file_off); + win->stat = HOLE_WINDOW; + if (win->off + hole_size < inode_cluster_size(inode)) + /* there is also user's data to append to the hole */ + win->delta = min(inode_cluster_size(inode) - + (win->off + win->count), length); + return 0; + } + set_window(clust, win, inode, file_off, file_off + length); + win->stat = DATA_WINDOW; + return 0; +} + +int set_cluster_by_page(struct cluster_handle * clust, struct page * page, + int count) +{ + int result = 0; + int (*setting_actor)(struct cluster_handle * clust, int count); + + assert("edward-1358", clust != NULL); + assert("edward-1359", page != NULL); + assert("edward-1360", page->mapping != NULL); + assert("edward-1361", page->mapping->host != NULL); + + setting_actor = + (clust->pages ? 
reset_cluster_pgset : alloc_cluster_pgset); + result = setting_actor(clust, count); + clust->index = pg_to_clust(page->index, page->mapping->host); + return result; +} + +/* reset all the params that not get updated */ +void reset_cluster_params(struct cluster_handle * clust) +{ + assert("edward-197", clust != NULL); + + clust->dstat = INVAL_DISK_CLUSTER; + clust->tc.uptodate = 0; + clust->tc.len = 0; +} + +/* the heart of write_cryptcompress */ +static loff_t do_write_cryptcompress(struct file *file, struct inode *inode, + const char __user *buf, size_t to_write, + loff_t pos, struct dispatch_context *cont) +{ + int i; + hint_t *hint; + int result = 0; + size_t count; + struct reiser4_slide win; + struct cluster_handle clust; + struct cryptcompress_info * info; + + assert("edward-154", buf != NULL); + assert("edward-161", reiser4_schedulable()); + assert("edward-748", cryptcompress_inode_ok(inode)); + assert("edward-159", current_blocksize == PAGE_SIZE); + assert("edward-1274", get_current_context()->grabbed_blocks == 0); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + return result; + } + count = to_write; + + reiser4_slide_init(&win); + cluster_init_read(&clust, &win); + clust.hint = hint; + info = cryptcompress_inode_data(inode); + + mutex_lock(&info->checkin_mutex); + + result = set_window_and_cluster(inode, &clust, &win, to_write, pos); + if (result) + goto out; + + if (next_window_stat(&win) == HOLE_WINDOW) { + /* write hole in this iteration + separated from the loop below */ + result = write_dispatch_hook(file, inode, + pos, &clust, cont); + if (result) + goto out; + result = prepare_logical_cluster(inode, pos, count, &clust, + LC_APPOV); + if (result) + goto out; + } + do { + const char __user * src; + unsigned page_off, to_page; + + assert("edward-750", reiser4_schedulable()); + + result = write_dispatch_hook(file, inode, 
+ pos + to_write - count, + &clust, cont); + if (result) + goto out; + if (cont->state == DISPATCH_ASSIGNED_NEW) + /* done_lh was called in write_dispatch_hook */ + goto out_no_longterm_lock; + + result = prepare_logical_cluster(inode, pos, count, &clust, + LC_APPOV); + if (result) + goto out; + + assert("edward-751", cryptcompress_inode_ok(inode)); + assert("edward-204", win.stat == DATA_WINDOW); + assert("edward-1288", hint_is_valid(clust.hint)); + assert("edward-752", + znode_is_write_locked(hint->ext_coord.coord.node)); + put_hint_cluster(&clust, inode, ZNODE_WRITE_LOCK); + + /* set write position in page */ + page_off = off_to_pgoff(win.off); + + /* copy user's data to cluster pages */ + for (i = off_to_pg(win.off), src = buf; + i < size_in_pages(win.off + win.count); + i++, src += to_page) { + to_page = __mbp(win.off + win.count, i) - page_off; + assert("edward-1039", + page_off + to_page <= PAGE_SIZE); + assert("edward-287", clust.pages[i] != NULL); + + fault_in_pages_readable(src, to_page); + + lock_page(clust.pages[i]); + result = + __copy_from_user((char *)kmap(clust.pages[i]) + + page_off, src, to_page); + kunmap(clust.pages[i]); + if (unlikely(result)) { + unlock_page(clust.pages[i]); + result = -EFAULT; + goto err2; + } + SetPageUptodate(clust.pages[i]); + set_page_dirty_notag(clust.pages[i]); + flush_dcache_page(clust.pages[i]); + mark_page_accessed(clust.pages[i]); + unlock_page(clust.pages[i]); + page_off = 0; + } + assert("edward-753", cryptcompress_inode_ok(inode)); + + result = checkin_logical_cluster(&clust, inode); + if (result) + goto err2; + + buf += win.count; + count -= win.count; + + result = balance_dirty_page_cluster(&clust, inode, 0, count, + win_count_to_nrpages(&win)); + if (result) + goto err1; + assert("edward-755", hint->lh.owner == NULL); + reset_cluster_params(&clust); + continue; + err2: + put_page_cluster(&clust, inode, WRITE_OP); + err1: + if (clust.reserved) + free_reserved4cluster(inode, + &clust, + 
estimate_update_cluster(inode));
+ break;
+ } while (count);
+ out:
+ done_lh(&hint->lh);
+ save_file_hint(file, hint);
+ out_no_longterm_lock:
+ mutex_unlock(&info->checkin_mutex);
+ kfree(hint);
+ put_cluster_handle(&clust);
+ assert("edward-195",
+ ergo((to_write == count),
+ (result < 0 || cont->state == DISPATCH_ASSIGNED_NEW)));
+ return (to_write - count) ? (to_write - count) : result;
+}
+
+/**
+ * plugin->write()
+ * @file: file to write to
+ * @buf: address of user-space buffer
+ * @count: number of bytes to write
+ * @off: position in file to write to
+ */
+ssize_t write_cryptcompress(struct file *file, const char __user *buf,
+ size_t count, loff_t *off,
+ struct dispatch_context *cont)
+{
+ ssize_t result;
+ struct inode *inode;
+ reiser4_context *ctx;
+ loff_t pos = *off;
+ struct cryptcompress_info *info;
+
+ assert("edward-1449", cont->state == DISPATCH_INVAL_STATE);
+
+ inode = file_inode(file);
+ assert("edward-196", cryptcompress_inode_ok(inode));
+
+ info = cryptcompress_inode_data(inode);
+ ctx = get_current_context();
+
+ result = file_remove_privs(file);
+ if (unlikely(result != 0)) {
+ context_set_commit_async(ctx);
+ return result;
+ }
+ /* remove_suid might create a transaction */
+ reiser4_txn_restart(ctx);
+
+ result = do_write_cryptcompress(file, inode, buf, count, pos, cont);
+
+ if (unlikely(result < 0)) {
+ context_set_commit_async(ctx);
+ return result;
+ }
+ /* update position in a file */
+ *off = pos + result;
+ return result;
+}
+
+/* plugin->readpages */
+int readpages_cryptcompress(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ reiser4_context * ctx;
+ int ret;
+
+ ctx = reiser4_init_context(mapping->host->i_sb);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto err;
+ }
+ /* cryptcompress file can be built of ctail items only */
+ ret = readpages_ctail(file, mapping, pages);
+ reiser4_txn_restart(ctx);
+ reiser4_exit_context(ctx);
+ if (ret) {
+err:
+
put_pages_list(pages);
+ }
+ return ret;
+}
+
+static reiser4_block_nr cryptcompress_estimate_read(struct inode *inode)
+{
+ /* reserve one block to update stat data item */
+ assert("edward-1193",
+ inode_file_plugin(inode)->estimate.update ==
+ estimate_update_common);
+ return estimate_update_common(inode);
+}
+
+/**
+ * plugin->read
+ * @file: file to read from
+ * @buf: address of user-space buffer
+ * @size: number of bytes to read
+ * @off: position in file to read from
+ */
+ssize_t read_cryptcompress(struct file * file, char __user *buf, size_t size,
+ loff_t * off)
+{
+ ssize_t result;
+ struct inode *inode;
+ reiser4_context *ctx;
+ struct cryptcompress_info *info;
+ reiser4_block_nr needed;
+
+ inode = file_inode(file);
+ assert("edward-1194", !reiser4_inode_get_flag(inode, REISER4_NO_SD));
+
+ ctx = reiser4_init_context(inode->i_sb);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ info = cryptcompress_inode_data(inode);
+ needed = cryptcompress_estimate_read(inode);
+
+ result = reiser4_grab_space(needed, BA_CAN_COMMIT);
+ if (result != 0) {
+ reiser4_exit_context(ctx);
+ return result;
+ }
+ result = new_sync_read(file, buf, size, off);
+
+ context_set_commit_async(ctx);
+ reiser4_exit_context(ctx);
+
+ return result;
+}
+
+/* Set left coord when unit is not found after node_lookup()
+ This takes into account that there can be holes in a sequence
+ of disk clusters */
+
+static void adjust_left_coord(coord_t * left_coord)
+{
+ switch (left_coord->between) {
+ case AFTER_UNIT:
+ left_coord->between = AFTER_ITEM; /* fall through */
+ case AFTER_ITEM:
+ case BEFORE_UNIT:
+ break;
+ default:
+ impossible("edward-1204", "bad left coord to cut");
+ }
+ return;
+}
+
+#define CRC_CUT_TREE_MIN_ITERATIONS 64
+
+/* plugin->cut_tree_worker */
+int cut_tree_worker_cryptcompress(tap_t * tap, const reiser4_key * from_key,
+ const reiser4_key * to_key,
+ reiser4_key * smallest_removed,
+ struct inode *object, int truncate,
+ int *progress)
+{
+ lock_handle next_node_lock;
+
coord_t left_coord; + int result; + + assert("edward-1158", tap->coord->node != NULL); + assert("edward-1159", znode_is_write_locked(tap->coord->node)); + assert("edward-1160", znode_get_level(tap->coord->node) == LEAF_LEVEL); + + *progress = 0; + init_lh(&next_node_lock); + + while (1) { + znode *node; /* node from which items are cut */ + node_plugin *nplug; /* node plugin for @node */ + + node = tap->coord->node; + + /* Move next_node_lock to the next node on the left. */ + result = + reiser4_get_left_neighbor(&next_node_lock, node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result != 0 && result != -E_NO_NEIGHBOR) + break; + /* FIXME-EDWARD: Check can we delete the node as a whole. */ + result = reiser4_tap_load(tap); + if (result) + return result; + + /* Prepare the second (right) point for cut_node() */ + if (*progress) + coord_init_last_unit(tap->coord, node); + + else if (item_plugin_by_coord(tap->coord)->b.lookup == NULL) + /* set rightmost unit for the items without lookup method */ + tap->coord->unit_pos = coord_last_unit_pos(tap->coord); + + nplug = node->nplug; + + assert("edward-1161", nplug); + assert("edward-1162", nplug->lookup); + + /* left_coord is leftmost unit cut from @node */ + result = nplug->lookup(node, from_key, FIND_EXACT, &left_coord); + + if (IS_CBKERR(result)) + break; + + if (result == CBK_COORD_NOTFOUND) + adjust_left_coord(&left_coord); + + /* adjust coordinates so that they are set to existing units */ + if (coord_set_to_right(&left_coord) + || coord_set_to_left(tap->coord)) { + result = 0; + break; + } + + if (coord_compare(&left_coord, tap->coord) == + COORD_CMP_ON_RIGHT) { + /* keys from @from_key to @to_key are not in the tree */ + result = 0; + break; + } + + /* cut data from one node */ + *smallest_removed = *reiser4_min_key(); + result = kill_node_content(&left_coord, + tap->coord, + from_key, + to_key, + smallest_removed, + next_node_lock.node, + object, truncate); + reiser4_tap_relse(tap); + + if (result) + 
break; + + ++(*progress); + + /* Check whether all items with keys >= from_key were removed + * from the tree. */ + if (keyle(smallest_removed, from_key)) + /* result = 0; */ + break; + + if (next_node_lock.node == NULL) + break; + + result = reiser4_tap_move(tap, &next_node_lock); + done_lh(&next_node_lock); + if (result) + break; + + /* Break long cut_tree operation (deletion of a large file) if + * atom requires commit. */ + if (*progress > CRC_CUT_TREE_MIN_ITERATIONS + && current_atom_should_commit()) { + result = -E_REPEAT; + break; + } + } + done_lh(&next_node_lock); + return result; +} + +static int expand_cryptcompress(struct inode *inode /* old size */, + loff_t new_size) +{ + int result = 0; + hint_t *hint; + lock_handle *lh; + loff_t hole_size; + int nr_zeroes; + struct reiser4_slide win; + struct cluster_handle clust; + + assert("edward-1133", inode->i_size < new_size); + assert("edward-1134", reiser4_schedulable()); + assert("edward-1135", cryptcompress_inode_ok(inode)); + assert("edward-1136", current_blocksize == PAGE_SIZE); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + reiser4_slide_init(&win); + cluster_init_read(&clust, &win); + clust.hint = hint; + + if (off_to_cloff(inode->i_size, inode) == 0) + goto append_hole; + /* + * It can happen that + * a part of the hole will be converted + * to zeros. 
If so, it should be submitted + */ + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + hole_size = new_size - inode->i_size; + nr_zeroes = inode_cluster_size(inode) - + off_to_cloff(inode->i_size, inode); + if (nr_zeroes > hole_size) + nr_zeroes = hole_size; + + set_window(&clust, &win, inode, inode->i_size, + inode->i_size + nr_zeroes); + win.stat = HOLE_WINDOW; + + assert("edward-1137", + clust.index == off_to_clust(inode->i_size, inode)); + + result = prepare_logical_cluster(inode, 0, 0, &clust, LC_EXPAND); + if (result) + goto out; + assert("edward-1139", + clust.dstat == PREP_DISK_CLUSTER || + clust.dstat == UNPR_DISK_CLUSTER || + clust.dstat == FAKE_DISK_CLUSTER); + + assert("edward-1431", hole_size >= nr_zeroes); + + append_hole: + INODE_SET_SIZE(inode, new_size); + out: + done_lh(lh); + kfree(hint); + put_cluster_handle(&clust); + return result; +} + +static int update_size_actor(struct inode *inode, + loff_t new_size, int update_sd) +{ + if (new_size & ((loff_t) (inode_cluster_size(inode)) - 1)) + /* + * cut not at logical cluster boundary, + * size will be updated by write_hole() + */ + return 0; + else + return reiser4_update_file_size(inode, new_size, update_sd); +} + +static int prune_cryptcompress(struct inode *inode, + loff_t new_size, int update_sd) +{ + int result = 0; + unsigned nr_zeros; + loff_t to_prune; + loff_t old_size; + cloff_t from_idx; + cloff_t to_idx; + + hint_t *hint; + lock_handle *lh; + struct reiser4_slide win; + struct cluster_handle clust; + + assert("edward-1140", inode->i_size >= new_size); + assert("edward-1141", reiser4_schedulable()); + assert("edward-1142", cryptcompress_inode_ok(inode)); + assert("edward-1143", current_blocksize == PAGE_SIZE); + + old_size = inode->i_size; + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + reiser4_slide_init(&win); + cluster_init_read(&clust, 
&win); + clust.hint = hint; + + /* + * index of the leftmost logical cluster + * that will be completely truncated + */ + from_idx = size_in_lc(new_size, inode); + to_idx = size_in_lc(inode->i_size, inode); + /* + * truncate all complete disk clusters starting from @from_idx + */ + assert("edward-1174", from_idx <= to_idx); + + old_size = inode->i_size; + if (from_idx != to_idx) { + struct cryptcompress_info *info; + info = cryptcompress_inode_data(inode); + + result = cut_file_items(inode, + clust_to_off(from_idx, inode), + update_sd, + clust_to_off(to_idx, inode), + update_size_actor); + info->trunc_index = ULONG_MAX; + if (unlikely(result == CBK_COORD_NOTFOUND)) + result = 0; + if (unlikely(result)) + goto out; + } + if (off_to_cloff(new_size, inode) == 0) + goto truncate_hole; + + assert("edward-1146", new_size < inode->i_size); + + to_prune = inode->i_size - new_size; + /* + * Partial truncate of the last logical cluster. + * Partial hole will be converted to zeros. The resulted + * logical cluster will be captured and submitted to disk + */ + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + + nr_zeros = off_to_pgoff(new_size); + if (nr_zeros) + nr_zeros = PAGE_SIZE - nr_zeros; + + set_window(&clust, &win, inode, new_size, new_size + nr_zeros); + win.stat = HOLE_WINDOW; + + assert("edward-1149", clust.index == from_idx - 1); + + result = prepare_logical_cluster(inode, 0, 0, &clust, LC_SHRINK); + if (result) + goto out; + assert("edward-1151", + clust.dstat == PREP_DISK_CLUSTER || + clust.dstat == UNPR_DISK_CLUSTER || + clust.dstat == FAKE_DISK_CLUSTER); + truncate_hole: + /* + * drop all the pages that don't have jnodes (i.e. 
pages
+ * which can not be truncated by cut_file_items() because
+ * of holes represented by fake disk clusters) including
+ * the pages of partially truncated cluster which was
+ * released by prepare_logical_cluster()
+ */
+ INODE_SET_SIZE(inode, new_size);
+ truncate_inode_pages(inode->i_mapping, new_size);
+ out:
+ assert("edward-1497",
+ pages_truncate_ok(inode, size_in_pages(new_size)));
+
+ done_lh(lh);
+ kfree(hint);
+ put_cluster_handle(&clust);
+ return result;
+}
+
+/**
+ * Capture a page cluster.
+ * @clust must be set up by a caller.
+ */
+static int capture_page_cluster(struct cluster_handle * clust,
+ struct inode * inode)
+{
+ int result;
+
+ assert("edward-1073", clust != NULL);
+ assert("edward-1074", inode != NULL);
+ assert("edward-1075", clust->dstat == INVAL_DISK_CLUSTER);
+
+ result = prepare_logical_cluster(inode, 0, 0, clust, LC_APPOV);
+ if (result)
+ return result;
+
+ set_cluster_pages_dirty(clust, inode);
+ result = checkin_logical_cluster(clust, inode);
+ put_hint_cluster(clust, inode, ZNODE_WRITE_LOCK);
+ if (unlikely(result))
+ put_page_cluster(clust, inode, WRITE_OP);
+ return result;
+}
+
+/* Starting from @index find tagged pages of the same page cluster.
+ * Clear the tag for each of them. Return number of found pages.
+ */ +static int find_anon_page_cluster(struct address_space * mapping, + pgoff_t * index, struct page ** pages) +{ + int i = 0; + int found; + spin_lock_irq(&mapping->tree_lock); + do { + /* looking for one page */ + found = radix_tree_gang_lookup_tag(&mapping->page_tree, + (void **)&pages[i], + *index, 1, + PAGECACHE_TAG_REISER4_MOVED); + if (!found) + break; + if (!same_page_cluster(pages[0], pages[i])) + break; + + /* found */ + get_page(pages[i]); + *index = pages[i]->index + 1; + + radix_tree_tag_clear(&mapping->page_tree, + pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + if (last_page_in_cluster(pages[i++])) + break; + } while (1); + spin_unlock_irq(&mapping->tree_lock); + return i; +} + +#define MAX_PAGES_TO_CAPTURE (1024) + +/* Capture anonymous page clusters */ +static int capture_anon_pages(struct address_space * mapping, pgoff_t * index, + int to_capture) +{ + int count = 0; + int found = 0; + int result = 0; + hint_t *hint; + lock_handle *lh; + struct inode * inode; + struct cluster_handle clust; + struct page * pages[MAX_CLUSTER_NRPAGES]; + + assert("edward-1127", mapping != NULL); + assert("edward-1128", mapping->host != NULL); + assert("edward-1440", mapping->host->i_mapping == mapping); + + inode = mapping->host; + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + cluster_init_read(&clust, NULL /* no sliding window */); + clust.hint = hint; + + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + + while (to_capture > 0) { + found = find_anon_page_cluster(mapping, index, pages); + if (!found) { + *index = (pgoff_t) - 1; + break; + } + move_cluster_forward(&clust, inode, pages[0]->index); + result = capture_page_cluster(&clust, inode); + + put_found_pages(pages, found); /* find_anon_page_cluster */ + if (result) + break; + to_capture -= clust.nr_pages; + count += clust.nr_pages; + } + if (result) { + 
warning("edward-1077", + "Capture failed (inode %llu, result=%i, captured=%d)\n", + (unsigned long long)get_inode_oid(inode), result, count); + } else { + assert("edward-1078", ergo(found > 0, count > 0)); + if (to_capture <= 0) + /* there may be left more pages */ + __mark_inode_dirty(inode, I_DIRTY_PAGES); + result = count; + } + out: + done_lh(lh); + kfree(hint); + put_cluster_handle(&clust); + return result; +} + +/* Returns true if inode's mapping has dirty pages + which do not belong to any atom */ +static int cryptcompress_inode_has_anon_pages(struct inode *inode) +{ + int result; + spin_lock_irq(&inode->i_mapping->tree_lock); + result = radix_tree_tagged(&inode->i_mapping->page_tree, + PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&inode->i_mapping->tree_lock); + return result; +} + +/* plugin->writepages */ +int writepages_cryptcompress(struct address_space *mapping, + struct writeback_control *wbc) +{ + int result = 0; + long to_capture; + pgoff_t nrpages; + pgoff_t index = 0; + struct inode *inode; + struct cryptcompress_info *info; + + inode = mapping->host; + if (!cryptcompress_inode_has_anon_pages(inode)) + goto end; + info = cryptcompress_inode_data(inode); + nrpages = size_in_pages(i_size_read(inode)); + + if (wbc->sync_mode != WB_SYNC_ALL) + to_capture = min(wbc->nr_to_write, (long)MAX_PAGES_TO_CAPTURE); + else + to_capture = MAX_PAGES_TO_CAPTURE; + do { + reiser4_context *ctx; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + result = PTR_ERR(ctx); + break; + } + /* avoid recursive calls to ->sync_inodes */ + ctx->nobalance = 1; + + assert("edward-1079", + lock_stack_isclean(get_current_lock_stack())); + + reiser4_txn_restart_current(); + + if (get_current_context()->entd) { + if (mutex_trylock(&info->checkin_mutex) == 0) { + /* the mutex might be occupied by + entd caller */ + result = RETERR(-EBUSY); + reiser4_exit_context(ctx); + break; + } + } else + mutex_lock(&info->checkin_mutex); + + result = 
capture_anon_pages(inode->i_mapping, &index, + to_capture); + mutex_unlock(&info->checkin_mutex); + + if (result < 0) { + reiser4_exit_context(ctx); + break; + } + wbc->nr_to_write -= result; + if (wbc->sync_mode != WB_SYNC_ALL) { + reiser4_exit_context(ctx); + break; + } + result = txnmgr_force_commit_all(inode->i_sb, 0); + reiser4_exit_context(ctx); + } while (result >= 0 && index < nrpages); + + end: + if (is_in_reiser4_context()) { + if (get_current_context()->nr_captured >= CAPTURE_APAGE_BURST) { + /* there are already pages to flush, flush them out, + do not delay until end of reiser4_sync_inodes */ + reiser4_writeout(inode->i_sb, wbc); + get_current_context()->nr_captured = 0; + } + } + return result; +} + +/* plugin->ioctl */ +int ioctl_cryptcompress(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return RETERR(-ENOTTY); +} + +/* plugin->mmap */ +int mmap_cryptcompress(struct file *file, struct vm_area_struct *vma) +{ + int result; + struct inode *inode; + reiser4_context *ctx; + + inode = file_inode(file); + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + /* + * generic_file_mmap will do update_atime. Grab space for stat data + * update. 
+ */ + result = reiser4_grab_space_force + (inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + if (result) { + reiser4_exit_context(ctx); + return result; + } + result = generic_file_mmap(file, vma); + reiser4_exit_context(ctx); + return result; +} + +/* plugin->delete_object */ +int delete_object_cryptcompress(struct inode *inode) +{ + int result; + struct cryptcompress_info * info; + + assert("edward-429", inode->i_nlink == 0); + + reiser4_txn_restart_current(); + info = cryptcompress_inode_data(inode); + + mutex_lock(&info->checkin_mutex); + result = prune_cryptcompress(inode, 0, 0); + mutex_unlock(&info->checkin_mutex); + + if (result) { + warning("edward-430", + "cannot truncate cryptcompress file %lli: %i", + (unsigned long long)get_inode_oid(inode), + result); + } + /* and remove stat data */ + return reiser4_delete_object_common(inode); +} + +/* + * plugin->setattr + * This implements actual truncate (see comments in reiser4/page_cache.c) + */ +int setattr_cryptcompress(struct dentry *dentry, struct iattr *attr) +{ + int result; + struct inode *inode; + struct cryptcompress_info * info; + + inode = dentry->d_inode; + info = cryptcompress_inode_data(inode); + + if (attr->ia_valid & ATTR_SIZE) { + if (i_size_read(inode) != attr->ia_size) { + reiser4_context *ctx; + loff_t old_size; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + result = setattr_dispatch_hook(inode); + if (result) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + old_size = i_size_read(inode); + inode_check_scale(inode, old_size, attr->ia_size); + + mutex_lock(&info->checkin_mutex); + if (attr->ia_size > inode->i_size) + result = expand_cryptcompress(inode, + attr->ia_size); + else + result = prune_cryptcompress(inode, + attr->ia_size, + 1/* update sd */); + mutex_unlock(&info->checkin_mutex); + if (result) { + warning("edward-1192", + "truncate_cryptcompress failed: oid %lli, " + "old 
size %lld, new size %lld, retval %d", + (unsigned long long) + get_inode_oid(inode), old_size, + attr->ia_size, result); + } + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + } else + result = 0; + } else + result = reiser4_setattr_common(dentry, attr); + return result; +} + +/* plugin->release */ +int release_cryptcompress(struct inode *inode, struct file *file) +{ + reiser4_context *ctx = reiser4_init_context(inode->i_sb); + + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + reiser4_free_file_fsdata(file); + reiser4_exit_context(ctx); + return 0; +} + +/* plugin->write_begin() */ +int write_begin_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata) +{ + int ret = -ENOMEM; + char *buf; + hint_t *hint; + struct inode *inode; + struct reiser4_slide *win; + struct cluster_handle *clust; + struct cryptcompress_info *info; + reiser4_context *ctx; + + ctx = get_current_context(); + inode = page->mapping->host; + info = cryptcompress_inode_data(inode); + + assert("edward-1564", PageLocked(page)); + buf = kmalloc(sizeof(*clust) + + sizeof(*win) + + sizeof(*hint), + reiser4_ctx_gfp_mask_get()); + if (!buf) + goto err2; + clust = (struct cluster_handle *)buf; + win = (struct reiser4_slide *)(buf + sizeof(*clust)); + hint = (hint_t *)(buf + sizeof(*clust) + sizeof(*win)); + + hint_init_zero(hint); + cluster_init_read(clust, NULL); + clust->hint = hint; + + mutex_lock(&info->checkin_mutex); + + ret = set_window_and_cluster(inode, clust, win, len, pos); + if (ret) + goto err1; + unlock_page(page); + ret = prepare_logical_cluster(inode, pos, len, clust, LC_APPOV); + done_lh(&hint->lh); + assert("edward-1565", lock_stack_isclean(get_current_lock_stack())); + lock_page(page); + if (ret) { + SetPageError(page); + ClearPageUptodate(page); + goto err0; + } + /* + * Success. 
All resources (including checkin_mutex) + * will be released in ->write_end() + */ + ctx->locked_page = page; + *fsdata = (void *)buf; + + return 0; + err0: + put_cluster_handle(clust); + err1: + mutex_unlock(&info->checkin_mutex); + kfree(buf); + err2: + assert("edward-1568", !ret); + return ret; +} + +/* plugin->write_end() */ +int write_end_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata) +{ + int ret; + hint_t *hint; + struct inode *inode; + struct cluster_handle *clust; + struct cryptcompress_info *info; + reiser4_context *ctx; + + assert("edward-1566", + lock_stack_isclean(get_current_lock_stack())); + ctx = get_current_context(); + inode = page->mapping->host; + info = cryptcompress_inode_data(inode); + clust = (struct cluster_handle *)fsdata; + hint = clust->hint; + + unlock_page(page); + ctx->locked_page = NULL; + set_cluster_pages_dirty(clust, inode); + ret = checkin_logical_cluster(clust, inode); + if (ret) { + SetPageError(page); + goto exit; + } + exit: + mutex_unlock(&info->checkin_mutex); + + put_cluster_handle(clust); + + if (pos + copied > inode->i_size) { + /* + * i_size has been updated in + * checkin_logical_cluster + */ + ret = reiser4_update_sd(inode); + if (unlikely(ret != 0)) + warning("edward-1603", + "Can not update stat-data: %i. 
FSCK?", + ret); + } + kfree(fsdata); + return ret; +} + +/* plugin->bmap */ +sector_t bmap_cryptcompress(struct address_space *mapping, sector_t lblock) +{ + return -EINVAL; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/cryptcompress.h b/fs/reiser4/plugin/file/cryptcompress.h new file mode 100644 index 000000000000..fbdd85f157a2 --- /dev/null +++ b/fs/reiser4/plugin/file/cryptcompress.h @@ -0,0 +1,619 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* See http://www.namesys.com/cryptcompress_design.html */ + +#if !defined( __FS_REISER4_CRYPTCOMPRESS_H__ ) +#define __FS_REISER4_CRYPTCOMPRESS_H__ + +#include "../../page_cache.h" +#include "../compress/compress.h" +#include "../crypto/cipher.h" + +#include + +#define MIN_CLUSTER_SHIFT PAGE_SHIFT +#define MAX_CLUSTER_SHIFT 16 +#define MAX_CLUSTER_NRPAGES (1U << MAX_CLUSTER_SHIFT >> PAGE_SHIFT) +#define DC_CHECKSUM_SIZE 4 + +#define MIN_LATTICE_FACTOR 1 +#define MAX_LATTICE_FACTOR 32 + +/* this mask contains all non-standard plugins that might + be present in reiser4-specific part of inode managed by + cryptcompress file plugin */ +#define cryptcompress_mask \ + ((1 << PSET_FILE) | \ + (1 << PSET_CLUSTER) | \ + (1 << PSET_CIPHER) | \ + (1 << PSET_DIGEST) | \ + (1 << PSET_COMPRESSION) | \ + (1 << PSET_COMPRESSION_MODE)) + +#if REISER4_DEBUG +static inline int cluster_shift_ok(int shift) +{ + return (shift >= MIN_CLUSTER_SHIFT) && (shift <= MAX_CLUSTER_SHIFT); +} +#endif + +#if REISER4_DEBUG +#define INODE_PGCOUNT(inode) \ +({ \ + assert("edward-1530", inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); \ + atomic_read(&cryptcompress_inode_data(inode)->pgcount); \ + }) +#define INODE_PGCOUNT_INC(inode) \ +do { \ + assert("edward-1531", inode_file_plugin(inode) == \ + 
file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); \ + atomic_inc(&cryptcompress_inode_data(inode)->pgcount); \ +} while (0) +#define INODE_PGCOUNT_DEC(inode) \ +do { \ + if (inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) \ + atomic_dec(&cryptcompress_inode_data(inode)->pgcount); \ +} while (0) +#else +#define INODE_PGCOUNT(inode) (0) +#define INODE_PGCOUNT_INC(inode) +#define INODE_PGCOUNT_DEC(inode) +#endif /* REISER4_DEBUG */ + +struct tfm_stream { + __u8 *data; + size_t size; +}; + +typedef enum { + INPUT_STREAM, + OUTPUT_STREAM, + LAST_STREAM +} tfm_stream_id; + +typedef struct tfm_stream * tfm_unit[LAST_STREAM]; + +static inline __u8 *ts_data(struct tfm_stream * stm) +{ + assert("edward-928", stm != NULL); + return stm->data; +} + +static inline size_t ts_size(struct tfm_stream * stm) +{ + assert("edward-929", stm != NULL); + return stm->size; +} + +static inline void set_ts_size(struct tfm_stream * stm, size_t size) +{ + assert("edward-930", stm != NULL); + + stm->size = size; +} + +static inline int alloc_ts(struct tfm_stream ** stm) +{ + assert("edward-931", stm); + assert("edward-932", *stm == NULL); + + *stm = kzalloc(sizeof(**stm), reiser4_ctx_gfp_mask_get()); + if (!*stm) + return -ENOMEM; + return 0; +} + +static inline void free_ts(struct tfm_stream * stm) +{ + assert("edward-933", !ts_data(stm)); + assert("edward-934", !ts_size(stm)); + + kfree(stm); +} + +static inline int alloc_ts_data(struct tfm_stream * stm, size_t size) +{ + assert("edward-935", !ts_data(stm)); + assert("edward-936", !ts_size(stm)); + assert("edward-937", size != 0); + + stm->data = reiser4_vmalloc(size); + if (!stm->data) + return -ENOMEM; + set_ts_size(stm, size); + return 0; +} + +static inline void free_ts_data(struct tfm_stream * stm) +{ + assert("edward-938", equi(ts_data(stm), ts_size(stm))); + + if (ts_data(stm)) + vfree(ts_data(stm)); + memset(stm, 0, sizeof *stm); +} + +/* Write modes for item conversion in flush convert phase */ 
+typedef enum { + CTAIL_INVAL_CONVERT_MODE = 0, + CTAIL_APPEND_ITEM = 1, + CTAIL_OVERWRITE_ITEM = 2, + CTAIL_CUT_ITEM = 3 +} ctail_convert_mode_t; + +typedef enum { + LC_INVAL = 0, /* invalid value */ + LC_APPOV = 1, /* append and/or overwrite */ + LC_EXPAND = 2, /* expanding truncate */ + LC_SHRINK = 3 /* shrinking truncate */ +} logical_cluster_op; + +/* Transform cluster. + * Intermediate state between page cluster and disk cluster + * Is used for data transform (compression/encryption) + */ +struct tfm_cluster { + coa_set coa; /* compression algorithms info */ + tfm_unit tun; /* plain and transformed streams */ + tfm_action act; + int uptodate; + int lsize; /* number of bytes in logical cluster */ + int len; /* length of the transform stream */ + unsigned int hole:1; /* should punch hole */ +}; + +static inline coa_t get_coa(struct tfm_cluster * tc, reiser4_compression_id id, + tfm_action act) +{ + return tc->coa[id][act]; +} + +static inline void set_coa(struct tfm_cluster * tc, reiser4_compression_id id, + tfm_action act, coa_t coa) +{ + tc->coa[id][act] = coa; +} + +static inline int alloc_coa(struct tfm_cluster * tc, compression_plugin * cplug) +{ + coa_t coa; + + coa = cplug->alloc(tc->act); + if (IS_ERR(coa)) + return PTR_ERR(coa); + set_coa(tc, cplug->h.id, tc->act, coa); + return 0; +} + +static inline int +grab_coa(struct tfm_cluster * tc, compression_plugin * cplug) +{ + return (cplug->alloc && !get_coa(tc, cplug->h.id, tc->act) ? 
+ alloc_coa(tc, cplug) : 0); +} + +static inline void free_coa_set(struct tfm_cluster * tc) +{ + tfm_action j; + reiser4_compression_id i; + compression_plugin *cplug; + + assert("edward-810", tc != NULL); + + for (j = 0; j < TFMA_LAST; j++) + for (i = 0; i < LAST_COMPRESSION_ID; i++) { + if (!get_coa(tc, i, j)) + continue; + cplug = compression_plugin_by_id(i); + assert("edward-812", cplug->free != NULL); + cplug->free(get_coa(tc, i, j), j); + set_coa(tc, i, j, 0); + } + return; +} + +static inline struct tfm_stream * get_tfm_stream(struct tfm_cluster * tc, + tfm_stream_id id) +{ + return tc->tun[id]; +} + +static inline void set_tfm_stream(struct tfm_cluster * tc, + tfm_stream_id id, struct tfm_stream * ts) +{ + tc->tun[id] = ts; +} + +static inline __u8 *tfm_stream_data(struct tfm_cluster * tc, tfm_stream_id id) +{ + return ts_data(get_tfm_stream(tc, id)); +} + +static inline void set_tfm_stream_data(struct tfm_cluster * tc, + tfm_stream_id id, __u8 * data) +{ + get_tfm_stream(tc, id)->data = data; +} + +static inline size_t tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id) +{ + return ts_size(get_tfm_stream(tc, id)); +} + +static inline void +set_tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id, size_t size) +{ + get_tfm_stream(tc, id)->size = size; +} + +static inline int +alloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id) +{ + assert("edward-939", tc != NULL); + assert("edward-940", !get_tfm_stream(tc, id)); + + tc->tun[id] = kzalloc(sizeof(struct tfm_stream), + reiser4_ctx_gfp_mask_get()); + if (!tc->tun[id]) + return -ENOMEM; + return alloc_ts_data(get_tfm_stream(tc, id), size); +} + +static inline int +realloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id) +{ + assert("edward-941", tfm_stream_size(tc, id) < size); + free_ts_data(get_tfm_stream(tc, id)); + return alloc_ts_data(get_tfm_stream(tc, id), size); +} + +static inline void free_tfm_stream(struct tfm_cluster * tc, tfm_stream_id id) +{ + 
free_ts_data(get_tfm_stream(tc, id)); + free_ts(get_tfm_stream(tc, id)); + set_tfm_stream(tc, id, 0); +} + +static inline unsigned coa_overrun(compression_plugin * cplug, int ilen) +{ + return (cplug->overrun != NULL ? cplug->overrun(ilen) : 0); +} + +static inline void free_tfm_unit(struct tfm_cluster * tc) +{ + tfm_stream_id id; + for (id = 0; id < LAST_STREAM; id++) { + if (!get_tfm_stream(tc, id)) + continue; + free_tfm_stream(tc, id); + } +} + +static inline void put_tfm_cluster(struct tfm_cluster * tc) +{ + assert("edward-942", tc != NULL); + free_coa_set(tc); + free_tfm_unit(tc); +} + +static inline int tfm_cluster_is_uptodate(struct tfm_cluster * tc) +{ + assert("edward-943", tc != NULL); + assert("edward-944", tc->uptodate == 0 || tc->uptodate == 1); + return (tc->uptodate == 1); +} + +static inline void tfm_cluster_set_uptodate(struct tfm_cluster * tc) +{ + assert("edward-945", tc != NULL); + assert("edward-946", tc->uptodate == 0 || tc->uptodate == 1); + tc->uptodate = 1; + return; +} + +static inline void tfm_cluster_clr_uptodate(struct tfm_cluster * tc) +{ + assert("edward-947", tc != NULL); + assert("edward-948", tc->uptodate == 0 || tc->uptodate == 1); + tc->uptodate = 0; + return; +} + +static inline int tfm_stream_is_set(struct tfm_cluster * tc, tfm_stream_id id) +{ + return (get_tfm_stream(tc, id) && + tfm_stream_data(tc, id) && tfm_stream_size(tc, id)); +} + +static inline int tfm_cluster_is_set(struct tfm_cluster * tc) +{ + int i; + for (i = 0; i < LAST_STREAM; i++) + if (!tfm_stream_is_set(tc, i)) + return 0; + return 1; +} + +static inline void alternate_streams(struct tfm_cluster * tc) +{ + struct tfm_stream *tmp = get_tfm_stream(tc, INPUT_STREAM); + + set_tfm_stream(tc, INPUT_STREAM, get_tfm_stream(tc, OUTPUT_STREAM)); + set_tfm_stream(tc, OUTPUT_STREAM, tmp); +} + +/* Set of states to indicate a kind of data + * that will be written to the window */ +typedef enum { + DATA_WINDOW, /* user's data */ + HOLE_WINDOW /* zeroes (such kind of data 
can be written + * if we start to write from offset > i_size) */ +} window_stat; + +/* Window (of logical cluster size) discretely sliding along a file. + * Is used to locate hole region in a logical cluster to be properly + * represented on disk. + * We split a write to cryptcompress file into writes to its logical + * clusters. Before writing to a logical cluster we set a window, i.e. + * calculate values of the following fields: + */ +struct reiser4_slide { + unsigned off; /* offset to write from */ + unsigned count; /* number of bytes to write */ + unsigned delta; /* number of bytes to append to the hole */ + window_stat stat; /* what kind of data will be written starting + from @off */ +}; + +/* Possible states of a disk cluster */ +typedef enum { + INVAL_DISK_CLUSTER, /* unknown state */ + PREP_DISK_CLUSTER, /* disk cluster got converted by flush + * at least 1 time */ + UNPR_DISK_CLUSTER, /* disk cluster just created and should be + * converted by flush */ + FAKE_DISK_CLUSTER, /* disk cluster doesn't exist neither in memory + * nor on disk */ + TRNC_DISK_CLUSTER /* disk cluster is partially truncated */ +} disk_cluster_stat; + +/* The following structure represents various stages of the same logical + * cluster of index @index: + * . fixed slide + * . page cluster (stage in primary cache) + * . transform cluster (transition stage) + * . disk cluster (stage in secondary cache) + * This structure is used in transition and synchronizing operations, e.g. + * transform cluster is a transition state when synchronizing page cluster + * and disk cluster. + * FIXME: Encapsulate page cluster, disk cluster. 
+ */ +struct cluster_handle { + cloff_t index; /* offset in a file (unit is a cluster size) */ + int index_valid; /* for validating the index above, if needed */ + struct file *file; /* host file */ + + /* logical cluster */ + struct reiser4_slide *win; /* sliding window to locate holes */ + logical_cluster_op op; /* logical cluster operation (truncate or + append/overwrite) */ + /* transform cluster */ + struct tfm_cluster tc; /* contains all needed info to synchronize + page cluster and disk cluster) */ + /* page cluster */ + int nr_pages; /* number of pages of current checkin action */ + int old_nrpages; /* number of pages of last checkin action */ + struct page **pages; /* attached pages */ + jnode * node; /* jnode for capture */ + + /* disk cluster */ + hint_t *hint; /* current position in the tree */ + disk_cluster_stat dstat; /* state of the current disk cluster */ + int reserved; /* is space for disk cluster reserved */ +#if REISER4_DEBUG + reiser4_context *ctx; + int reserved_prepped; + int reserved_unprepped; +#endif + +}; + +static inline __u8 * tfm_input_data (struct cluster_handle * clust) +{ + return tfm_stream_data(&clust->tc, INPUT_STREAM); +} + +static inline __u8 * tfm_output_data (struct cluster_handle * clust) +{ + return tfm_stream_data(&clust->tc, OUTPUT_STREAM); +} + +static inline int reset_cluster_pgset(struct cluster_handle * clust, + int nrpages) +{ + assert("edward-1057", clust->pages != NULL); + memset(clust->pages, 0, sizeof(*clust->pages) * nrpages); + return 0; +} + +static inline int alloc_cluster_pgset(struct cluster_handle * clust, + int nrpages) +{ + assert("edward-949", clust != NULL); + assert("edward-1362", clust->pages == NULL); + assert("edward-950", nrpages != 0 && nrpages <= MAX_CLUSTER_NRPAGES); + + clust->pages = kzalloc(sizeof(*clust->pages) * nrpages, + reiser4_ctx_gfp_mask_get()); + if (!clust->pages) + return RETERR(-ENOMEM); + return 0; +} + +static inline void move_cluster_pgset(struct cluster_handle *clust, + 
struct page ***pages, int * nr_pages) +{ + assert("edward-1545", clust != NULL && clust->pages != NULL); + assert("edward-1546", pages != NULL && *pages == NULL); + *pages = clust->pages; + *nr_pages = clust->nr_pages; + clust->pages = NULL; +} + +static inline void free_cluster_pgset(struct cluster_handle * clust) +{ + assert("edward-951", clust->pages != NULL); + kfree(clust->pages); + clust->pages = NULL; +} + +static inline void put_cluster_handle(struct cluster_handle * clust) +{ + assert("edward-435", clust != NULL); + + put_tfm_cluster(&clust->tc); + if (clust->pages) + free_cluster_pgset(clust); + memset(clust, 0, sizeof *clust); +} + +static inline void inc_keyload_count(struct reiser4_crypto_info * data) +{ + assert("edward-1410", data != NULL); + data->keyload_count++; +} + +static inline void dec_keyload_count(struct reiser4_crypto_info * data) +{ + assert("edward-1411", data != NULL); + assert("edward-1412", data->keyload_count > 0); + data->keyload_count--; +} + +static inline int capture_cluster_jnode(jnode * node) +{ + return reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); +} + +/* cryptcompress specific part of reiser4_inode */ +struct cryptcompress_info { + struct mutex checkin_mutex; /* This is to serialize + * checkin_logical_cluster operations */ + cloff_t trunc_index; /* Index of the leftmost truncated disk + * cluster (to resolve races with read) */ + struct reiser4_crypto_info *crypt; + /* + * the following 2 fields are controlled by compression mode plugin + */ + int compress_toggle; /* Current status of compressibility */ + int lattice_factor; /* Factor of dynamic lattice. 
FIXME: Have + * a compression_toggle to keep the factor + */ +#if REISER4_DEBUG + atomic_t pgcount; /* number of grabbed pages */ +#endif +}; + +static inline void set_compression_toggle (struct cryptcompress_info * info, int val) +{ + info->compress_toggle = val; +} + +static inline int get_compression_toggle (struct cryptcompress_info * info) +{ + return info->compress_toggle; +} + +static inline int compression_is_on(struct cryptcompress_info * info) +{ + return get_compression_toggle(info) == 1; +} + +static inline void turn_on_compression(struct cryptcompress_info * info) +{ + set_compression_toggle(info, 1); +} + +static inline void turn_off_compression(struct cryptcompress_info * info) +{ + set_compression_toggle(info, 0); +} + +static inline void set_lattice_factor(struct cryptcompress_info * info, int val) +{ + info->lattice_factor = val; +} + +static inline int get_lattice_factor(struct cryptcompress_info * info) +{ + return info->lattice_factor; +} + +struct cryptcompress_info *cryptcompress_inode_data(const struct inode *); +int equal_to_rdk(znode *, const reiser4_key *); +int goto_right_neighbor(coord_t *, lock_handle *); +int cryptcompress_inode_ok(struct inode *inode); +int coord_is_unprepped_ctail(const coord_t * coord); +extern int do_readpage_ctail(struct inode *, struct cluster_handle *, + struct page * page, znode_lock_mode mode); +extern int ctail_insert_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode); +extern int readpages_cryptcompress(struct file*, struct address_space*, + struct list_head*, unsigned); +int bind_cryptcompress(struct inode *child, struct inode *parent); +void destroy_inode_cryptcompress(struct inode * inode); +int grab_page_cluster(struct inode *inode, struct cluster_handle * clust, + rw_op rw); +int write_dispatch_hook(struct file *file, struct inode * inode, + loff_t pos, struct cluster_handle * clust, + struct dispatch_context * cont); +int setattr_dispatch_hook(struct inode * inode); +struct 
reiser4_crypto_info * inode_crypto_info(struct inode * inode); +void inherit_crypto_info_common(struct inode * parent, struct inode * object, + int (*can_inherit)(struct inode * child, + struct inode * parent)); +void reiser4_attach_crypto_info(struct inode * inode, + struct reiser4_crypto_info * info); +void change_crypto_info(struct inode * inode, struct reiser4_crypto_info * new); +struct reiser4_crypto_info * reiser4_alloc_crypto_info (struct inode * inode); + +static inline struct crypto_blkcipher * info_get_cipher(struct reiser4_crypto_info * info) +{ + return info->cipher; +} + +static inline void info_set_cipher(struct reiser4_crypto_info * info, + struct crypto_blkcipher * tfm) +{ + info->cipher = tfm; +} + +static inline struct crypto_hash * info_get_digest(struct reiser4_crypto_info * info) +{ + return info->digest; +} + +static inline void info_set_digest(struct reiser4_crypto_info * info, + struct crypto_hash * tfm) +{ + info->digest = tfm; +} + +static inline void put_cluster_page(struct page * page) +{ + put_page(page); +} + +#endif /* __FS_REISER4_CRYPTCOMPRESS_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/file.c b/fs/reiser4/plugin/file/file.c new file mode 100644 index 000000000000..9da0744cc750 --- /dev/null +++ b/fs/reiser4/plugin/file/file.c @@ -0,0 +1,2796 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * this file contains implementations of inode/file/address_space/file plugin + * operations specific for "unix file plugin" (plugin id is + * UNIX_FILE_PLUGIN_ID). 
"Unix file" is either built of tail items only + * (FORMATTING_ID) or of extent items only (EXTENT_POINTER_ID) or empty (have + * no items but stat data) + */ + +#include "../../inode.h" +#include "../../super.h" +#include "../../tree_walk.h" +#include "../../carry.h" +#include "../../page_cache.h" +#include "../../ioctl.h" +#include "../object.h" +#include "../cluster.h" +#include "../../safe_link.h" + +#include +#include +#include + + +static int unpack(struct file *file, struct inode *inode, int forever); +static void drop_access(struct unix_file_info *); +static int hint_validate(hint_t * hint, const reiser4_key * key, int check_key, + znode_lock_mode lock_mode); + +/* Get exclusive access and make sure that file is not partially + * converted (It may happen that another process is doing tail + * conversion. If so, wait until it completes) + */ +static inline void get_exclusive_access_careful(struct unix_file_info * uf_info, + struct inode *inode) +{ + do { + get_exclusive_access(uf_info); + if (!reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)) + break; + drop_exclusive_access(uf_info); + schedule(); + } while (1); +} + +/* get unix file plugin specific portion of inode */ +struct unix_file_info *unix_file_inode_data(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->file_plugin_data.unix_file_info; +} + +/** + * equal_to_rdk - compare key and znode's right delimiting key + * @node: node whose right delimiting key to compare with @key + * @key: key to compare with @node's right delimiting key + * + * Returns true if @key is equal to right delimiting key of @node. 
+ */ +int equal_to_rdk(znode *node, const reiser4_key *key) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyeq(key, znode_get_rd_key(node)); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +#if REISER4_DEBUG + +/** + * equal_to_ldk - compare key and znode's left delimiting key + * @node: node whose left delimiting key to compare with @key + * @key: key to compare with @node's left delimiting key + * + * Returns true if @key is equal to left delimiting key of @node. + */ +int equal_to_ldk(znode *node, const reiser4_key *key) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyeq(key, znode_get_ld_key(node)); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +/** + * check_coord - check whether coord corresponds to key + * @coord: coord to check + * @key: key @coord has to correspond to + * + * Returns true if @coord is set as if it was set as result of lookup with @key + * in coord->node. + */ +static int check_coord(const coord_t *coord, const reiser4_key *key) +{ + coord_t twin; + + node_plugin_by_node(coord->node)->lookup(coord->node, key, + FIND_MAX_NOT_MORE_THAN, &twin); + return coords_equal(coord, &twin); +} + +#endif /* REISER4_DEBUG */ + +/** + * init_uf_coord - initialize extended coord + * @uf_coord: + * @lh: + * + * + */ +void init_uf_coord(uf_coord_t *uf_coord, lock_handle *lh) +{ + coord_init_zero(&uf_coord->coord); + coord_clear_iplug(&uf_coord->coord); + uf_coord->lh = lh; + init_lh(lh); + memset(&uf_coord->extension, 0, sizeof(uf_coord->extension)); + uf_coord->valid = 0; +} + +static void validate_extended_coord(uf_coord_t *uf_coord, loff_t offset) +{ + assert("vs-1333", uf_coord->valid == 0); + + if (coord_is_between_items(&uf_coord->coord)) + return; + + assert("vs-1348", + item_plugin_by_coord(&uf_coord->coord)->s.file. + init_coord_extension); + + item_body_by_coord(&uf_coord->coord); + item_plugin_by_coord(&uf_coord->coord)->s.file. 
+ init_coord_extension(uf_coord, offset); +} + +/** + * goto_right_neighbor - lock right neighbor, drop current node lock + * @coord: + * @lh: + * + * Obtain lock on right neighbor and drop lock on current node. + */ +int goto_right_neighbor(coord_t *coord, lock_handle *lh) +{ + int result; + lock_handle lh_right; + + assert("vs-1100", znode_is_locked(coord->node)); + + init_lh(&lh_right); + result = reiser4_get_right_neighbor(&lh_right, coord->node, + znode_is_wlocked(coord->node) ? + ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result) { + done_lh(&lh_right); + return result; + } + + /* + * we hold two longterm locks on neighboring nodes. Unlock left of + * them + */ + done_lh(lh); + + coord_init_first_unit_nocheck(coord, lh_right.node); + move_lh(lh, &lh_right); + + return 0; + +} + +/** + * set_file_state + * @uf_info: + * @cbk_result: + * @level: + * + * This is to be used by find_file_item and in find_file_state to + * determine real state of file + */ +static void set_file_state(struct unix_file_info *uf_info, int cbk_result, + tree_level level) +{ + if (cbk_errored(cbk_result)) + /* error happened in find_file_item */ + return; + + assert("vs-1164", level == LEAF_LEVEL || level == TWIG_LEVEL); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + if (cbk_result == CBK_COORD_NOTFOUND) + uf_info->container = UF_CONTAINER_EMPTY; + else if (level == LEAF_LEVEL) + uf_info->container = UF_CONTAINER_TAILS; + else + uf_info->container = UF_CONTAINER_EXTENTS; + } else { + /* + * file state is known, check whether it is set correctly if + * file is not being tail converted + */ + if (!reiser4_inode_get_flag(unix_file_info_to_inode(uf_info), + REISER4_PART_IN_CONV)) { + assert("vs-1162", + ergo(level == LEAF_LEVEL && + cbk_result == CBK_COORD_FOUND, + uf_info->container == UF_CONTAINER_TAILS)); + assert("vs-1165", + ergo(level == TWIG_LEVEL && + cbk_result == CBK_COORD_FOUND, + uf_info->container == UF_CONTAINER_EXTENTS)); + } + } +} + +int 
find_file_item_nohint(coord_t *coord, lock_handle *lh, + const reiser4_key *key, znode_lock_mode lock_mode, + struct inode *inode) +{ + return reiser4_object_lookup(inode, key, coord, lh, lock_mode, + FIND_MAX_NOT_MORE_THAN, + TWIG_LEVEL, LEAF_LEVEL, + (lock_mode == ZNODE_READ_LOCK) ? CBK_UNIQUE : + (CBK_UNIQUE | CBK_FOR_INSERT), + NULL /* ra_info */ ); +} + +/** + * find_file_item - look for file item in the tree + * @hint: provides coordinate, lock handle, seal + * @key: key for search + * @mode: mode of lock to put on returned node + * @ra_info: + * @inode: + * + * This finds position in the tree corresponding to @key. It first tries to use + * @hint's seal if it is set. + */ +int find_file_item(hint_t *hint, const reiser4_key *key, + znode_lock_mode lock_mode, + struct inode *inode) +{ + int result; + coord_t *coord; + lock_handle *lh; + + assert("nikita-3030", reiser4_schedulable()); + assert("vs-1707", hint != NULL); + assert("vs-47", inode != NULL); + + coord = &hint->ext_coord.coord; + lh = hint->ext_coord.lh; + init_lh(lh); + + result = hint_validate(hint, key, 1 /* check key */, lock_mode); + if (!result) { + if (coord->between == AFTER_UNIT && + equal_to_rdk(coord->node, key)) { + result = goto_right_neighbor(coord, lh); + if (result == -E_NO_NEIGHBOR) + return RETERR(-EIO); + if (result) + return result; + assert("vs-1152", equal_to_ldk(coord->node, key)); + /* + * we moved to different node. 
Invalidate coord + * extension, zload is necessary to init it again + */ + hint->ext_coord.valid = 0; + } + + set_file_state(unix_file_inode_data(inode), CBK_COORD_FOUND, + znode_get_level(coord->node)); + + return CBK_COORD_FOUND; + } + + coord_init_zero(coord); + result = find_file_item_nohint(coord, lh, key, lock_mode, inode); + set_file_state(unix_file_inode_data(inode), result, + znode_get_level(coord->node)); + + /* FIXME: we might already have coord extension initialized */ + hint->ext_coord.valid = 0; + return result; +} + +void hint_init_zero(hint_t * hint) +{ + memset(hint, 0, sizeof(*hint)); + init_lh(&hint->lh); + hint->ext_coord.lh = &hint->lh; +} + +static int find_file_state(struct inode *inode, struct unix_file_info *uf_info) +{ + int result; + reiser4_key key; + coord_t coord; + lock_handle lh; + + assert("vs-1628", ea_obtained(uf_info)); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + key_by_inode_and_offset_common(inode, 0, &key); + init_lh(&lh); + result = find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, inode); + set_file_state(uf_info, result, znode_get_level(coord.node)); + done_lh(&lh); + if (!cbk_errored(result)) + result = 0; + } else + result = 0; + assert("vs-1074", + ergo(result == 0, uf_info->container != UF_CONTAINER_UNKNOWN)); + reiser4_txn_restart_current(); + return result; +} + +/** + * Estimate and reserve space needed to truncate page + * which gets partially truncated: one block for page + * itself, stat-data update (estimate_one_insert_into_item) + * and one item insertion (estimate_one_insert_into_item) + * which may happen if page corresponds to hole extent and + * unallocated one will have to be created + */ +static int reserve_partial_page(reiser4_tree * tree) +{ + grab_space_enable(); + return reiser4_grab_reserved(reiser4_get_current_sb(), + 1 + + 2 * estimate_one_insert_into_item(tree), + BA_CAN_COMMIT); +} + +/* estimate and reserve space needed to cut one item and update one stat data */ +static int 
reserve_cut_iteration(reiser4_tree * tree) +{ + __u64 estimate = estimate_one_item_removal(tree) + + estimate_one_insert_into_item(tree); + + assert("nikita-3172", lock_stack_isclean(get_current_lock_stack())); + + grab_space_enable(); + /* We need to double our estimate now that we can delete more than one + node. */ + return reiser4_grab_reserved(reiser4_get_current_sb(), estimate * 2, + BA_CAN_COMMIT); +} + +int reiser4_update_file_size(struct inode *inode, loff_t new_size, + int update_sd) +{ + int result = 0; + + INODE_SET_SIZE(inode, new_size); + if (update_sd) { + inode->i_ctime = inode->i_mtime = current_time(inode); + result = reiser4_update_sd(inode); + } + return result; +} + +/** + * Cut file items one by one starting from the last one until + * new file size (inode->i_size) is reached. Reserve space + * and update file stat data on every single cut from the tree + */ +int cut_file_items(struct inode *inode, loff_t new_size, + int update_sd, loff_t cur_size, + int (*update_actor) (struct inode *, loff_t, int)) +{ + reiser4_key from_key, to_key; + reiser4_key smallest_removed; + file_plugin *fplug = inode_file_plugin(inode); + int result; + int progress = 0; + + assert("vs-1248", + fplug == file_plugin_by_id(UNIX_FILE_PLUGIN_ID) || + fplug == file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + + fplug->key_by_inode(inode, new_size, &from_key); + to_key = from_key; + set_key_offset(&to_key, cur_size - 1 /*get_key_offset(reiser4_max_key()) */ ); + /* this loop normally runs just once */ + while (1) { + result = reserve_cut_iteration(reiser4_tree_by_inode(inode)); + if (result) + break; + + result = reiser4_cut_tree_object(current_tree, &from_key, &to_key, + &smallest_removed, inode, 1, + &progress); + if (result == -E_REPEAT) { + /** + * -E_REPEAT is a signal to interrupt a long + * file truncation process + */ + if (progress) { + result = update_actor(inode, + get_key_offset(&smallest_removed), + update_sd); + if (result) + break; + } + /* the below does 
up(sbinfo->delete_mutex). + * Do not get folled */ + reiser4_release_reserved(inode->i_sb); + /** + * reiser4_cut_tree_object() was interrupted probably + * because current atom requires commit, we have to + * release transaction handle to allow atom commit. + */ + reiser4_txn_restart_current(); + continue; + } + if (result + && !(result == CBK_COORD_NOTFOUND && new_size == 0 + && inode->i_size == 0)) + break; + + set_key_offset(&smallest_removed, new_size); + /* Final sd update after the file gets its correct size */ + result = update_actor(inode, get_key_offset(&smallest_removed), + update_sd); + break; + } + + /* the below does up(sbinfo->delete_mutex). Do not get folled */ + reiser4_release_reserved(inode->i_sb); + + return result; +} + +int find_or_create_extent(struct page *page); + +/* part of truncate_file_body: it is called when truncate is used to make file + shorter */ +static int shorten_file(struct inode *inode, loff_t new_size) +{ + int result; + struct page *page; + int padd_from; + unsigned long index; + struct unix_file_info *uf_info; + + /* + * all items of ordinary reiser4 file are grouped together. That is why + * we can use reiser4_cut_tree. 
Plan B files (for instance) can not be + * truncated that simply + */ + result = cut_file_items(inode, new_size, 1 /*update_sd */ , + get_key_offset(reiser4_max_key()), + reiser4_update_file_size); + if (result) + return result; + + uf_info = unix_file_inode_data(inode); + assert("vs-1105", new_size == inode->i_size); + if (new_size == 0) { + uf_info->container = UF_CONTAINER_EMPTY; + return 0; + } + + result = find_file_state(inode, uf_info); + if (result) + return result; + if (uf_info->container == UF_CONTAINER_TAILS) + /* + * No need to worry about zeroing last page after new file + * end + */ + return 0; + + padd_from = inode->i_size & (PAGE_SIZE - 1); + if (!padd_from) + /* file is truncated to page boundary */ + return 0; + + result = reserve_partial_page(reiser4_tree_by_inode(inode)); + if (result) { + reiser4_release_reserved(inode->i_sb); + return result; + } + + /* last page is partially truncated - zero its content */ + index = (inode->i_size >> PAGE_SHIFT); + page = read_mapping_page(inode->i_mapping, index, NULL); + if (IS_ERR(page)) { + /* + * the below does up(sbinfo->delete_mutex). Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + if (likely(PTR_ERR(page) == -EINVAL)) { + /* looks like file is built of tail items */ + return 0; + } + return PTR_ERR(page); + } + wait_on_page_locked(page); + if (!PageUptodate(page)) { + put_page(page); + /* + * the below does up(sbinfo->delete_mutex). Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + return RETERR(-EIO); + } + + /* + * if page correspons to hole extent unit - unallocated one will be + * created here. This is not necessary + */ + result = find_or_create_extent(page); + + /* + * FIXME: cut_file_items has already updated inode. Probably it would + * be better to update it here when file is really truncated + */ + if (result) { + put_page(page); + /* + * the below does up(sbinfo->delete_mutex). 
Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + return result; + } + + lock_page(page); + assert("vs-1066", PageLocked(page)); + zero_user_segment(page, padd_from, PAGE_SIZE); + unlock_page(page); + put_page(page); + /* the below does up(sbinfo->delete_mutex). Do not get confused */ + reiser4_release_reserved(inode->i_sb); + return 0; +} + +/** + * should_have_notail + * @uf_info: + * @new_size: + * + * Calls formatting plugin to see whether file of size @new_size has to be + * stored in unformatted nodes or in tail items. 0 is returned for later case. + */ +static int should_have_notail(const struct unix_file_info *uf_info, loff_t new_size) +{ + if (!uf_info->tplug) + return 1; + return !uf_info->tplug->have_tail(unix_file_info_to_inode(uf_info), + new_size); + +} + +/** + * truncate_file_body - change length of file + * @inode: inode of file + * @new_size: new file length + * + * Adjusts items file @inode is built of to match @new_size. It may either cut + * items or add them to represent a hole at the end of file. The caller has to + * obtain exclusive access to the file. + */ +static int truncate_file_body(struct inode *inode, struct iattr *attr) +{ + int result; + loff_t new_size = attr->ia_size; + + if (inode->i_size < new_size) { + /* expanding truncate */ + struct unix_file_info *uf_info = unix_file_inode_data(inode); + + result = find_file_state(inode, uf_info); + if (result) + return result; + + if (should_have_notail(uf_info, new_size)) { + /* + * file of size @new_size has to be built of + * extents. 
If it is built of tails - convert to + * extents + */ + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another process + * - wait until it completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) + return result; + } + } + result = reiser4_write_extent(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + uf_info->container = UF_CONTAINER_EXTENTS; + } else { + if (uf_info->container == UF_CONTAINER_EXTENTS) { + result = reiser4_write_extent(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + } else { + result = reiser4_write_tail(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + uf_info->container = UF_CONTAINER_TAILS; + } + } + BUG_ON(result > 0); + result = reiser4_update_file_size(inode, new_size, 1); + BUG_ON(result != 0); + } else + result = shorten_file(inode, new_size); + return result; +} + +/** + * load_file_hint - copy hint from struct file to local variable + * @file: file to get hint from + * @hint: structure to fill + * + * Reiser4 specific portion of struct file may contain information (hint) + * stored on exiting from previous read or write. That information includes + * seal of znode and coord within that znode where previous read or write + * stopped. This function copies that information to @hint if it was stored or + * initializes @hint by 0s otherwise. 
+ */ +int load_file_hint(struct file *file, hint_t *hint) +{ + reiser4_file_fsdata *fsdata; + + if (file) { + fsdata = reiser4_get_file_fsdata(file); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + spin_lock_inode(file_inode(file)); + if (reiser4_seal_is_set(&fsdata->reg.hint.seal)) { + memcpy(hint, &fsdata->reg.hint, sizeof(*hint)); + init_lh(&hint->lh); + hint->ext_coord.lh = &hint->lh; + spin_unlock_inode(file_inode(file)); + /* + * force re-validation of the coord on the first + * iteration of the read/write loop. + */ + hint->ext_coord.valid = 0; + assert("nikita-19892", + coords_equal(&hint->seal.coord1, + &hint->ext_coord.coord)); + return 0; + } + memset(&fsdata->reg.hint, 0, sizeof(hint_t)); + spin_unlock_inode(file_inode(file)); + } + hint_init_zero(hint); + return 0; +} + +/** + * save_file_hint - copy hint to reiser4 private struct file's part + * @file: file to save hint in + * @hint: hint to save + * + * This copies @hint to reiser4 private part of struct file. It can help + * speedup future accesses to the file. + */ +void save_file_hint(struct file *file, const hint_t *hint) +{ + reiser4_file_fsdata *fsdata; + + assert("edward-1337", hint != NULL); + + if (!file || !reiser4_seal_is_set(&hint->seal)) + return; + fsdata = reiser4_get_file_fsdata(file); + assert("vs-965", !IS_ERR(fsdata)); + assert("nikita-19891", + coords_equal(&hint->seal.coord1, &hint->ext_coord.coord)); + assert("vs-30", hint->lh.owner == NULL); + spin_lock_inode(file_inode(file)); + fsdata->reg.hint = *hint; + spin_unlock_inode(file_inode(file)); + return; +} + +void reiser4_unset_hint(hint_t * hint) +{ + assert("vs-1315", hint); + hint->ext_coord.valid = 0; + reiser4_seal_done(&hint->seal); + done_lh(&hint->lh); +} + +/* coord must be set properly. 
So, that reiser4_set_hint + has nothing to do */ +void reiser4_set_hint(hint_t * hint, const reiser4_key * key, + znode_lock_mode mode) +{ + ON_DEBUG(coord_t * coord = &hint->ext_coord.coord); + assert("vs-1207", WITH_DATA(coord->node, check_coord(coord, key))); + + reiser4_seal_init(&hint->seal, &hint->ext_coord.coord, key); + hint->offset = get_key_offset(key); + hint->mode = mode; + done_lh(&hint->lh); +} + +int hint_is_set(const hint_t * hint) +{ + return reiser4_seal_is_set(&hint->seal); +} + +#if REISER4_DEBUG +static int all_but_offset_key_eq(const reiser4_key * k1, const reiser4_key * k2) +{ + return (get_key_locality(k1) == get_key_locality(k2) && + get_key_type(k1) == get_key_type(k2) && + get_key_band(k1) == get_key_band(k2) && + get_key_ordering(k1) == get_key_ordering(k2) && + get_key_objectid(k1) == get_key_objectid(k2)); +} +#endif + +static int +hint_validate(hint_t * hint, const reiser4_key * key, int check_key, + znode_lock_mode lock_mode) +{ + if (!hint || !hint_is_set(hint) || hint->mode != lock_mode) + /* hint either not set or set by different operation */ + return RETERR(-E_REPEAT); + + assert("vs-1277", all_but_offset_key_eq(key, &hint->seal.key)); + + if (check_key && get_key_offset(key) != hint->offset) + /* hint is set for different key */ + return RETERR(-E_REPEAT); + + assert("vs-31", hint->ext_coord.lh == &hint->lh); + return reiser4_seal_validate(&hint->seal, &hint->ext_coord.coord, key, + hint->ext_coord.lh, lock_mode, + ZNODE_LOCK_LOPRI); +} + +/** + * Look for place at twig level for extent corresponding to page, + * call extent's writepage method to create unallocated extent if + * it does not exist yet, initialize jnode, capture page + */ +int find_or_create_extent(struct page *page) +{ + int result; + struct inode *inode; + int plugged_hole; + + jnode *node; + + assert("vs-1065", page->mapping && page->mapping->host); + inode = page->mapping->host; + + lock_page(page); + node = jnode_of_page(page); + if (IS_ERR(node)) { + 
unlock_page(page); + return PTR_ERR(node); + } + JF_SET(node, JNODE_WRITE_PREPARED); + unlock_page(page); + + if (node->blocknr == 0) { + plugged_hole = 0; + result = reiser4_update_extent(inode, node, page_offset(page), + &plugged_hole); + if (result) { + JF_CLR(node, JNODE_WRITE_PREPARED); + jput(node); + warning("edward-1549", + "reiser4_update_extent failed: %d", result); + return result; + } + if (plugged_hole) + reiser4_update_sd(inode); + } else { + spin_lock_jnode(node); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + + BUG_ON(node->atom == NULL); + JF_CLR(node, JNODE_WRITE_PREPARED); + + if (get_current_context()->entd) { + entd_context *ent = get_entd_context(node->tree->super); + + if (ent->cur_request->page == page) + /* the following reference will be + dropped in reiser4_writeout */ + ent->cur_request->node = jref(node); + } + jput(node); + return 0; +} + +/** + * has_anonymous_pages - check whether inode has pages dirtied via mmap + * @inode: inode to check + * + * Returns true if inode's mapping has dirty pages which do not belong to any + * atom. Those are either tagged PAGECACHE_TAG_REISER4_MOVED in mapping's page + * tree or were eflushed and can be found via jnodes tagged + * EFLUSH_TAG_ANONYMOUS in radix tree of jnodes. + */ +static int has_anonymous_pages(struct inode *inode) +{ + int result; + + spin_lock_irq(&inode->i_mapping->tree_lock); + result = radix_tree_tagged(&inode->i_mapping->page_tree, PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&inode->i_mapping->tree_lock); + return result; +} + +/** + * capture_page_and_create_extent - + * @page: page to be captured + * + * Grabs space for extent creation and stat data update and calls function to + * do actual work. + * Exclusive, or non-exclusive lock must be held. 
+ */ +static int capture_page_and_create_extent(struct page *page) +{ + int result; + struct inode *inode; + + assert("vs-1084", page->mapping && page->mapping->host); + inode = page->mapping->host; + assert("vs-1139", + unix_file_inode_data(inode)->container == UF_CONTAINER_EXTENTS); + /* page belongs to file */ + assert("vs-1393", + inode->i_size > page_offset(page)); + + /* page capture may require extent creation (if it does not exist yet) + and stat data's update (number of blocks changes on extent + creation) */ + grab_space_enable(); + result = reiser4_grab_space(2 * estimate_one_insert_into_item + (reiser4_tree_by_inode(inode)), + BA_CAN_COMMIT); + if (likely(!result)) + result = find_or_create_extent(page); + + if (result != 0) + SetPageError(page); + return result; +} + +/* + * Support for "anonymous" pages and jnodes. + * + * When file is write-accessed through mmap pages can be dirtied from the user + * level. In this case kernel is not notified until one of following happens: + * + * (1) msync() + * + * (2) truncate() (either explicit or through unlink) + * + * (3) VM scanner starts reclaiming mapped pages, dirtying them before + * starting write-back. + * + * As a result of (3) ->writepage may be called on a dirty page without + * jnode. Such page is called "anonymous" in reiser4. Certain work-loads + * (iozone) generate huge number of anonymous pages. + * + * reiser4_sync_sb() method tries to insert anonymous pages into + * tree. This is done by capture_anonymous_*() functions below. + */ + +/** + * capture_anonymous_page - involve page into transaction + * @pg: page to deal with + * + * Takes care that @page has corresponding metadata in the tree, creates jnode + * for @page and captures it. On success 1 is returned. + */ +static int capture_anonymous_page(struct page *page) +{ + int result; + + if (PageWriteback(page)) + /* FIXME: do nothing? 
*/ + return 0; + + result = capture_page_and_create_extent(page); + if (result == 0) { + result = 1; + } else + warning("nikita-3329", + "Cannot capture anon page: %i", result); + + return result; +} + +/** + * capture_anonymous_pages - find and capture pages dirtied via mmap + * @mapping: address space where to look for pages + * @index: start index + * @to_capture: maximum number of pages to capture + * + * Looks for pages tagged REISER4_MOVED starting from the *@index-th page, + * captures (involves into atom) them, returns number of captured pages, + * updates @index to next page after the last captured one. + */ +static int +capture_anonymous_pages(struct address_space *mapping, pgoff_t *index, + unsigned int to_capture) +{ + int result; + struct pagevec pvec; + unsigned int i, count; + int nr; + + pagevec_init(&pvec, 0); + count = min(pagevec_space(&pvec), to_capture); + nr = 0; + + /* find pages tagged MOVED */ + spin_lock_irq(&mapping->tree_lock); + pvec.nr = radix_tree_gang_lookup_tag(&mapping->page_tree, + (void **)pvec.pages, *index, count, + PAGECACHE_TAG_REISER4_MOVED); + if (pagevec_count(&pvec) == 0) { + /* + * there are no pages tagged MOVED in mapping->page_tree + * starting from *index + */ + spin_unlock_irq(&mapping->tree_lock); + *index = (pgoff_t)-1; + return 0; + } + + /* clear MOVED tag for all found pages */ + for (i = 0; i < pagevec_count(&pvec); i++) { + get_page(pvec.pages[i]); + radix_tree_tag_clear(&mapping->page_tree, pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + } + spin_unlock_irq(&mapping->tree_lock); + + + *index = pvec.pages[i - 1]->index + 1; + + for (i = 0; i < pagevec_count(&pvec); i++) { + result = capture_anonymous_page(pvec.pages[i]); + if (result == 1) + nr++; + else { + if (result < 0) { + warning("vs-1454", + "failed to capture page: " + "result=%d, captured=%d)\n", + result, i); + + /* + * set MOVED tag to all pages which left not + * captured + */ + spin_lock_irq(&mapping->tree_lock); + for (; i < 
pagevec_count(&pvec); i ++) { + radix_tree_tag_set(&mapping->page_tree, + pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + } + spin_unlock_irq(&mapping->tree_lock); + + pagevec_release(&pvec); + return result; + } else { + /* + * result == 0. capture_anonymous_page returns + * 0 for Writeback-ed page. Set MOVED tag on + * that page + */ + spin_lock_irq(&mapping->tree_lock); + radix_tree_tag_set(&mapping->page_tree, + pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&mapping->tree_lock); + if (i == 0) + *index = pvec.pages[0]->index; + else + *index = pvec.pages[i - 1]->index + 1; + } + } + } + pagevec_release(&pvec); + return nr; +} + +/** + * capture_anonymous_jnodes - find and capture anonymous jnodes + * @mapping: address space where to look for jnodes + * @from: start index + * @to: end index + * @to_capture: maximum number of jnodes to capture + * + * Looks for jnodes tagged EFLUSH_TAG_ANONYMOUS in inode's tree of jnodes in + * the range of indexes @from-@to and captures them, returns number of captured + * jnodes, updates @from to next jnode after the last captured one. + */ +static int +capture_anonymous_jnodes(struct address_space *mapping, + pgoff_t *from, pgoff_t to, int to_capture) +{ + *from = to; + return 0; +} + +/* + * Commit atom of the jnode of a page. + */ +static int sync_page(struct page *page) +{ + int result; + do { + jnode *node; + txn_atom *atom; + + lock_page(page); + node = jprivate(page); + if (node != NULL) { + spin_lock_jnode(node); + atom = jnode_get_atom(node); + spin_unlock_jnode(node); + } else + atom = NULL; + unlock_page(page); + result = reiser4_sync_atom(atom); + } while (result == -E_REPEAT); + /* + * ZAM-FIXME-HANS: document the logic of this loop, is it just to + * handle the case where more pages get added to the atom while we are + * syncing it? 
+ */ + assert("nikita-3485", ergo(result == 0, + get_current_context()->trans->atom == NULL)); + return result; +} + +/* + * Commit atoms of pages on @pages list. + * call sync_page for each page from mapping's page tree + */ +static int sync_page_list(struct inode *inode) +{ + int result; + struct address_space *mapping; + unsigned long from; /* start index for radix_tree_gang_lookup */ + unsigned int found; /* return value for radix_tree_gang_lookup */ + + mapping = inode->i_mapping; + from = 0; + result = 0; + spin_lock_irq(&mapping->tree_lock); + while (result == 0) { + struct page *page; + + found = + radix_tree_gang_lookup(&mapping->page_tree, (void **)&page, + from, 1); + assert("edward-1550", found < 2); + if (found == 0) + break; + /** + * page may not leave radix tree because it is protected from + * truncating by inode->i_mutex locked by sys_fsync + */ + get_page(page); + spin_unlock_irq(&mapping->tree_lock); + + from = page->index + 1; + + result = sync_page(page); + + put_page(page); + spin_lock_irq(&mapping->tree_lock); + } + + spin_unlock_irq(&mapping->tree_lock); + return result; +} + +static int commit_file_atoms(struct inode *inode) +{ + int result; + struct unix_file_info *uf_info; + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access(uf_info); + /* + * find what items file is made from + */ + result = find_file_state(inode, uf_info); + drop_exclusive_access(uf_info); + if (result != 0) + return result; + + /* + * file state cannot change because we are under ->i_mutex + */ + switch (uf_info->container) { + case UF_CONTAINER_EXTENTS: + /* find_file_state might open join an atom */ + reiser4_txn_restart_current(); + result = + /* + * when we are called by + * filemap_fdatawrite-> + * do_writepages()-> + * reiser4_writepages_dispatch() + * + * inode->i_mapping->dirty_pages are spices into + * ->io_pages, leaving ->dirty_pages dirty. 
+ * + * When we are called from + * reiser4_fsync()->sync_unix_file(), we have to + * commit atoms of all pages on the ->dirty_list. + * + * So for simplicity we just commit ->io_pages and + * ->dirty_pages. + */ + sync_page_list(inode); + break; + case UF_CONTAINER_TAILS: + /* + * NOTE-NIKITA probably we can be smarter for tails. For now + * just commit all existing atoms. + */ + result = txnmgr_force_commit_all(inode->i_sb, 0); + break; + case UF_CONTAINER_EMPTY: + result = 0; + break; + case UF_CONTAINER_UNKNOWN: + default: + result = -EIO; + break; + } + + /* + * commit current transaction: there can be captured nodes from + * find_file_state() and finish_conversion(). + */ + reiser4_txn_restart_current(); + return result; +} + +/** + * writepages_unix_file - writepages of struct address_space_operations + * @mapping: + * @wbc: + * + * This captures anonymous pages and anonymous jnodes. Anonymous pages are + * pages which are dirtied via mmapping. Anonymous jnodes are ones which were + * created by reiser4_writepage. 
+ */ +int writepages_unix_file(struct address_space *mapping, + struct writeback_control *wbc) +{ + int result; + struct unix_file_info *uf_info; + pgoff_t pindex, jindex, nr_pages; + long to_capture; + struct inode *inode; + + inode = mapping->host; + if (!has_anonymous_pages(inode)) { + result = 0; + goto end; + } + jindex = pindex = wbc->range_start >> PAGE_SHIFT; + result = 0; + nr_pages = size_in_pages(i_size_read(inode)); + + uf_info = unix_file_inode_data(inode); + + do { + reiser4_context *ctx; + + if (wbc->sync_mode != WB_SYNC_ALL) + to_capture = min(wbc->nr_to_write, CAPTURE_APAGE_BURST); + else + to_capture = CAPTURE_APAGE_BURST; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + result = PTR_ERR(ctx); + break; + } + /* avoid recursive calls to ->sync_inodes */ + ctx->nobalance = 1; + assert("zam-760", lock_stack_isclean(get_current_lock_stack())); + assert("edward-1551", LOCK_CNT_NIL(inode_sem_w)); + assert("edward-1552", LOCK_CNT_NIL(inode_sem_r)); + + reiser4_txn_restart_current(); + + /* we have to get nonexclusive access to the file */ + if (get_current_context()->entd) { + /* + * use nonblocking version of nonexclusive_access to + * avoid deadlock which might look like the following: + * process P1 holds NEA on file F1 and called entd to + * reclaim some memory. Entd works for P1 and is going + * to capture pages of file F2. To do that entd has to + * get NEA to F2. F2 is held by process P2 which also + * called entd. But entd is serving P1 at the moment + * and P2 has to wait. Process P3 trying to get EA to + * file F2. Existence of pending EA request to file F2 + * makes impossible for entd to get NEA to file + * F2. Neither of these process can continue. Using + * nonblocking version of gettign NEA is supposed to + * avoid this deadlock. 
+ */ + if (try_to_get_nonexclusive_access(uf_info) == 0) { + result = RETERR(-EBUSY); + reiser4_exit_context(ctx); + break; + } + } else + get_nonexclusive_access(uf_info); + + while (to_capture > 0) { + pgoff_t start; + + assert("vs-1727", jindex <= pindex); + if (pindex == jindex) { + start = pindex; + result = + capture_anonymous_pages(inode->i_mapping, + &pindex, + to_capture); + if (result <= 0) + break; + to_capture -= result; + wbc->nr_to_write -= result; + if (start + result == pindex) { + jindex = pindex; + continue; + } + if (to_capture <= 0) + break; + } + /* deal with anonymous jnodes between jindex and pindex */ + result = + capture_anonymous_jnodes(inode->i_mapping, &jindex, + pindex, to_capture); + if (result < 0) + break; + to_capture -= result; + get_current_context()->nr_captured += result; + + if (jindex == (pgoff_t) - 1) { + assert("vs-1728", pindex == (pgoff_t) - 1); + break; + } + } + if (to_capture <= 0) + /* there may be left more pages */ + __mark_inode_dirty(inode, I_DIRTY_PAGES); + + drop_nonexclusive_access(uf_info); + if (result < 0) { + /* error happened */ + reiser4_exit_context(ctx); + return result; + } + if (wbc->sync_mode != WB_SYNC_ALL) { + reiser4_exit_context(ctx); + return 0; + } + result = commit_file_atoms(inode); + reiser4_exit_context(ctx); + if (pindex >= nr_pages && jindex == pindex) + break; + } while (1); + + end: + if (is_in_reiser4_context()) { + if (get_current_context()->nr_captured >= CAPTURE_APAGE_BURST) { + /* + * there are already pages to flush, flush them out, do + * not delay until end of reiser4_sync_inodes + */ + reiser4_writeout(inode->i_sb, wbc); + get_current_context()->nr_captured = 0; + } + } + return result; +} + +/** + * readpage_unix_file_nolock - readpage of struct address_space_operations + * @file: + * @page: + * + * Compose a key and search for item containing information about @page + * data. If item is found - its readpage method is called. 
+ */ +int readpage_unix_file(struct file *file, struct page *page) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + reiser4_key key; + item_plugin *iplug; + hint_t *hint; + lock_handle *lh; + coord_t *coord; + + assert("vs-1062", PageLocked(page)); + assert("vs-976", !PageUptodate(page)); + assert("vs-1061", page->mapping && page->mapping->host); + + if (page->mapping->host->i_size <= page_offset(page)) { + /* page is out of file */ + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + + inode = page->mapping->host; + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + unlock_page(page); + return PTR_ERR(ctx); + } + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + unlock_page(page); + reiser4_exit_context(ctx); + return RETERR(-ENOMEM); + } + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + unlock_page(page); + reiser4_exit_context(ctx); + return result; + } + lh = &hint->lh; + + /* get key of first byte of the page */ + key_by_inode_and_offset_common(inode, page_offset(page), &key); + + /* look for file metadata corresponding to first byte of page */ + get_page(page); + unlock_page(page); + result = find_file_item(hint, &key, ZNODE_READ_LOCK, inode); + lock_page(page); + put_page(page); + + if (page->mapping == NULL) { + /* + * readpage allows truncate to run concurrently. Page was + * truncated while it was not locked + */ + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return -EINVAL; + } + + if (result != CBK_COORD_FOUND || hint->ext_coord.coord.between != AT_UNIT) { + if (result == CBK_COORD_FOUND && + hint->ext_coord.coord.between != AT_UNIT) + /* file is truncated */ + result = -EINVAL; + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; + } + + /* + * item corresponding to page is found. 
It can not be removed because + * znode lock is held + */ + if (PageUptodate(page)) { + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return 0; + } + + coord = &hint->ext_coord.coord; + result = zload(coord->node); + if (result) { + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; + } + + validate_extended_coord(&hint->ext_coord, page_offset(page)); + + if (!coord_is_existing_unit(coord)) { + /* this indicates corruption */ + warning("vs-280", + "Looking for page %lu of file %llu (size %lli). " + "No file items found (%d). File is corrupted?\n", + page->index, (unsigned long long)get_inode_oid(inode), + inode->i_size, result); + zrelse(coord->node); + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return RETERR(-EIO); + } + + /* + * get plugin of found item or use plugin if extent if there are no + * one + */ + iplug = item_plugin_by_coord(coord); + if (iplug->s.file.readpage) + result = iplug->s.file.readpage(coord, page); + else + result = RETERR(-EINVAL); + + if (!result) { + set_key_offset(&key, + (loff_t) (page->index + 1) << PAGE_SHIFT); + /* FIXME should call reiser4_set_hint() */ + reiser4_unset_hint(hint); + } else { + unlock_page(page); + reiser4_unset_hint(hint); + } + assert("vs-979", + ergo(result == 0, (PageLocked(page) || PageUptodate(page)))); + assert("vs-9791", ergo(result != 0, !PageLocked(page))); + + zrelse(coord->node); + done_lh(lh); + + save_file_hint(file, hint); + kfree(hint); + + /* + * FIXME: explain why it is needed. 
HINT: page allocation in write can + * not be done when atom is not NULL because reiser4_writepage can not + * kick entd and have to eflush + */ + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; +} + +struct uf_readpages_context { + lock_handle lh; + coord_t coord; +}; + +/* + * A callback function for readpages_unix_file/read_cache_pages. + * We don't take non-exclusive access. If an item different from + * extent pointer is found in some iteration, then return error + * (-EINVAL). + * + * @data -- a pointer to reiser4_readpages_context object, + * to save the twig lock and the coord between + * read_cache_page iterations. + * @page -- page to start read. + */ +static int readpages_filler(void * data, struct page * page) +{ + struct uf_readpages_context *rc = data; + jnode * node; + int ret = 0; + reiser4_extent *ext; + __u64 ext_index; + int cbk_done = 0; + struct address_space *mapping = page->mapping; + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + get_page(page); + + if (rc->lh.node == 0) { + /* no twig lock - have to do tree search. 
*/ + reiser4_key key; + repeat: + unlock_page(page); + key_by_inode_and_offset_common( + mapping->host, page_offset(page), &key); + ret = coord_by_key( + &get_super_private(mapping->host->i_sb)->tree, + &key, &rc->coord, &rc->lh, + ZNODE_READ_LOCK, FIND_EXACT, + TWIG_LEVEL, TWIG_LEVEL, CBK_UNIQUE, NULL); + if (unlikely(ret)) + goto exit; + lock_page(page); + if (PageUptodate(page)) + goto unlock; + cbk_done = 1; + } + ret = zload(rc->coord.node); + if (unlikely(ret)) + goto unlock; + if (!coord_is_existing_item(&rc->coord)) { + zrelse(rc->coord.node); + ret = RETERR(-ENOENT); + goto unlock; + } + if (!item_is_extent(&rc->coord)) { + /* + * ->readpages() is not + * defined for tail items + */ + zrelse(rc->coord.node); + ret = RETERR(-EINVAL); + goto unlock; + } + ext = extent_by_coord(&rc->coord); + ext_index = extent_unit_index(&rc->coord); + if (page->index < ext_index || + page->index >= ext_index + extent_get_width(ext)) { + /* the page index doesn't belong to the extent unit + which the coord points to - release the lock and + repeat with tree search. */ + zrelse(rc->coord.node); + done_lh(&rc->lh); + /* we can be here after a CBK call only in case of + corruption of the tree or the tree lookup algorithm bug. 
*/ + if (unlikely(cbk_done)) { + ret = RETERR(-EIO); + goto unlock; + } + goto repeat; + } + node = jnode_of_page(page); + if (unlikely(IS_ERR(node))) { + zrelse(rc->coord.node); + ret = PTR_ERR(node); + goto unlock; + } + ret = reiser4_do_readpage_extent(ext, page->index - ext_index, page); + jput(node); + zrelse(rc->coord.node); + if (likely(!ret)) + goto exit; + unlock: + unlock_page(page); + exit: + put_page(page); + return ret; +} + +/** + * readpages_unix_file - called by the readahead code, starts reading for each + * page of given list of pages + */ +int readpages_unix_file(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + reiser4_context *ctx; + struct uf_readpages_context rc; + int ret; + + ctx = reiser4_init_context(mapping->host->i_sb); + if (IS_ERR(ctx)) { + put_pages_list(pages); + return PTR_ERR(ctx); + } + init_lh(&rc.lh); + ret = read_cache_pages(mapping, pages, readpages_filler, &rc); + done_lh(&rc.lh); + + context_set_commit_async(ctx); + /* close the transaction to protect further page allocation from deadlocks */ + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return ret; +} + +static reiser4_block_nr unix_file_estimate_read(struct inode *inode, + loff_t count UNUSED_ARG) +{ + /* We should reserve one block, because of updating of the stat data + item */ + assert("vs-1249", + inode_file_plugin(inode)->estimate.update == + estimate_update_common); + return estimate_update_common(inode); +} + +/* this is called with nonexclusive access obtained, + file's container can not change */ +static ssize_t do_read_compound_file(hint_t *hint, struct file *file, + char __user *buf, size_t count, + loff_t *off) +{ + int result; + struct inode *inode; + flow_t flow; + coord_t *coord; + znode *loaded; + + inode = file_inode(file); + + /* build flow */ + assert("vs-1250", + inode_file_plugin(inode)->flow_by_inode == + flow_by_inode_unix_file); + result = flow_by_inode_unix_file(inode, buf, 1 /* user 
space */, + count, *off, READ_OP, &flow); + if (unlikely(result)) + return result; + + /* get seal and coord sealed with it from reiser4 private data + of struct file. The coord will tell us where our last read + of this file finished, and the seal will help to determine + if that location is still valid. + */ + coord = &hint->ext_coord.coord; + while (flow.length && result == 0) { + result = find_file_item(hint, &flow.key, + ZNODE_READ_LOCK, inode); + if (cbk_errored(result)) + /* error happened */ + break; + + if (coord->between != AT_UNIT) { + /* there were no items corresponding to given offset */ + done_lh(hint->ext_coord.lh); + break; + } + + loaded = coord->node; + result = zload(loaded); + if (unlikely(result)) { + done_lh(hint->ext_coord.lh); + break; + } + + if (hint->ext_coord.valid == 0) + validate_extended_coord(&hint->ext_coord, + get_key_offset(&flow.key)); + + assert("vs-4", hint->ext_coord.valid == 1); + assert("vs-33", hint->ext_coord.lh == &hint->lh); + /* call item's read method */ + result = item_plugin_by_coord(coord)->s.file.read(file, + &flow, + hint); + zrelse(loaded); + done_lh(hint->ext_coord.lh); + } + return (count - flow.length) ? (count - flow.length) : result; +} + +static ssize_t read_compound_file(struct file*, char __user*, size_t, loff_t*); + +/** + * unix-file specific ->read() method + * of struct file_operations. 
+ */ +ssize_t read_unix_file(struct file *file, char __user *buf, + size_t read_amount, loff_t *off) +{ + reiser4_context *ctx; + ssize_t result; + struct inode *inode; + struct unix_file_info *uf_info; + + if (unlikely(read_amount == 0)) + return 0; + + inode = file_inode(file); + assert("vs-972", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + result = reiser4_grab_space_force(unix_file_estimate_read(inode, + read_amount), BA_CAN_COMMIT); + if (unlikely(result != 0)) + goto out2; + + uf_info = unix_file_inode_data(inode); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + get_exclusive_access(uf_info); + result = find_file_state(inode, uf_info); + if (unlikely(result != 0)) + goto out; + } + else + get_nonexclusive_access(uf_info); + + switch (uf_info->container) { + case UF_CONTAINER_EXTENTS: + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + result = new_sync_read(file, buf, read_amount, off); + break; + } + case UF_CONTAINER_TAILS: + case UF_CONTAINER_UNKNOWN: + result = read_compound_file(file, buf, read_amount, off); + break; + case UF_CONTAINER_EMPTY: + result = 0; + } + out: + drop_access(uf_info); + out2: + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* + * Read a file, which contains tails and, maybe, + * extents. + * + * Sometimes file can consist of items of both types + * (extents and tails). It can happen, e.g. because + * of failed tail conversion. Also the conversion code + * may release exclusive lock before calling + * balance_dirty_pages(). + * + * In this case applying a generic VFS library function + * would be suboptimal. We use our own "light-weigth" + * version below. 
+ */ +static ssize_t read_compound_file(struct file *file, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result = 0; + struct inode *inode; + hint_t *hint; + struct unix_file_info *uf_info; + size_t to_read; + size_t was_read = 0; + loff_t i_size; + + inode = file_inode(file); + assert("vs-972", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + i_size = i_size_read(inode); + if (*off >= i_size) + /* position to read from is past the end of file */ + goto exit; + if (*off + count > i_size) + count = i_size - *off; + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + return result; + } + uf_info = unix_file_inode_data(inode); + + /* read by page-aligned chunks */ + to_read = PAGE_SIZE - (*off & (loff_t)(PAGE_SIZE - 1)); + if (to_read > count) + to_read = count; + while (count > 0) { + reiser4_txn_restart_current(); + /* + * faultin user page + */ + result = fault_in_pages_writeable(buf, to_read); + if (result) + return RETERR(-EFAULT); + + result = do_read_compound_file(hint, file, buf, to_read, off); + if (result < 0) + break; + count -= result; + buf += result; + + /* update position in a file */ + *off += result; + /* total number of read bytes */ + was_read += result; + to_read = count; + if (to_read > PAGE_SIZE) + to_read = PAGE_SIZE; + } + done_lh(&hint->lh); + save_file_hint(file, hint); + kfree(hint); + if (was_read) + file_accessed(file); + exit: + return was_read ? was_read : result; +} + +/* This function takes care about @file's pages. First of all it checks if + filesystems readonly and if so gets out. Otherwise, it throws out all + pages of file if it was mapped for read and going to be mapped for write + and consists of tails. This is done in order to not manage few copies + of the data (first in page cache and second one in tails them selves) + for the case of mapping files consisting tails. 
+ + Here also tail2extent conversion is performed if it is allowed and file + is going to be written or mapped for write. This functions may be called + from write_unix_file() or mmap_unix_file(). */ +static int check_pages_unix_file(struct file *file, struct inode *inode) +{ + reiser4_invalidate_pages(inode->i_mapping, 0, + (inode->i_size + PAGE_SIZE - + 1) >> PAGE_SHIFT, 0); + return unpack(file, inode, 0 /* not forever */ ); +} + +/** + * mmap_unix_file - mmap of struct file_operations + * @file: file to mmap + * @vma: + * + * This is implementation of vfs's mmap method of struct file_operations for + * unix file plugin. It converts file to extent if necessary. Sets + * reiser4_inode's flag - REISER4_HAS_MMAP. + */ +int mmap_unix_file(struct file *file, struct vm_area_struct *vma) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + struct unix_file_info *uf_info; + reiser4_block_nr needed; + + inode = file_inode(file); + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + + if (!IS_RDONLY(inode) && (vma->vm_flags & (VM_MAYWRITE | VM_SHARED))) { + /* + * we need file built of extent items. If it is still built of + * tail items we have to convert it. 
Find what items the file + * is built of + */ + result = find_file_state(inode, uf_info); + if (result != 0) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + + assert("vs-1648", (uf_info->container == UF_CONTAINER_TAILS || + uf_info->container == UF_CONTAINER_EXTENTS || + uf_info->container == UF_CONTAINER_EMPTY)); + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * invalidate all pages and convert file from tails to + * extents + */ + result = check_pages_unix_file(file, inode); + if (result) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + } + } + + /* + * generic_file_mmap will do update_atime. Grab space for stat data + * update. + */ + needed = inode_file_plugin(inode)->estimate.update(inode); + result = reiser4_grab_space_force(needed, BA_CAN_COMMIT); + if (result) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + + result = generic_file_mmap(file, vma); + if (result == 0) { + /* mark file as having mapping. */ + reiser4_inode_set_flag(inode, REISER4_HAS_MMAP); + } + + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; +} + +/** + * find_first_item + * @inode: + * + * Finds file item which is responsible for first byte in the file. 
+ */ +static int find_first_item(struct inode *inode) +{ + coord_t coord; + lock_handle lh; + reiser4_key key; + int result; + + coord_init_zero(&coord); + init_lh(&lh); + inode_file_plugin(inode)->key_by_inode(inode, 0, &key); + result = find_file_item_nohint(&coord, &lh, &key, ZNODE_READ_LOCK, + inode); + if (result == CBK_COORD_FOUND) { + if (coord.between == AT_UNIT) { + result = zload(coord.node); + if (result == 0) { + result = item_id_by_coord(&coord); + zrelse(coord.node); + if (result != EXTENT_POINTER_ID && + result != FORMATTING_ID) + result = RETERR(-EIO); + } + } else + result = RETERR(-EIO); + } + done_lh(&lh); + return result; +} + +/** + * open_unix_file + * @inode: + * @file: + * + * If filesystem is not readonly - complete uncompleted tail conversion if + * there was one + */ +int open_unix_file(struct inode *inode, struct file *file) +{ + int result; + reiser4_context *ctx; + struct unix_file_info *uf_info; + + if (IS_RDONLY(inode)) + return 0; + + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) + return 0; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * other process completed the conversion + */ + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return 0; + } + + /* + * file left in semi converted state after unclean shutdown or another + * thread is doing conversion and dropped exclusive access which doing + * balance dirty pages. Complete the conversion + */ + result = find_first_item(inode); + if (result == EXTENT_POINTER_ID) + /* + * first item is extent, therefore there was incomplete + * tail2extent conversion. Complete it + */ + result = tail2extent(unix_file_inode_data(inode)); + else if (result == FORMATTING_ID) + /* + * first item is formatting item, therefore there was + * incomplete extent2tail conversion. 
Complete it + */ + result = extent2tail(file, unix_file_inode_data(inode)); + else + result = -EIO; + + assert("vs-1712", + ergo(result == 0, + (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED) && + !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)))); + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; +} + +#define NEITHER_OBTAINED 0 +#define EA_OBTAINED 1 +#define NEA_OBTAINED 2 + +static void drop_access(struct unix_file_info *uf_info) +{ + if (uf_info->exclusive_use) + drop_exclusive_access(uf_info); + else + drop_nonexclusive_access(uf_info); +} + +#define debug_wuf(format, ...) printk("%s: %d: %s: " format "\n", \ + __FILE__, __LINE__, __FUNCTION__, ## __VA_ARGS__) + +/** + * write_unix_file - private ->write() method of unix_file plugin. + * + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * @cont: unused argument, as we don't perform plugin conversion when being + * managed by unix_file plugin. 
+ */ +ssize_t write_unix_file(struct file *file, + const char __user *buf, + size_t count, loff_t *pos, + struct dispatch_context *cont) +{ + int result; + reiser4_context *ctx; + struct inode *inode; + struct unix_file_info *uf_info; + ssize_t written; + int to_write = PAGE_SIZE * WRITE_GRANULARITY; + size_t left; + ssize_t (*write_op)(struct file *, struct inode *, + const char __user *, size_t, + loff_t *pos); + int ea; + int enospc = 0; /* item plugin ->write() returned ENOSPC */ + loff_t new_size; + + ctx = get_current_context(); + inode = file_inode(file); + + assert("vs-947", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + assert("vs-9471", (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED))); + + result = file_remove_privs(file); + if (result) { + context_set_commit_async(ctx); + return result; + } + /* remove_suid might create a transaction */ + reiser4_txn_restart(ctx); + + uf_info = unix_file_inode_data(inode); + + written = 0; + left = count; + ea = NEITHER_OBTAINED; + enospc = 0; + + new_size = i_size_read(inode); + if (*pos + count > new_size) + new_size = *pos + count; + + while (left) { + int update_sd = 0; + if (left < to_write) + to_write = left; + + if (uf_info->container == UF_CONTAINER_EMPTY) { + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + if (uf_info->container != UF_CONTAINER_EMPTY) { + /* file is made not empty by another process */ + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + continue; + } + } else if (uf_info->container == UF_CONTAINER_UNKNOWN) { + /* + * get exclusive access directly just to not have to + * re-obtain it if file will appear empty + */ + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + result = find_file_state(inode, uf_info); + if (result) { + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + break; + } + } else { + get_nonexclusive_access(uf_info); + ea = NEA_OBTAINED; + } + + /* either EA or NEA is obtained. 
Choose item write method */ + if (uf_info->container == UF_CONTAINER_EXTENTS) { + /* file is built of extent items */ + write_op = reiser4_write_extent; + } else if (uf_info->container == UF_CONTAINER_EMPTY) { + /* file is empty */ + if (should_have_notail(uf_info, new_size)) + write_op = reiser4_write_extent; + else + write_op = reiser4_write_tail; + } else { + /* file is built of tail items */ + if (should_have_notail(uf_info, new_size)) { + if (ea == NEA_OBTAINED) { + drop_nonexclusive_access(uf_info); + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another + * process - wait until it completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) { + drop_exclusive_access(uf_info); + context_set_commit_async(ctx); + break; + } + } + } + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + continue; + } + write_op = reiser4_write_tail; + } + + written = write_op(file, inode, buf, to_write, pos); + if (written == -ENOSPC && !enospc) { + drop_access(uf_info); + txnmgr_force_commit_all(inode->i_sb, 0); + enospc = 1; + continue; + } + if (written < 0) { + /* + * If this is -ENOSPC, then it happened + * second time, so don't try to free space + * once again. + */ + drop_access(uf_info); + result = written; + break; + } + /* something is written. */ + if (enospc) + enospc = 0; + if (uf_info->container == UF_CONTAINER_EMPTY) { + assert("edward-1553", ea == EA_OBTAINED); + uf_info->container = + (write_op == reiser4_write_extent) ? 
+ UF_CONTAINER_EXTENTS : UF_CONTAINER_TAILS; + } + assert("edward-1554", + ergo(uf_info->container == UF_CONTAINER_EXTENTS, + write_op == reiser4_write_extent)); + assert("edward-1555", + ergo(uf_info->container == UF_CONTAINER_TAILS, + write_op == reiser4_write_tail)); + if (*pos + written > inode->i_size) { + INODE_SET_FIELD(inode, i_size, *pos + written); + update_sd = 1; + } + if (!IS_NOCMTIME(inode)) { + inode->i_ctime = inode->i_mtime = current_time(inode); + update_sd = 1; + } + if (update_sd) { + /* + * space for update_sd was reserved in write_op + */ + result = reiser4_update_sd(inode); + if (result) { + warning("edward-1574", + "Can not update stat-data: %i. FSCK?", + result); + drop_access(uf_info); + context_set_commit_async(ctx); + break; + } + } + drop_access(uf_info); + ea = NEITHER_OBTAINED; + + /* + * tell VM how many pages were dirtied. Maybe number of pages + * which were dirty already should not be counted + */ + reiser4_throttle_write(inode); + left -= written; + buf += written; + *pos += written; + } + if (result == 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + reiser4_txn_restart_current(); + grab_space_enable(); + result = reiser4_sync_file_common(file, 0, LONG_MAX, + 0 /* data and stat data */); + if (result) + warning("reiser4-7", "failed to sync file %llu", + (unsigned long long)get_inode_oid(inode)); + } + /* + * return number of written bytes or error code if nothing is + * written. Note, that it does not work correctly in case when + * sync_unix_file returns error + */ + return (count - left) ? (count - left) : result; +} + +/** + * release_unix_file - release of struct file_operations + * @inode: inode of released file + * @file: file to release + * + * Implementation of release method of struct file_operations for unix file + * plugin. If last reference to indode is released - convert all extent items + * into tail items if necessary. Frees reiser4 specific file data. 
+ */ +int release_unix_file(struct inode *inode, struct file *file) +{ + reiser4_context *ctx; + struct unix_file_info *uf_info; + int result; + int in_reiser4; + + in_reiser4 = is_in_reiser4_context(); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + result = 0; + if (in_reiser4 == 0) { + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + if (file->f_path.dentry->d_lockref.count == 1 && + uf_info->container == UF_CONTAINER_EXTENTS && + !should_have_notail(uf_info, inode->i_size) && + !rofs_inode(inode)) { + result = extent2tail(file, uf_info); + if (result != 0) { + context_set_commit_async(ctx); + warning("nikita-3233", + "Failed (%d) to convert in %s (%llu)", + result, __FUNCTION__, + (unsigned long long) + get_inode_oid(inode)); + } + } + drop_exclusive_access(uf_info); + } else { + /* + we are within reiser4 context already. How latter is + possible? Simple: + + (gdb) bt + #0 get_exclusive_access () + #2 0xc01e56d3 in release_unix_file () + #3 0xc01c3643 in reiser4_release () + #4 0xc014cae0 in __fput () + #5 0xc013ffc3 in remove_vm_struct () + #6 0xc0141786 in exit_mmap () + #7 0xc0118480 in mmput () + #8 0xc0133205 in oom_kill () + #9 0xc01332d1 in out_of_memory () + #10 0xc013bc1d in try_to_free_pages () + #11 0xc013427b in __alloc_pages () + #12 0xc013f058 in do_anonymous_page () + #13 0xc013f19d in do_no_page () + #14 0xc013f60e in handle_mm_fault () + #15 0xc01131e5 in do_page_fault () + #16 0xc0104935 in error_code () + #17 0xc025c0c6 in __copy_to_user_ll () + #18 0xc01d496f in reiser4_read_tail () + #19 0xc01e4def in read_unix_file () + #20 0xc01c3504 in reiser4_read () + #21 0xc014bd4f in vfs_read () + #22 0xc014bf66 in sys_read () + */ + warning("vs-44", "out of memory?"); + } + + reiser4_free_file_fsdata(file); + + reiser4_exit_context(ctx); + return result; +} + +static void set_file_notail(struct inode *inode) +{ + reiser4_inode *state; + formatting_plugin *tplug; + 
+ state = reiser4_inode_data(inode); + tplug = formatting_plugin_by_id(NEVER_TAILS_FORMATTING_ID); + force_plugin_pset(inode, PSET_FORMATTING, (reiser4_plugin *)tplug); +} + +/* if file is built of tails - convert it to extents */ +static int unpack(struct file *filp, struct inode *inode, int forever) +{ + int result = 0; + struct unix_file_info *uf_info; + + uf_info = unix_file_inode_data(inode); + assert("vs-1628", ea_obtained(uf_info)); + + result = find_file_state(inode, uf_info); + if (result) + return result; + assert("vs-1074", uf_info->container != UF_CONTAINER_UNKNOWN); + + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another process - wait until it + * completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) + return result; + } + } + if (forever) { + /* safe new formatting plugin in stat data */ + __u64 tograb; + + set_file_notail(inode); + + grab_space_enable(); + tograb = inode_file_plugin(inode)->estimate.update(inode); + result = reiser4_grab_space(tograb, BA_CAN_COMMIT); + result = reiser4_update_sd(inode); + } + + return result; +} + +/* implentation of vfs' ioctl method of struct file_operations for unix file + plugin +*/ +int ioctl_unix_file(struct file *filp, unsigned int cmd, + unsigned long arg UNUSED_ARG) +{ + reiser4_context *ctx; + int result; + struct inode *inode = filp->f_path.dentry->d_inode; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + switch (cmd) { + case REISER4_IOC_UNPACK: + get_exclusive_access(unix_file_inode_data(inode)); + result = unpack(filp, inode, 1 /* forever */ ); + drop_exclusive_access(unix_file_inode_data(inode)); + break; + + default: + result = RETERR(-ENOTTY); + break; + } + 
reiser4_exit_context(ctx); + return result; +} + +/* implentation of vfs' bmap method of struct address_space_operations for unix + file plugin +*/ +sector_t bmap_unix_file(struct address_space * mapping, sector_t lblock) +{ + reiser4_context *ctx; + sector_t result; + reiser4_key key; + coord_t coord; + lock_handle lh; + struct inode *inode; + item_plugin *iplug; + sector_t block; + + inode = mapping->host; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + key_by_inode_and_offset_common(inode, + (loff_t) lblock * current_blocksize, + &key); + + init_lh(&lh); + result = + find_file_item_nohint(&coord, &lh, &key, ZNODE_READ_LOCK, inode); + if (cbk_errored(result)) { + done_lh(&lh); + reiser4_exit_context(ctx); + return result; + } + + result = zload(coord.node); + if (result) { + done_lh(&lh); + reiser4_exit_context(ctx); + return result; + } + + iplug = item_plugin_by_coord(&coord); + if (iplug->s.file.get_block) { + result = iplug->s.file.get_block(&coord, lblock, &block); + if (result == 0) + result = block; + } else + result = RETERR(-EINVAL); + + zrelse(coord.node); + done_lh(&lh); + reiser4_exit_context(ctx); + return result; +} + +/** + * flow_by_inode_unix_file - initizlize structure flow + * @inode: inode of file for which read or write is abou + * @buf: buffer to perform read to or write from + * @user: flag showing whether @buf is user space or kernel space + * @size: size of buffer @buf + * @off: start offset fro read or write + * @op: READ or WRITE + * @flow: + * + * Initializes fields of @flow: key, size of data, i/o mode (read or write). 
+ */ +int flow_by_inode_unix_file(struct inode *inode, + const char __user *buf, int user, + loff_t size, loff_t off, + rw_op op, flow_t *flow) +{ + assert("nikita-1100", inode != NULL); + + flow->length = size; + memcpy(&flow->data, &buf, sizeof(buf)); + flow->user = user; + flow->op = op; + assert("nikita-1931", inode_file_plugin(inode) != NULL); + assert("nikita-1932", + inode_file_plugin(inode)->key_by_inode == + key_by_inode_and_offset_common); + /* calculate key of write position and insert it into flow->key */ + return key_by_inode_and_offset_common(inode, off, &flow->key); +} + +/* plugin->u.file.set_plug_in_sd = NULL + plugin->u.file.set_plug_in_inode = NULL + plugin->u.file.create_blank_sd = NULL */ +/* plugin->u.file.delete */ +/* + plugin->u.file.add_link = reiser4_add_link_common + plugin->u.file.rem_link = NULL */ + +/* plugin->u.file.owns_item + this is common_file_owns_item with assertion */ +/* Audited by: green(2002.06.15) */ +int +owns_item_unix_file(const struct inode *inode /* object to check against */ , + const coord_t * coord /* coord to check */ ) +{ + int result; + + result = owns_item_common(inode, coord); + if (!result) + return 0; + if (!plugin_of_group(item_plugin_by_coord(coord), + UNIX_FILE_METADATA_ITEM_TYPE)) + return 0; + assert("vs-547", + item_id_by_coord(coord) == EXTENT_POINTER_ID || + item_id_by_coord(coord) == FORMATTING_ID); + return 1; +} + +static int setattr_truncate(struct inode *inode, struct iattr *attr) +{ + int result; + int s_result; + loff_t old_size; + reiser4_tree *tree; + + inode_check_scale(inode, inode->i_size, attr->ia_size); + + old_size = inode->i_size; + tree = reiser4_tree_by_inode(inode); + + result = safe_link_grab(tree, BA_CAN_COMMIT); + if (result == 0) + result = safe_link_add(inode, SAFE_TRUNCATE); + if (result == 0) + result = truncate_file_body(inode, attr); + if (result) + warning("vs-1588", "truncate_file failed: oid %lli, " + "old size %lld, new size %lld, retval %d", + (unsigned long 
long)get_inode_oid(inode), + old_size, attr->ia_size, result); + + s_result = safe_link_grab(tree, BA_CAN_COMMIT); + if (s_result == 0) + s_result = + safe_link_del(tree, get_inode_oid(inode), SAFE_TRUNCATE); + if (s_result != 0) { + warning("nikita-3417", "Cannot kill safelink %lli: %i", + (unsigned long long)get_inode_oid(inode), s_result); + } + safe_link_release(tree); + return result; +} + +/* plugin->u.file.setattr method */ +/* This calls inode_setattr and if truncate is in effect it also takes + exclusive inode access to avoid races */ +int setattr_unix_file(struct dentry *dentry, /* Object to change attributes */ + struct iattr *attr /* change description */ ) +{ + int result; + + if (attr->ia_valid & ATTR_SIZE) { + reiser4_context *ctx; + struct unix_file_info *uf_info; + + /* truncate does reservation itself and requires exclusive + access obtained */ + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(dentry->d_inode); + get_exclusive_access_careful(uf_info, dentry->d_inode); + result = setattr_truncate(dentry->d_inode, attr); + drop_exclusive_access(uf_info); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + } else + result = reiser4_setattr_common(dentry, attr); + + return result; +} + +/* plugin->u.file.init_inode_data */ +void +init_inode_data_unix_file(struct inode *inode, + reiser4_object_create_data * crd, int create) +{ + struct unix_file_info *data; + + data = unix_file_inode_data(inode); + data->container = create ? UF_CONTAINER_EMPTY : UF_CONTAINER_UNKNOWN; + init_rwsem(&data->latch); + data->tplug = inode_formatting_plugin(inode); + data->exclusive_use = 0; + +#if REISER4_DEBUG + data->ea_owner = NULL; + atomic_set(&data->nr_neas, 0); +#endif + init_inode_ordering(inode, crd, create); +} + +/** + * delete_unix_file - delete_object of file_plugin + * @inode: inode to be deleted + * + * Truncates file to length 0, removes stat data and safe link. 
+ */ +int delete_object_unix_file(struct inode *inode) +{ + struct unix_file_info *uf_info; + int result; + + if (reiser4_inode_get_flag(inode, REISER4_NO_SD)) + return 0; + + /* truncate file bogy first */ + uf_info = unix_file_inode_data(inode); + get_exclusive_access(uf_info); + result = shorten_file(inode, 0 /* size */ ); + drop_exclusive_access(uf_info); + + if (result) + warning("edward-1556", + "failed to truncate file (%llu) on removal: %d", + get_inode_oid(inode), result); + + /* remove stat data and safe link */ + return reiser4_delete_object_common(inode); +} + +static int do_write_begin(struct file *file, struct page *page, + loff_t pos, unsigned len) +{ + int ret; + if (len == PAGE_SIZE || PageUptodate(page)) + return 0; + + ret = readpage_unix_file(file, page); + if (ret) { + SetPageError(page); + ClearPageUptodate(page); + /* All reiser4 readpage() implementations should return the + * page locked in case of error. */ + assert("nikita-3472", PageLocked(page)); + return ret; + } + /* + * ->readpage() either: + * + * 1. starts IO against @page. @page is locked for IO in + * this case. + * + * 2. doesn't start IO. @page is unlocked. + * + * In either case, page should be locked. + */ + lock_page(page); + /* + * IO (if any) is completed at this point. Check for IO + * errors. 
+ */ + if (!PageUptodate(page)) + return RETERR(-EIO); + return ret; +} + +/* plugin->write_begin() */ +int write_begin_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata) +{ + int ret; + struct inode * inode; + struct unix_file_info *info; + + inode = file_inode(file); + info = unix_file_inode_data(inode); + + ret = reiser4_grab_space_force(estimate_one_insert_into_item + (reiser4_tree_by_inode(inode)), + BA_CAN_COMMIT); + if (ret) + return ret; + get_exclusive_access(info); + ret = find_file_state(file_inode(file), info); + if (unlikely(ret != 0)) { + drop_exclusive_access(info); + return ret; + } + if (info->container == UF_CONTAINER_TAILS) { + ret = tail2extent(info); + if (ret) { + warning("edward-1575", + "tail conversion failed: %d", ret); + drop_exclusive_access(info); + return ret; + } + } + ret = do_write_begin(file, page, pos, len); + if (unlikely(ret != 0)) + drop_exclusive_access(info); + /* else exclusive access will be dropped in ->write_end() */ + return ret; +} + +/* plugin->write_end() */ +int write_end_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata) +{ + int ret; + struct inode *inode; + struct unix_file_info *info; + + inode = file_inode(file); + info = unix_file_inode_data(inode); + + unlock_page(page); + ret = find_or_create_extent(page); + if (ret) { + SetPageError(page); + goto exit; + } + if (pos + copied > inode->i_size) { + INODE_SET_FIELD(inode, i_size, pos + copied); + ret = reiser4_update_sd(inode); + if (unlikely(ret != 0)) + warning("edward-1604", + "Can not update stat-data: %i. 
FSCK?", + ret); + } + exit: + drop_exclusive_access(info); + return ret; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file/file.h b/fs/reiser4/plugin/file/file.h new file mode 100644 index 000000000000..523f86855dbc --- /dev/null +++ b/fs/reiser4/plugin/file/file.h @@ -0,0 +1,322 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* this file contains declarations of methods implementing + file plugins (UNIX_FILE_PLUGIN_ID, CRYPTCOMPRESS_FILE_PLUGIN_ID + and SYMLINK_FILE_PLUGIN_ID) */ + +#if !defined( __REISER4_FILE_H__ ) +#define __REISER4_FILE_H__ + +/* possible states in dispatching process */ +typedef enum { + DISPATCH_INVAL_STATE, /* invalid state */ + DISPATCH_POINT, /* dispatching point has been achieved */ + DISPATCH_REMAINS_OLD, /* made a decision to manage by old plugin */ + DISPATCH_ASSIGNED_NEW /* a new plugin has been assigned */ +} dispatch_state; + +struct dispatch_context { + int nr_pages; + struct page **pages; + dispatch_state state; +}; + +/* + * Declarations of methods provided for VFS. 
+ */ + +/* inode operations */ +int reiser4_setattr_dispatch(struct dentry *, struct iattr *); + +/* file operations */ +ssize_t reiser4_read_dispatch(struct file *, char __user *buf, + size_t count, loff_t *off); +ssize_t reiser4_write_dispatch(struct file *, const char __user *buf, + size_t count, loff_t * off); +long reiser4_ioctl_dispatch(struct file *filp, unsigned int cmd, + unsigned long arg); +int reiser4_mmap_dispatch(struct file *, struct vm_area_struct *); +int reiser4_open_dispatch(struct inode *inode, struct file *file); +int reiser4_release_dispatch(struct inode *, struct file *); +int reiser4_sync_file_common(struct file *, loff_t, loff_t, int datasync); + +/* address space operations */ +int reiser4_readpage_dispatch(struct file *, struct page *); +int reiser4_readpages_dispatch(struct file *, struct address_space *, + struct list_head *, unsigned); +int reiser4_writepages_dispatch(struct address_space *, + struct writeback_control *); +int reiser4_write_begin_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +int reiser4_write_end_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); +sector_t reiser4_bmap_dispatch(struct address_space *, sector_t lblock); + +/* + * Private methods of unix-file plugin + * (UNIX_FILE_PLUGIN_ID) + */ + +/* private inode operations */ +int setattr_unix_file(struct dentry *, struct iattr *); + +/* private file operations */ + +ssize_t read_unix_file(struct file *, char __user *buf, size_t read_amount, + loff_t *off); +ssize_t write_unix_file(struct file *, const char __user *buf, size_t write_amount, + loff_t * off, struct dispatch_context * cont); +int ioctl_unix_file(struct file *, unsigned int cmd, unsigned long arg); +int mmap_unix_file(struct file *, struct vm_area_struct *); +int open_unix_file(struct inode *, struct file *); +int 
release_unix_file(struct inode *, struct file *); + +/* private address space operations */ +int readpage_unix_file(struct file *, struct page *); +int readpages_unix_file(struct file*, struct address_space*, struct list_head*, + unsigned); +int writepages_unix_file(struct address_space *, struct writeback_control *); +int write_begin_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata); +int write_end_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); +sector_t bmap_unix_file(struct address_space *, sector_t lblock); + +/* other private methods */ +int delete_object_unix_file(struct inode *); +int flow_by_inode_unix_file(struct inode *, const char __user *buf, + int user, loff_t, loff_t, rw_op, flow_t *); +int owns_item_unix_file(const struct inode *, const coord_t *); +void init_inode_data_unix_file(struct inode *, reiser4_object_create_data *, + int create); + +/* + * Private methods of cryptcompress file plugin + * (CRYPTCOMPRESS_FILE_PLUGIN_ID) + */ + +/* private inode operations */ +int setattr_cryptcompress(struct dentry *, struct iattr *); + +/* private file operations */ +ssize_t read_cryptcompress(struct file *, char __user *buf, + size_t count, loff_t *off); +ssize_t write_cryptcompress(struct file *, const char __user *buf, + size_t count, loff_t * off, + struct dispatch_context *cont); +int ioctl_cryptcompress(struct file *, unsigned int cmd, unsigned long arg); +int mmap_cryptcompress(struct file *, struct vm_area_struct *); +int open_cryptcompress(struct inode *, struct file *); +int release_cryptcompress(struct inode *, struct file *); + +/* private address space operations */ +int readpage_cryptcompress(struct file *, struct page *); +int readpages_cryptcompress(struct file*, struct address_space*, + struct list_head*, unsigned); +int writepages_cryptcompress(struct address_space *, + struct writeback_control *); +int write_begin_cryptcompress(struct file *file, struct 
page *page, + loff_t pos, unsigned len, void **fsdata); +int write_end_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); +sector_t bmap_cryptcompress(struct address_space *, sector_t lblock); + +/* other private methods */ +int flow_by_inode_cryptcompress(struct inode *, const char __user *buf, + int user, loff_t, loff_t, rw_op, flow_t *); +int key_by_inode_cryptcompress(struct inode *, loff_t off, reiser4_key *); +int create_object_cryptcompress(struct inode *, struct inode *, + reiser4_object_create_data *); +int delete_object_cryptcompress(struct inode *); +void init_inode_data_cryptcompress(struct inode *, reiser4_object_create_data *, + int create); +int cut_tree_worker_cryptcompress(tap_t *, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, + struct inode *object, int truncate, + int *progress); +void destroy_inode_cryptcompress(struct inode *); + +/* + * Private methods of symlink file plugin + * (SYMLINK_FILE_PLUGIN_ID) + */ +int reiser4_create_symlink(struct inode *symlink, struct inode *dir, + reiser4_object_create_data *); +void destroy_inode_symlink(struct inode *); + +/* + * all the write into unix file is performed by item write method. Write method + * of unix file plugin only decides which item plugin (extent or tail) and in + * which mode (one from the enum below) to call + */ +typedef enum { + FIRST_ITEM = 1, + APPEND_ITEM = 2, + OVERWRITE_ITEM = 3 +} write_mode_t; + +/* unix file may be in one the following states */ +typedef enum { + UF_CONTAINER_UNKNOWN = 0, + UF_CONTAINER_TAILS = 1, + UF_CONTAINER_EXTENTS = 2, + UF_CONTAINER_EMPTY = 3 +} file_container_t; + +struct formatting_plugin; +struct inode; + +/* unix file plugin specific part of reiser4 inode */ +struct unix_file_info { + /* + * this read-write lock protects file containerization change. 
Accesses + * which do not change file containerization (see file_container_t) + * (read, readpage, writepage, write (until tail conversion is + * involved)) take read-lock. Accesses which modify file + * containerization (truncate, conversion from tail to extent and back) + * take write-lock. + */ + struct rw_semaphore latch; + /* this enum specifies which items are used to build the file */ + file_container_t container; + /* + * plugin which controls when file is to be converted to extents and + * back to tail + */ + struct formatting_plugin *tplug; + /* if this is set, file is in exclusive use */ + int exclusive_use; +#if REISER4_DEBUG + /* pointer to task struct of thread owning exclusive access to file */ + void *ea_owner; + atomic_t nr_neas; + void *last_reader; +#endif +}; + +struct unix_file_info *unix_file_inode_data(const struct inode *inode); +void get_exclusive_access(struct unix_file_info *); +void drop_exclusive_access(struct unix_file_info *); +void get_nonexclusive_access(struct unix_file_info *); +void drop_nonexclusive_access(struct unix_file_info *); +int try_to_get_nonexclusive_access(struct unix_file_info *); +int find_file_item(hint_t *, const reiser4_key *, znode_lock_mode, + struct inode *); +int find_file_item_nohint(coord_t *, lock_handle *, + const reiser4_key *, znode_lock_mode, + struct inode *); + +int load_file_hint(struct file *, hint_t *); +void save_file_hint(struct file *, const hint_t *); + +#include "../item/extent.h" +#include "../item/tail.h" +#include "../item/ctail.h" + +struct uf_coord { + coord_t coord; + lock_handle *lh; + int valid; + union { + struct extent_coord_extension extent; + struct tail_coord_extension tail; + struct ctail_coord_extension ctail; + } extension; +}; + +#include "../../forward.h" +#include "../../seal.h" +#include "../../lock.h" + +/* + * This structure is used to speed up file operations (reads and writes). A + * hint is a suggestion about where a key resolved to last time. 
A seal + * indicates whether a node has been modified since a hint was last recorded. + * You check the seal, and if the seal is still valid, you can use the hint + * without traversing the tree again. + */ +struct hint { + seal_t seal; /* a seal over last file item accessed */ + uf_coord_t ext_coord; + loff_t offset; + znode_lock_mode mode; + lock_handle lh; +}; + +static inline int hint_is_valid(hint_t * hint) +{ + return hint->ext_coord.valid; +} + +static inline void hint_set_valid(hint_t * hint) +{ + hint->ext_coord.valid = 1; +} + +static inline void hint_clr_valid(hint_t * hint) +{ + hint->ext_coord.valid = 0; +} + +int load_file_hint(struct file *, hint_t *); +void save_file_hint(struct file *, const hint_t *); +void hint_init_zero(hint_t *); +void reiser4_set_hint(hint_t *, const reiser4_key *, znode_lock_mode); +int hint_is_set(const hint_t *); +void reiser4_unset_hint(hint_t *); + +int reiser4_update_file_size(struct inode *, loff_t, int update_sd); +int cut_file_items(struct inode *, loff_t new_size, + int update_sd, loff_t cur_size, + int (*update_actor) (struct inode *, loff_t, int)); +#if REISER4_DEBUG + +/* return 1 is exclusive access is obtained, 0 - otherwise */ +static inline int ea_obtained(struct unix_file_info * uf_info) +{ + int ret; + + ret = down_read_trylock(&uf_info->latch); + if (ret) + up_read(&uf_info->latch); + return !ret; +} + +#endif + +#define WRITE_GRANULARITY 32 + +int tail2extent(struct unix_file_info *); +int extent2tail(struct file *, struct unix_file_info *); + +int goto_right_neighbor(coord_t *, lock_handle *); +int find_or_create_extent(struct page *); +int equal_to_ldk(znode *, const reiser4_key *); + +void init_uf_coord(uf_coord_t *uf_coord, lock_handle *lh); + +static inline int cbk_errored(int cbk_result) +{ + return (cbk_result != CBK_COORD_NOTFOUND + && cbk_result != CBK_COORD_FOUND); +} + +/* __REISER4_FILE_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * 
c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: +*/ diff --git a/fs/reiser4/plugin/file/file_conversion.c b/fs/reiser4/plugin/file/file_conversion.c new file mode 100644 index 000000000000..cda538bb405f --- /dev/null +++ b/fs/reiser4/plugin/file/file_conversion.c @@ -0,0 +1,755 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, + licensing governed by reiser4/README */ + +/** + * This file contains dispatching hooks, and conversion methods, which + * implement transitions in the FILE interface. + * + * Dispatching hook makes a decision (at dispatching point) about the + * most reasonable plugin. Such decision is made in accordance with some + * O(1)-heuristic. + * + * We implement a transition CRYPTCOMPRESS -> UNIX_FILE for files with + * incompressible data. Current heuristic to estimate compressibility is + * very simple: if first complete logical cluster (64K by default) of a + * file is incompressible, then we make a decision, that the whole file + * is incompressible. + * + * To enable dispatching we install a special "magic" compression mode + * plugin CONVX_COMPRESSION_MODE_ID at file creation time. + * + * Note, that we don't perform back conversion (UNIX_FILE->CRYPTCOMPRESS) + * because of compatibility reasons). + * + * In conversion time we protect CS, the conversion set (file's (meta)data + * and plugin table (pset)) via special per-inode rw-semaphore (conv_sem). + * The methods which implement conversion are CS writers. The methods of FS + * interface (file_operations, inode_operations, address_space_operations) + * are CS readers. 
+ */ + +#include +#include "../../inode.h" +#include "../cluster.h" +#include "file.h" + +#define conversion_enabled(inode) \ + (inode_compression_mode_plugin(inode) == \ + compression_mode_plugin_by_id(CONVX_COMPRESSION_MODE_ID)) + +/** + * Located sections (readers and writers of @pset) are not permanently + * critical: cryptcompress file can be converted only if the conversion + * is enabled (see the macrio above). Also we don't perform back + * conversion. The following helper macro is a sanity check to decide + * if we need the protection (locks are always additional overheads). + */ +#define should_protect(inode) \ + (inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID) && \ + conversion_enabled(inode)) +/** + * To avoid confusion with read/write file operations, we'll speak about + * "passive" protection for FCS readers and "active" protection for FCS + * writers. All methods with active or passive protection have suffix + * "careful". + */ +/** + * Macros for passive protection. + * + * Construct invariant operation to be supplied to VFS. + * The macro accepts the following lexemes: + * @type - type of the value represented by the compound statement; + * @method - name of an operation to be supplied to VFS (reiser4 file + * plugin also should contain a method with such name). 
+ */ +#define PROT_PASSIVE(type, method, args) \ +({ \ + type _result; \ + struct rw_semaphore * guard = \ + &reiser4_inode_data(inode)->conv_sem; \ + \ + if (should_protect(inode)) { \ + down_read(guard); \ + if (!should_protect(inode)) \ + up_read(guard); \ + } \ + _result = inode_file_plugin(inode)->method args; \ + if (should_protect(inode)) \ + up_read(guard); \ + _result; \ +}) + +#define PROT_PASSIVE_VOID(method, args) \ +({ \ + struct rw_semaphore * guard = \ + &reiser4_inode_data(inode)->conv_sem; \ + \ + if (should_protect(inode)) { \ + down_read(guard); \ + if (!should_protect(inode)) \ + up_read(guard); \ + } \ + inode_file_plugin(inode)->method args; \ + \ + if (should_protect(inode)) \ + up_read(guard); \ +}) + +/* Pass management to the unix-file plugin with "notail" policy */ +static int __cryptcompress2unixfile(struct file *file, struct inode * inode) +{ + int result; + reiser4_inode *info; + struct unix_file_info * uf; + info = reiser4_inode_data(inode); + + result = aset_set_unsafe(&info->pset, + PSET_FILE, + (reiser4_plugin *) + file_plugin_by_id(UNIX_FILE_PLUGIN_ID)); + if (result) + return result; + result = aset_set_unsafe(&info->pset, + PSET_FORMATTING, + (reiser4_plugin *) + formatting_plugin_by_id(NEVER_TAILS_FORMATTING_ID)); + if (result) + return result; + /* get rid of non-standard plugins */ + info->plugin_mask &= ~cryptcompress_mask; + /* get rid of plugin stat-data extension */ + info->extmask &= ~(1 << PLUGIN_STAT); + + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + + /* FIXME use init_inode_data_unix_file() instead, + but aviod init_inode_ordering() */ + /* Init unix-file specific part of inode */ + uf = unix_file_inode_data(inode); + uf->container = UF_CONTAINER_UNKNOWN; + init_rwsem(&uf->latch); + uf->tplug = inode_formatting_plugin(inode); + uf->exclusive_use = 0; +#if REISER4_DEBUG + uf->ea_owner = NULL; + atomic_set(&uf->nr_neas, 0); +#endif + /** + * we was carefull for file_ops, inode_ops and as_ops + * to be 
invariant for plugin conversion, so there is + * no need to update ones already installed in the + * vfs's residence. + */ + return 0; +} + +#if REISER4_DEBUG +static int disabled_conversion_inode_ok(struct inode * inode) +{ + __u64 extmask = reiser4_inode_data(inode)->extmask; + __u16 plugin_mask = reiser4_inode_data(inode)->plugin_mask; + + return ((extmask & (1 << LIGHT_WEIGHT_STAT)) && + (extmask & (1 << UNIX_STAT)) && + (extmask & (1 << LARGE_TIMES_STAT)) && + (extmask & (1 << PLUGIN_STAT)) && + (plugin_mask & (1 << PSET_COMPRESSION_MODE))); +} +#endif + +/** + * Disable future attempts to schedule/convert file plugin. + * This function is called by plugin schedule hooks. + * + * To disable conversion we assign any compression mode plugin id + * different from CONVX_COMPRESSION_MODE_ID. + */ +static int disable_conversion(struct inode * inode) +{ + int result; + result = + force_plugin_pset(inode, + PSET_COMPRESSION_MODE, + (reiser4_plugin *)compression_mode_plugin_by_id + (LATTD_COMPRESSION_MODE_ID)); + assert("edward-1500", + ergo(!result, disabled_conversion_inode_ok(inode))); + return result; +} + +/** + * Check if we really have achieved plugin scheduling point + */ +static int check_dispatch_point(struct inode * inode, + loff_t pos /* position in the + file to write from */, + struct cluster_handle * clust, + struct dispatch_context * cont) +{ + assert("edward-1505", conversion_enabled(inode)); + /* + * if file size is more then cluster size, then compressible + * status must be figured out (i.e. 
compression was disabled, + * or file plugin was converted to unix_file) + */ + assert("edward-1506", inode->i_size <= inode_cluster_size(inode)); + + if (pos > inode->i_size) + /* first logical cluster will contain a (partial) hole */ + return disable_conversion(inode); + if (pos < inode_cluster_size(inode)) + /* writing to the first logical cluster */ + return 0; + /* + * here we have: + * cluster_size <= pos <= i_size <= cluster_size, + * and, hence, pos == i_size == cluster_size + */ + assert("edward-1498", + pos == inode->i_size && + pos == inode_cluster_size(inode)); + assert("edward-1539", cont != NULL); + assert("edward-1540", cont->state == DISPATCH_INVAL_STATE); + + cont->state = DISPATCH_POINT; + return 0; +} + +static void start_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + hint_t * hint) +{ + assert("edward-1507", clust->index == 1); + assert("edward-1508", !tfm_cluster_is_uptodate(&clust->tc)); + assert("edward-1509", cluster_get_tfm_act(&clust->tc) == TFMA_READ); + + hint_init_zero(hint); + clust->hint = hint; + clust->index --; + clust->nr_pages = size_in_pages(lbytes(clust->index, inode)); + + /* first logical cluster (of index #0) must be complete */ + assert("edward-1510", lbytes(clust->index, inode) == + inode_cluster_size(inode)); +} + +static void finish_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + hint_t * hint) +{ + reiser4_unset_hint(clust->hint); + clust->hint = hint; + clust->index ++; +} + +#if REISER4_DEBUG +static int prepped_dclust_ok(hint_t * hint) +{ + reiser4_key key; + coord_t * coord = &hint->ext_coord.coord; + + item_key_by_coord(coord, &key); + return (item_id_by_coord(coord) == CTAIL_ID && + !coord_is_unprepped_ctail(coord) && + (get_key_offset(&key) + nr_units_ctail(coord) == + dclust_get_extension_dsize(hint))); +} +#endif + +#define fifty_persent(size) (size >> 1) +/* evaluation of data compressibility */ +#define data_is_compressible(osize, isize) \ + 
(osize < fifty_persent(isize)) + +/** + * A simple O(1)-heuristic for compressibility. + * This is called not more then one time per file's life. + * Read first logical cluster (of index #0) and estimate its compressibility. + * Save estimation result in @cont. + */ +static int read_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + struct dispatch_context * cont) +{ + int i; + int result; + size_t dst_len; + hint_t tmp_hint; + hint_t * cur_hint = clust->hint; + assert("edward-1541", cont->state == DISPATCH_POINT); + + start_check_compressibility(inode, clust, &tmp_hint); + + reset_cluster_pgset(clust, cluster_nrpages(inode)); + result = grab_page_cluster(inode, clust, READ_OP); + if (result) + return result; + /* Read page cluster here */ + for (i = 0; i < clust->nr_pages; i++) { + struct page *page = clust->pages[i]; + lock_page(page); + result = do_readpage_ctail(inode, clust, page, + ZNODE_READ_LOCK); + unlock_page(page); + if (result) + goto error; + } + tfm_cluster_clr_uptodate(&clust->tc); + + cluster_set_tfm_act(&clust->tc, TFMA_WRITE); + + if (hint_is_valid(&tmp_hint) && !hint_is_unprepped_dclust(&tmp_hint)) { + /* lenght of compressed data is known, no need to compress */ + assert("edward-1511", + znode_is_any_locked(tmp_hint.lh.node)); + assert("edward-1512", + WITH_DATA(tmp_hint.ext_coord.coord.node, + prepped_dclust_ok(&tmp_hint))); + dst_len = dclust_get_extension_dsize(&tmp_hint); + } + else { + struct tfm_cluster * tc = &clust->tc; + compression_plugin * cplug = inode_compression_plugin(inode); + result = grab_tfm_stream(inode, tc, INPUT_STREAM); + if (result) + goto error; + for (i = 0; i < clust->nr_pages; i++) { + char *data; + lock_page(clust->pages[i]); + BUG_ON(!PageUptodate(clust->pages[i])); + data = kmap(clust->pages[i]); + memcpy(tfm_stream_data(tc, INPUT_STREAM) + pg_to_off(i), + data, PAGE_SIZE); + kunmap(clust->pages[i]); + unlock_page(clust->pages[i]); + } + result = grab_tfm_stream(inode, tc, 
OUTPUT_STREAM); + if (result) + goto error; + result = grab_coa(tc, cplug); + if (result) + goto error; + tc->len = tc->lsize = lbytes(clust->index, inode); + assert("edward-1513", tc->len == inode_cluster_size(inode)); + dst_len = tfm_stream_size(tc, OUTPUT_STREAM); + cplug->compress(get_coa(tc, cplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + assert("edward-1514", + dst_len <= tfm_stream_size(tc, OUTPUT_STREAM)); + } + finish_check_compressibility(inode, clust, cur_hint); + cont->state = + (data_is_compressible(dst_len, inode_cluster_size(inode)) ? + DISPATCH_REMAINS_OLD : + DISPATCH_ASSIGNED_NEW); + return 0; + error: + put_page_cluster(clust, inode, READ_OP); + return result; +} + +/* Cut disk cluster of index @idx */ +static int cut_disk_cluster(struct inode * inode, cloff_t idx) +{ + reiser4_key from, to; + assert("edward-1515", inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + key_by_inode_cryptcompress(inode, clust_to_off(idx, inode), &from); + to = from; + set_key_offset(&to, + get_key_offset(&from) + inode_cluster_size(inode) - 1); + return reiser4_cut_tree(reiser4_tree_by_inode(inode), + &from, &to, inode, 0); +} + +static int reserve_cryptcompress2unixfile(struct inode *inode) +{ + reiser4_block_nr unformatted_nodes; + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + + /* number of unformatted nodes which will be created */ + unformatted_nodes = cluster_nrpages(inode); /* N */ + + /* + * space required for one iteration of extent->tail conversion: + * + * 1. kill ctail items + * + * 2. insert N unformatted nodes + * + * 3. insert N (worst-case single-block + * extents) extent units. + * + * 4. drilling to the leaf level by coord_by_key() + * + * 5. 
possible update of stat-data + * + */ + grab_space_enable(); + return reiser4_grab_space + (2 * tree->height + + unformatted_nodes + + unformatted_nodes * estimate_one_insert_into_item(tree) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); +} + +/** + * Convert cryptcompress file plugin to unix_file plugin. + */ +static int cryptcompress2unixfile(struct file *file, struct inode *inode, + struct dispatch_context *cont) +{ + int i; + int result = 0; + struct cryptcompress_info *cr_info; + struct unix_file_info *uf_info; + assert("edward-1516", cont->pages[0]->index == 0); + + /* release all cryptcompress-specific resources */ + cr_info = cryptcompress_inode_data(inode); + result = reserve_cryptcompress2unixfile(inode); + if (result) + goto out; + /* tell kill_hook to not truncate pages */ + reiser4_inode_set_flag(inode, REISER4_FILE_CONV_IN_PROGRESS); + result = cut_disk_cluster(inode, 0); + if (result) + goto out; + /* captured jnode of cluster and assotiated resources (pages, + reserved disk space) were released by ->kill_hook() method + of the item plugin */ + + result = __cryptcompress2unixfile(file, inode); + if (result) + goto out; + /* At this point file is managed by unix file plugin */ + + uf_info = unix_file_inode_data(inode); + + assert("edward-1518", + ergo(jprivate(cont->pages[0]), + !jnode_is_cluster_page(jprivate(cont->pages[0])))); + for(i = 0; i < cont->nr_pages; i++) { + assert("edward-1519", cont->pages[i]); + assert("edward-1520", PageUptodate(cont->pages[i])); + + result = find_or_create_extent(cont->pages[i]); + if (result) + break; + } + if (unlikely(result)) + goto out; + uf_info->container = UF_CONTAINER_EXTENTS; + result = reiser4_update_sd(inode); + out: + all_grabbed2free(); + return result; +} + +#define convert_file_plugin cryptcompress2unixfile + +/** + * This is called by ->write() method of a cryptcompress file plugin. 
+ * Make a decision about the most reasonable file plugin id to manage + * the file. + */ +int write_dispatch_hook(struct file *file, struct inode *inode, + loff_t pos, struct cluster_handle *clust, + struct dispatch_context *cont) +{ + int result; + if (!conversion_enabled(inode)) + return 0; + result = check_dispatch_point(inode, pos, clust, cont); + if (result || cont->state != DISPATCH_POINT) + return result; + result = read_check_compressibility(inode, clust, cont); + if (result) + return result; + if (cont->state == DISPATCH_REMAINS_OLD) { + put_page_cluster(clust, inode, READ_OP); + return disable_conversion(inode); + } + assert("edward-1543", cont->state == DISPATCH_ASSIGNED_NEW); + /* + * page cluster is grabbed and uptodate. It will be + * released with a pgset after plugin conversion is + * finished, see put_dispatch_context(). + */ + reiser4_unset_hint(clust->hint); + move_cluster_pgset(clust, &cont->pages, &cont->nr_pages); + return 0; +} + +/** + * This is called by ->setattr() method of cryptcompress file plugin. 
+ */ +int setattr_dispatch_hook(struct inode * inode) +{ + if (conversion_enabled(inode)) + return disable_conversion(inode); + return 0; +} + +static inline void init_dispatch_context(struct dispatch_context * cont) +{ + memset(cont, 0, sizeof(*cont)); +} + +static inline void done_dispatch_context(struct dispatch_context * cont, + struct inode * inode) +{ + if (cont->pages) { + __put_page_cluster(0, cont->nr_pages, cont->pages, inode); + kfree(cont->pages); + } +} + +static inline ssize_t reiser4_write_checks(struct file *file, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result; + struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; + struct kiocb iocb; + struct iov_iter iter; + + init_sync_kiocb(&iocb, file); + iocb.ki_pos = *off; + iov_iter_init(&iter, WRITE, &iov, 1, count); + + result = generic_write_checks(&iocb, &iter); + *off = iocb.ki_pos; + return result; +} + +/* + * ->write() VFS file operation + * + * performs "intelligent" conversion in the FILE interface. + * Write a file in 3 steps (2d and 3d steps are optional). + */ +ssize_t reiser4_write_dispatch(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result; + reiser4_context *ctx; + ssize_t written_old = 0; /* bytes written with initial plugin */ + ssize_t written_new = 0; /* bytes written with new plugin */ + struct dispatch_context cont; + struct inode * inode = file_inode(file); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + current->backing_dev_info = inode_to_bdi(inode); + init_dispatch_context(&cont); + inode_lock(inode); + + result = reiser4_write_checks(file, buf, count, off); + if (unlikely(result <= 0)) + goto exit; + /** + * First step. + * Start write with initial file plugin. + * Keep a plugin schedule status at @cont (if any). 
+ */ + written_old = inode_file_plugin(inode)->write(file, + buf, + count, + off, + &cont); + if (cont.state != DISPATCH_ASSIGNED_NEW || written_old < 0) + goto exit; + /** + * Second step. + * New file plugin has been scheduled. + * Perform conversion to the new plugin. + */ + down_read(&reiser4_inode_data(inode)->conv_sem); + result = convert_file_plugin(file, inode, &cont); + up_read(&reiser4_inode_data(inode)->conv_sem); + if (result) { + warning("edward-1544", + "Inode %llu: file plugin conversion failed (%d)", + (unsigned long long)get_inode_oid(inode), + (int)result); + goto exit; + } + reiser4_txn_restart(ctx); + /** + * Third step: + * Finish write with the new file plugin. + */ + assert("edward-1536", + inode_file_plugin(inode) == + file_plugin_by_id(UNIX_FILE_PLUGIN_ID)); + + written_new = inode_file_plugin(inode)->write(file, + buf + written_old, + count - written_old, + off, + NULL); + exit: + inode_unlock(inode); + done_dispatch_context(&cont, inode); + current->backing_dev_info = NULL; + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return written_old + (written_new < 0 ? 0 : written_new); +} + +/* + * Dispatchers with "passive" protection for: + * + * ->open(); + * ->read(); + * ->ioctl(); + * ->mmap(); + * ->release(); + * ->bmap(). 
+ */ + +int reiser4_open_dispatch(struct inode *inode, struct file *file) +{ + return PROT_PASSIVE(int, open, (inode, file)); +} + +ssize_t reiser4_read_dispatch(struct file * file, char __user * buf, + size_t size, loff_t * off) +{ + struct inode * inode = file_inode(file); + return PROT_PASSIVE(ssize_t, read, (file, buf, size, off)); +} + +long reiser4_ioctl_dispatch(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct inode * inode = file_inode(filp); + return PROT_PASSIVE(int, ioctl, (filp, cmd, arg)); +} + +int reiser4_mmap_dispatch(struct file *file, struct vm_area_struct *vma) +{ + struct inode *inode = file_inode(file); + return PROT_PASSIVE(int, mmap, (file, vma)); +} + +int reiser4_release_dispatch(struct inode *inode, struct file *file) +{ + return PROT_PASSIVE(int, release, (inode, file)); +} + +sector_t reiser4_bmap_dispatch(struct address_space * mapping, sector_t lblock) +{ + struct inode *inode = mapping->host; + return PROT_PASSIVE(sector_t, bmap, (mapping, lblock)); +} + +/** + * NOTE: The following two methods are + * used only for loopback functionality. + * reiser4_write_end() can not cope with + * short writes for now. 
+ */ +int reiser4_write_begin_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned flags, + struct page **pagep, + void **fsdata) +{ + int ret = 0; + struct page *page; + pgoff_t index; + reiser4_context *ctx; + struct inode * inode = file_inode(file); + + index = pos >> PAGE_SHIFT; + page = grab_cache_page_write_begin(mapping, index, + flags & AOP_FLAG_NOFS); + *pagep = page; + if (!page) + return -ENOMEM; + + ctx = reiser4_init_context(file_inode(file)->i_sb); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto err2; + } + ret = reiser4_grab_space_force(/* for update_sd: + * one when updating file size and + * one when updating mtime/ctime */ + 2 * estimate_update_common(inode), + BA_CAN_COMMIT); + if (ret) + goto err1; + ret = PROT_PASSIVE(int, write_begin, (file, page, pos, len, fsdata)); + if (unlikely(ret)) + goto err1; + /* Success. Resorces will be released in write_end_dispatch */ + return 0; + err1: + reiser4_exit_context(ctx); + err2: + unlock_page(page); + put_page(page); + return ret; +} + +int reiser4_write_end_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned copied, + struct page *page, + void *fsdata) +{ + int ret; + reiser4_context *ctx; + struct inode *inode = page->mapping->host; + + assert("umka-3101", file != NULL); + assert("umka-3102", page != NULL); + assert("umka-3093", PageLocked(page)); + + ctx = get_current_context(); + + SetPageUptodate(page); + set_page_dirty_notag(page); + + ret = PROT_PASSIVE(int, write_end, (file, page, pos, copied, fsdata)); + put_page(page); + + /* don't commit transaction under inode semaphore */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return ret == 0 ? 
copied : ret; +} + +/* + * Dispatchers without protection + */ +int reiser4_setattr_dispatch(struct dentry *dentry, struct iattr *attr) +{ + return inode_file_plugin(dentry->d_inode)->setattr(dentry, attr); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/invert.c b/fs/reiser4/plugin/file/invert.c new file mode 100644 index 000000000000..73498787b74f --- /dev/null +++ b/fs/reiser4/plugin/file/invert.c @@ -0,0 +1,493 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Suppose you want to conveniently read and write a large variety of small files conveniently within a single emacs + buffer, without having a separate buffer for each 8 byte or so file. Inverts are the way to do that. An invert + provides you with the contents of a set of subfiles plus its own contents. It is a file which inherits other files + when you read it, and allows you to write to it and through it to the files that it inherits from. In order for it + to know which subfiles each part of your write should go into, there must be delimiters indicating that. It tries to + make that easy for you by providing those delimiters in what you read from it. + + When you read it, an invert performs an inverted assignment. Instead of taking an assignment command and writing a + bunch of files, it takes a bunch of files and composes an assignment command for you to read from it that if executed + would create those files. But which files? Well, that must be specified in the body of the invert using a special + syntax, and that specification is called the invert of the assignment. + + When written to, an invert performs the assignment command that is written + to it, and modifies its own body to contain the invert of that + assignment. 
+ + In other words, writing to an invert file what you have read from it + is the identity operation. + + Malformed assignments cause write errors. Partial writes are not + supported in v4.0, but will be. + + Example: + + If an invert contains: + + /filenameA/<>+"(some text stored in the invert)+/filenameB/<> + +====================== +Each element in this definition should be an invert, and all files +should be called recursively - too. This is bad. If one of the +included files in not a regular or invert file, then we can't read +main file. + +I think to make it is possible easier: + +internal structure of invert file should be like symlink file. But +read and write method should be explitely indicated in i/o operation.. + +By default we read and write (if probably) as symlink and if we +specify ..invert at reading time that too we can specify it at write time. + +example: +/my_invert_file/..invert<- ( (/filenameA<-"(The contents of filenameA))+"(some text stored in the invert)+(/filenameB<-"(The contents of filenameB) ) ) +will create /my_invert_file as invert, and will creat /filenameA and /filenameB with specified body. + +read of /my_invert_file/..invert will be +/filenameA<-"(The contents of filenameA)+"(some text stored in the invert)+/filenameB<-"(The contents of filenameB) + +but read of /my_invert_file/ will be +The contents of filenameAsome text stored in the invertThe contents of filenameB + +we also can creat this file as +/my_invert_file/<-/filenameA+"(some text stored in the invert)+/filenameB +will create /my_invert_file , and use existing files /filenameA and /filenameB. + +and when we will read it will be as previously invert file. + +This is correct? + + vv +DEMIDOV-FIXME-HANS: + +Maybe you are right, but then you must disable writes to /my_invert_file/ and only allow writes to /my_invert_file/..invert + +Do you agree? Discuss it on reiserfs-list.... 
+
+-Hans
+=======================
+
+   Then a read will return:
+
+     /filenameA<-"(The contents of filenameA)+"(some text stored in the invert)+/filenameB<-"(The contents of filenameB)
+
+   and a write of the line above to the invert will set the contents of
+   the invert and filenameA and filenameB to their original values.
+
+   Note that the contents of an invert have no influence on the effect
+   of a write unless the write is a partial write (and a write of a
+   shorter file without using truncate first is a partial write).
+
+   truncate() has no effect on filenameA and filenameB, it merely
+   resets the value of the invert.
+
+   Writes to subfiles via the invert are implemented by preceding them
+   with truncates.
+
+   Parse failures cause write failures.
+
+   Questions to ponder: should the invert be acted on prior to file
+   close when writing to an open file descriptor?
+
+   Example:
+
+   If an invert contains:
+
+   "(This text and a pair of quotes are all that is here.)
+
+Then a read will return:
+
+"(This text and a pair of quotes are all that is here.)
+
+*/
+
+/* The OPEN method places a struct file in memory associated with the invert
+   body and returns something like a file descriptor to the user for future
+   access to the invert file.
+   During opening we parse the body of the invert and get a list of the
+   'entries' (that describe all its subfiles) and place a pointer to the
+   first struct in the reiser4-specific part of the invert inode (arbitrary
+   decision).
+
+   Each subfile is described by the struct inv_entry that has a pointer @sd to
+   in-core based stat-data and a pointer to struct file @f (if we find that the
+   subfile uses more than one unformatted node (arbitrary decision), we load
+   struct file in memory, otherwise we load base stat-data (and maybe 1-2 bytes
+   of some other information we need)
+
+   Since READ and WRITE methods for inverts were formulated in assignment
+   language, they don't contain arguments 'size' and 'offset' that make sense
+   only in ordinary read/write methods.
+
+   READ method is a combination of two methods:
+   1) ordinary read method (with offset=0, length = @f->...->i_size) for entries
+   with @f != 0, this method uses a pointer to struct file as an argument
+   2) read method for inode-less files with @sd != 0, this method uses
+   in-core based stat-data instead of struct file as an argument.
+   in the first case we don't use pagecache, just copy data that we got after
+   cbk() into userspace.
+
+   WRITE method for invert files is more complex.
+   Besides the declared WRITE-interface in assignment language above we need
+   to have an opportunity to edit the unwrapped body of an invert file with some
+   text editor, which means we need a GENERIC WRITE METHOD for invert files:
+
+   my_invert_file/..invert <- "string"
+
+   this method parses "string" and looks for correct subfile signatures; also
+   the parsing process splits this "string" into the set of flows in accordance
+   with the set of subfiles specified by this signature.
+   The found list of signatures #S is compared with the opened one #I of the
+   invert file. If it doesn't have this one (#I==0; it will be so, for instance,
+   if we have just created this invert file) the write method assigns the found
+   signature (#I=#S;) to the invert file. Then if #I==#S, the generic write
+   method splits itself into some write methods for ordinary or light-weight
+   files, or calls itself recursively for invert files with corresponding flows.
+   I am not sure, but the list of signatures looks like what mr.Demidov means
+   by 'delimiters'.
+
+   The cases when #S<#I (#I<#S) (in the sense of set-theory) are also available
+   and cause delete (create new) subfiles (arbitrary decision - it may look
+   too complex, but this interface will be the most complete). The order of
+   entries of list #S (#I) and inherited order on #I (#S) must coincide.
+   The other parsing results give a malformed signature that aborts the READ
+   method and releases all resources.
+
+   Format of subfile (entry) signature:
+
+   "START_MAGIC"<>(TYPE="...",LOOKUP_ARG="...")SUBFILE_BODY"END_MAGIC"
+
+   Legend:
+
+    START_MAGIC - keyword indicates the start of subfile signature;
+
+    <> indicates the start of 'subfile metadata', that is the pair
+   (TYPE="...",LOOKUP_ARG="...") in parenthesis separated by comma.
+
+    TYPE - the string "type" indicates the start of one of the three words:
+    - ORDINARY_FILE,
+    - LIGHT_WEIGHT_FILE,
+    - INVERT_FILE;
+
+    LOOKUP_ARG - lookup argument depends on previous type:
+  */
+
+ /************************************************************/
+ /*       TYPE        *          LOOKUP ARGUMENT             */
+ /************************************************************/
+ /* LIGHT_WEIGHT_FILE *           stat-data key              */
+ /************************************************************/
+ /*   ORDINARY_FILE   *             filename                 */
+ /************************************************************/
+ /*   INVERT_FILE     *             filename                 */
+ /************************************************************/
+
+ /* where:
+    *stat-data key - the string contains the stat data key of this subfile; it
+    will be passed to the fast-access lookup method for light-weight files;
+    *filename - pathname of this subfile; it will be passed to VFS lookup
+    methods for ordinary and invert files;
+
+    SUBFILE_BODY - data of this subfile (it will go to the flow)
+    END_MAGIC - the keyword indicates the end of subfile signature.
+ + The other simbols inside the signature interpreted as 'unformatted content', + which is available with VFS's read_link() (arbitraruy decision). + + NOTE: Parse method for a body of invert file uses mentioned signatures _without_ + subfile bodies. + + Now the only unclear thing is WRITE in regular light-weight subfile A that we + can describe only in assignment language: + + A <- "some_string" + + I guess we don't want to change stat-data and body items of file A + if this file exist, and size(A) != size("some_string") because this operation is + expencive, so we only do the partial write if size(A) > size("some_string") + and do truncate of the "some_string", and then do A <- "truncated string", if + size(A) < size("some_string"). This decision is also arbitrary.. + */ + +/* here is infrastructure for formated flows */ + +#define SUBFILE_HEADER_MAGIC 0x19196605 +#define FLOW_HEADER_MAGIC 0x01194304 + +#include "../plugin.h" +#include "../../debug.h" +#include "../../forward.h" +#include "../object.h" +#include "../item/item.h" +#include "../item/static_stat.h" +#include "../../dformat.h" +#include "../znode.h" +#include "../inode.h" + +#include +#include /* for struct file */ +#include /* for struct list_head */ + +typedef enum { + LIGHT_WEIGHT_FILE, + ORDINARY_FILE, + INVERT_FILE +} inv_entry_type; + +typedef struct flow_header { + d32 fl_magic; + d16 fl_nr; /* number of subfiles in the flow */ +}; + +typedef struct subfile_header { + d32 sh_magic; /* subfile magic */ + d16 sh_type; /* type of subfile: light-weight, ordinary, invert */ + d16 sh_arg_len; /* lenght of lookup argument (filename, key) */ + d32 sh_body_len; /* lenght of subfile body */ +}; + +/* functions to get/set fields of flow header */ + +static void fl_set_magic(flow_header * fh, __u32 value) +{ + cputod32(value, &fh->fh_magic); +} + +static __u32 fl_get_magic(flow_header * fh) +{ + return d32tocpu(&fh->fh_magic); +} +static void fl_set_number(flow_header * fh, __u16 value) +{ + 
cputod16(value, &fh->fh_nr); +} +static unsigned fl_get_number(flow_header * fh) +{ + return d16tocpu(&fh->fh_nr); +} + +/* functions to get/set fields of subfile header */ + +static void sh_set_magic(subfile_header * sh, __u32 value) +{ + cputod32(value, &sh->sh_magic); +} + +static __u32 sh_get_magic(subfile_header * sh) +{ + return d32tocpu(&sh->sh_magic); +} +static void sh_set_type(subfile_header * sh, __u16 value) +{ + cputod16(value, &sh->sh_magic); +} +static unsigned sh_get_type(subfile_header * sh) +{ + return d16tocpu(&sh->sh_magic); +} +static void sh_set_arg_len(subfile_header * sh, __u16 value) +{ + cputod16(value, &sh->sh_arg_len); +} +static unsigned sh_get_arg_len(subfile_header * sh) +{ + return d16tocpu(&sh->sh_arg_len); +} +static void sh_set_body_len(subfile_header * sh, __u32 value) +{ + cputod32(value, &sh->sh_body_len); +} + +static __u32 sh_get_body_len(subfile_header * sh) +{ + return d32tocpu(&sh->sh_body_len); +} + +/* in-core minimal stat-data, light-weight analog of inode */ + +struct incore_sd_base { + umode_t isd_mode; + nlink_t isd_nlink; + loff_t isd_size; + char *isd_data; /* 'subflow' to write */ +}; + +/* open invert create a list of invert entries, + every entry is represented by structure inv_entry */ + +struct inv_entry { + struct list_head *ie_list; + struct file *ie_file; /* this is NULL if the file doesn't + have unformated nodes */ + struct incore_sd_base *ie_sd; /* inode-less analog of struct file */ +}; + +/* allocate and init invert entry */ + +static struct inv_entry *allocate_inv_entry(void) +{ + struct inv_entry *inv_entry; + + inv_entry = reiser4_kmalloc(sizeof(struct inv_entry), GFP_KERNEL); + if (!inv_entry) + return ERR_PTR(RETERR(-ENOMEM)); + inv_entry->ie_file = NULL; + inv_entry->ie_sd = NULL; + INIT_LIST_HEAD(&inv_entry->ie_list); + return inv_entry; +} + +static int put_inv_entry(struct inv_entry *ientry) +{ + int result = 0; + + assert("edward-96", ientry != NULL); + assert("edward-97", ientry->ie_list != 
NULL); + + list_del(ientry->ie_list); + if (ientry->ie_sd != NULL) { + kfree(ientry->ie_sd); + kfree(ientry); + } + if (ientry->ie_file != NULL) + result = filp_close(ientry->file, NULL); + return result; +} + +static int allocate_incore_sd_base(struct inv_entry *inv_entry) +{ + struct incore_sd_base *isd_base assert("edward-98", inv_entry != NULL); + assert("edward-99", inv_entry->ie_inode = NULL); + assert("edward-100", inv_entry->ie_sd = NULL); + + isd_base = reiser4_kmalloc(sizeof(struct incore_sd_base), GFP_KERNEL); + if (!isd_base) + return RETERR(-ENOMEM); + inv_entry->ie_sd = isd_base; + return 0; +} + +/* this can be installed as ->init_inv_entry () method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). + Copies data from on-disk stat-data format into light-weight analog of inode . + Doesn't hanlde stat-data extensions. */ + +static void sd_base_load(struct inv_entry *inv_entry, char *sd) +{ + reiser4_stat_data_base *sd_base; + + assert("edward-101", inv_entry != NULL); + assert("edward-101", inv_entry->ie_sd != NULL); + assert("edward-102", sd != NULL); + + sd_base = (reiser4_stat_data_base *) sd; + inv_entry->incore_sd_base->isd_mode = d16tocpu(&sd_base->mode); + inv_entry->incore_sd_base->isd_nlink = d32tocpu(&sd_base->nlink); + inv_entry->incore_sd_base->isd_size = d64tocpu(&sd_base->size); + inv_entry->incore_sd_base->isd_data = NULL; +} + +/* initialise incore stat-data */ + +static void init_incore_sd_base(struct inv_entry *inv_entry, coord_t * coord) +{ + reiser4_plugin *plugin = item_plugin_by_coord(coord); + void *body = item_body_by_coord(coord); + + assert("edward-103", inv_entry != NULL); + assert("edward-104", plugin != NULL); + assert("edward-105", body != NULL); + + sd_base_load(inv_entry, body); +} + +/* takes a key or filename and allocates new invert_entry, + init and adds it into the list, + we use lookup_sd_by_key() for light-weight files and VFS lookup by filename */ + +int get_inv_entry(struct inode 
*invert_inode, /* inode of invert's body */ + inv_entry_type type, /* LIGHT-WEIGHT or ORDINARY */ + const reiser4_key * key, /* key of invert entry stat-data */ + char *filename, /* filename of the file to be opened */ + int flags, int mode) +{ + int result; + struct inv_entry *ientry; + + assert("edward-107", invert_inode != NULL); + + ientry = allocate_inv_entry(); + if (IS_ERR(ientry)) + return (PTR_ERR(ientry)); + + if (type == LIGHT_WEIGHT_FILE) { + coord_t coord; + lock_handle lh; + + assert("edward-108", key != NULL); + + init_coord(&coord); + init_lh(&lh); + result = + lookup_sd_by_key(reiser4_tree_by_inode(invert_inode), + ZNODE_READ_LOCK, &coord, &lh, key); + if (result == 0) + init_incore_sd_base(ientry, coord); + + done_lh(&lh); + done_coord(&coord); + return (result); + } else { + struct file *file = filp_open(filename, flags, mode); + /* FIXME_EDWARD here we need to check if we + did't follow to any mount point */ + + assert("edward-108", filename != NULL); + + if (IS_ERR(file)) + return (PTR_ERR(file)); + ientry->ie_file = file; + return 0; + } +} + +/* takes inode of invert, reads the body of this invert, parses it, + opens all invert entries and return pointer on the first inv_entry */ + +struct inv_entry *open_invert(struct file *invert_file) +{ + +} + +ssize_t subfile_read(struct *invert_entry, flow * f) +{ + +} + +ssize_t subfile_write(struct *invert_entry, flow * f) +{ + +} + +ssize_t invert_read(struct *file, flow * f) +{ + +} + +ssize_t invert_write(struct *file, flow * f) +{ + +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/symfile.c b/fs/reiser4/plugin/file/symfile.c new file mode 100644 index 000000000000..814dfb8b2cf8 --- /dev/null +++ b/fs/reiser4/plugin/file/symfile.c @@ -0,0 +1,87 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Symfiles are a generalization of Unix symlinks. + + A symfile when read behaves as though you took its contents and + substituted them into the reiser4 naming system as the right hand side + of an assignment, and then read that which you had assigned to it. + + A key issue for symfiles is how to implement writes through to + subfiles. In general, one must have some method of determining what + of that which is written to the symfile is written to what subfile. + This can be done by use of custom plugin methods written by users, or + by using a few general methods we provide for those willing to endure + the insertion of delimiters into what is read. + + Writing to symfiles without delimiters to denote what is written to + what subfile is not supported by any plugins we provide in this + release. Our most sophisticated support for writes is that embodied + by the invert plugin (see invert.c). 
+ + A read only version of the /etc/passwd file might be + constructed as a symfile whose contents are as follows: + + /etc/passwd/userlines/* + + or + + /etc/passwd/userlines/demidov+/etc/passwd/userlines/edward+/etc/passwd/userlines/reiser+/etc/passwd/userlines/root + + or + + /etc/passwd/userlines/(demidov+edward+reiser+root) + + A symfile with contents + + /filenameA+"(some text stored in the uninvertable symfile)+/filenameB + + will return when read + + The contents of filenameAsome text stored in the uninvertable symfileThe contents of filenameB + + and write of what has been read will not be possible to implement as + an identity operation because there are no delimiters denoting the + boundaries of what is to be written to what subfile. + + Note that one could make this a read/write symfile if one specified + delimiters, and the write method understood those delimiters delimited + what was written to subfiles. + + So, specifying the symfile in a manner that allows writes: + + /etc/passwd/userlines/demidov+"( + )+/etc/passwd/userlines/edward+"( + )+/etc/passwd/userlines/reiser+"( + )+/etc/passwd/userlines/root+"( + ) + + or + + /etc/passwd/userlines/(demidov+"( + )+edward+"( + )+reiser+"( + )+root+"( + )) + + and the file demidov might be specified as: + + /etc/passwd/userlines/demidov/username+"(:)+/etc/passwd/userlines/demidov/password+"(:)+/etc/passwd/userlines/demidov/userid+"(:)+/etc/passwd/userlines/demidov/groupid+"(:)+/etc/passwd/userlines/demidov/gecos+"(:)+/etc/passwd/userlines/demidov/home+"(:)+/etc/passwd/userlines/demidov/shell + + or + + /etc/passwd/userlines/demidov/(username+"(:)+password+"(:)+userid+"(:)+groupid+"(:)+gecos+"(:)+home+"(:)+shell) + + Notice that if the file demidov has a carriage return in it, the + parsing fails, but then if you put carriage returns in the wrong place + in a normal /etc/passwd file it breaks things also. 
+ + Note that it is forbidden to have no text between two interpolations + if one wants to be able to define what parts of a write go to what + subfiles referenced in an interpolation. + + If one wants to be able to add new lines by writing to the file, one + must either write a custom plugin for /etc/passwd that knows how to + name an added line, or one must use an invert, or one must use a more + sophisticated symfile syntax that we are not planning to write for + version 4.0. +*/ diff --git a/fs/reiser4/plugin/file/symlink.c b/fs/reiser4/plugin/file/symlink.c new file mode 100644 index 000000000000..bcf3ef80c4dc --- /dev/null +++ b/fs/reiser4/plugin/file/symlink.c @@ -0,0 +1,95 @@ +/* Copyright 2002, 2003, 2005 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../inode.h" + +#include +#include + +/* file plugin methods specific for symlink files + (SYMLINK_FILE_PLUGIN_ID) */ + +/* this is implementation of create_object method of file plugin for + SYMLINK_FILE_PLUGIN_ID + */ + +/** + * reiser4_create_symlink - create_object of file plugin for SYMLINK_FILE_PLUGIN_ID + * @symlink: inode of symlink object + * @dir: inode of parent directory + * @info: parameters of new object + * + * Inserts stat data with symlink extension where into the tree. + */ +int reiser4_create_symlink(struct inode *symlink, + struct inode *dir UNUSED_ARG, + reiser4_object_create_data *data /* info passed to us + * this is filled by + * reiser4() syscall + * in particular */) +{ + int result; + + assert("nikita-680", symlink != NULL); + assert("nikita-681", S_ISLNK(symlink->i_mode)); + assert("nikita-685", reiser4_inode_get_flag(symlink, REISER4_NO_SD)); + assert("nikita-682", dir != NULL); + assert("nikita-684", data != NULL); + assert("nikita-686", data->id == SYMLINK_FILE_PLUGIN_ID); + + /* + * stat data of symlink has symlink extension in which we store + * symlink content, that is, path symlink is pointing to. 
+ */ + reiser4_inode_data(symlink)->extmask |= (1 << SYMLINK_STAT); + + assert("vs-838", symlink->i_private == NULL); + symlink->i_private = (void *)data->name; + + assert("vs-843", symlink->i_size == 0); + INODE_SET_FIELD(symlink, i_size, strlen(data->name)); + + /* insert stat data appended with data->name */ + result = inode_file_plugin(symlink)->write_sd_by_inode(symlink); + if (result) { + /* FIXME-VS: Make sure that symlink->i_private is not attached + to kmalloced data */ + INODE_SET_FIELD(symlink, i_size, 0); + } else { + assert("vs-849", symlink->i_private + && reiser4_inode_get_flag(symlink, + REISER4_GENERIC_PTR_USED)); + assert("vs-850", + !memcmp((char *)symlink->i_private, data->name, + (size_t) symlink->i_size + 1)); + } + return result; +} + +/* this is implementation of destroy_inode method of file plugin for + SYMLINK_FILE_PLUGIN_ID + */ +void destroy_inode_symlink(struct inode *inode) +{ + assert("edward-799", + inode_file_plugin(inode) == + file_plugin_by_id(SYMLINK_FILE_PLUGIN_ID)); + assert("edward-800", !is_bad_inode(inode) && is_inode_loaded(inode)); + assert("edward-801", reiser4_inode_get_flag(inode, + REISER4_GENERIC_PTR_USED)); + assert("vs-839", S_ISLNK(inode->i_mode)); + + kfree(inode->i_private); + inode->i_private = NULL; + reiser4_inode_clr_flag(inode, REISER4_GENERIC_PTR_USED); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/tail_conversion.c b/fs/reiser4/plugin/file/tail_conversion.c new file mode 100644 index 000000000000..a21e464845a4 --- /dev/null +++ b/fs/reiser4/plugin/file/tail_conversion.c @@ -0,0 +1,763 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../inode.h" +#include "../../super.h" +#include "../../page_cache.h" +#include "../../carry.h" +#include "../../safe_link.h" +#include "../../vfs_ops.h" + +#include + +/* this 
file contains: + tail2extent and extent2tail */ + +/* exclusive access to a file is acquired when file state changes: tail2extent, empty2tail, extent2tail, etc */ +void get_exclusive_access(struct unix_file_info * uf_info) +{ + assert("nikita-3028", reiser4_schedulable()); + assert("nikita-3047", LOCK_CNT_NIL(inode_sem_w)); + assert("nikita-3048", LOCK_CNT_NIL(inode_sem_r)); + /* + * "deadlock avoidance": sometimes we commit a transaction under + * rw-semaphore on a file. Such commit can deadlock with another + * thread that captured some block (hence preventing atom from being + * committed) and waits on rw-semaphore. + */ + reiser4_txn_restart_current(); + LOCK_CNT_INC(inode_sem_w); + down_write(&uf_info->latch); + uf_info->exclusive_use = 1; + assert("vs-1713", uf_info->ea_owner == NULL); + assert("vs-1713", atomic_read(&uf_info->nr_neas) == 0); + ON_DEBUG(uf_info->ea_owner = current); +} + +void drop_exclusive_access(struct unix_file_info * uf_info) +{ + assert("vs-1714", uf_info->ea_owner == current); + assert("vs-1715", atomic_read(&uf_info->nr_neas) == 0); + ON_DEBUG(uf_info->ea_owner = NULL); + uf_info->exclusive_use = 0; + up_write(&uf_info->latch); + assert("nikita-3049", LOCK_CNT_NIL(inode_sem_r)); + assert("nikita-3049", LOCK_CNT_GTZ(inode_sem_w)); + LOCK_CNT_DEC(inode_sem_w); + reiser4_txn_restart_current(); +} + +/** + * nea_grabbed - do something when file semaphore is down_read-ed + * @uf_info: + * + * This is called when nonexclisive access is obtained on file. All it does is + * for debugging purposes. 
+ */ +static void nea_grabbed(struct unix_file_info *uf_info) +{ +#if REISER4_DEBUG + LOCK_CNT_INC(inode_sem_r); + assert("vs-1716", uf_info->ea_owner == NULL); + atomic_inc(&uf_info->nr_neas); + uf_info->last_reader = current; +#endif +} + +/** + * get_nonexclusive_access - get nonexclusive access to a file + * @uf_info: unix file specific part of inode to obtain access to + * + * Nonexclusive access is obtained on a file before read, write, readpage. + */ +void get_nonexclusive_access(struct unix_file_info *uf_info) +{ + assert("nikita-3029", reiser4_schedulable()); + assert("nikita-3361", get_current_context()->trans->atom == NULL); + + down_read(&uf_info->latch); + nea_grabbed(uf_info); +} + +/** + * try_to_get_nonexclusive_access - try to get nonexclusive access to a file + * @uf_info: unix file specific part of inode to obtain access to + * + * Non-blocking version of nonexclusive access obtaining. + */ +int try_to_get_nonexclusive_access(struct unix_file_info *uf_info) +{ + int result; + + result = down_read_trylock(&uf_info->latch); + if (result) + nea_grabbed(uf_info); + return result; +} + +void drop_nonexclusive_access(struct unix_file_info * uf_info) +{ + assert("vs-1718", uf_info->ea_owner == NULL); + assert("vs-1719", atomic_read(&uf_info->nr_neas) > 0); + ON_DEBUG(atomic_dec(&uf_info->nr_neas)); + + up_read(&uf_info->latch); + + LOCK_CNT_DEC(inode_sem_r); + reiser4_txn_restart_current(); +} + +/* part of tail2extent. Cut all items covering @count bytes starting from + @offset */ +/* Audited by: green(2002.06.15) */ +static int cut_formatting_items(struct inode *inode, loff_t offset, int count) +{ + reiser4_key from, to; + + /* AUDIT: How about putting an assertion here, what would check + all provided range is covered by tail items only? 
*/ + /* key of first byte in the range to be cut */ + inode_file_plugin(inode)->key_by_inode(inode, offset, &from); + + /* key of last byte in that range */ + to = from; + set_key_offset(&to, (__u64) (offset + count - 1)); + + /* cut everything between those keys */ + return reiser4_cut_tree(reiser4_tree_by_inode(inode), &from, &to, + inode, 0); +} + +static void release_all_pages(struct page **pages, unsigned nr_pages) +{ + unsigned i; + + for (i = 0; i < nr_pages; i++) { + if (pages[i] == NULL) { +#if REISER4_DEBUG + unsigned j; + for (j = i + 1; j < nr_pages; j++) + assert("vs-1620", pages[j] == NULL); +#endif + break; + } + put_page(pages[i]); + pages[i] = NULL; + } +} + +/* part of tail2extent. replace tail items with extent one. Content of tail + items (@count bytes) being cut are copied already into + pages. extent_writepage method is called to create extents corresponding to + those pages */ +static int replace(struct inode *inode, struct page **pages, unsigned nr_pages, int count) +{ + int result; + unsigned i; + STORE_COUNTERS; + + if (nr_pages == 0) + return 0; + + assert("vs-596", pages[0]); + + /* cut copied items */ + result = cut_formatting_items(inode, page_offset(pages[0]), count); + if (result) + return result; + + CHECK_COUNTERS; + + /* put into tree replacement for just removed items: extent item, namely */ + for (i = 0; i < nr_pages; i++) { + result = add_to_page_cache_lru(pages[i], inode->i_mapping, + pages[i]->index, + mapping_gfp_mask(inode-> + i_mapping)); + if (result) + break; + SetPageUptodate(pages[i]); + set_page_dirty_notag(pages[i]); + unlock_page(pages[i]); + result = find_or_create_extent(pages[i]); + if (result) { + /* + * Unsuccess in critical place: + * tail has been removed, + * but extent hasn't been created + */ + warning("edward-1572", + "Report the error code %i to developers. 
Run FSCK", + result); + break; + } + } + return result; +} + +#define TAIL2EXTENT_PAGE_NUM 3 /* number of pages to fill before cutting tail + * items */ + +static int reserve_tail2extent_iteration(struct inode *inode) +{ + reiser4_block_nr unformatted_nodes; + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + + /* number of unformatted nodes which will be created */ + unformatted_nodes = TAIL2EXTENT_PAGE_NUM; + + /* + * space required for one iteration of extent->tail conversion: + * + * 1. kill N tail items + * + * 2. insert TAIL2EXTENT_PAGE_NUM unformatted nodes + * + * 3. insert TAIL2EXTENT_PAGE_NUM (worst-case single-block + * extents) extent units. + * + * 4. drilling to the leaf level by coord_by_key() + * + * 5. possible update of stat-data + * + */ + grab_space_enable(); + return reiser4_grab_space + (2 * tree->height + + TAIL2EXTENT_PAGE_NUM + + TAIL2EXTENT_PAGE_NUM * estimate_one_insert_into_item(tree) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), BA_CAN_COMMIT); +} + +/* clear stat data's flag indicating that conversion is being converted */ +static int complete_conversion(struct inode *inode) +{ + int result; + + grab_space_enable(); + result = + reiser4_grab_space(inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + if (result == 0) { + reiser4_inode_clr_flag(inode, REISER4_PART_MIXED); + result = reiser4_update_sd(inode); + } + if (result) + warning("vs-1696", "Failed to clear converting bit of %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + return 0; +} + +/** + * find_start + * @inode: + * @id: + * @offset: + * + * this is used by tail2extent and extent2tail to detect where previous + * uncompleted conversion stopped + */ +static int find_start(struct inode *inode, reiser4_plugin_id id, __u64 *offset) +{ + int result; + lock_handle lh; + coord_t coord; + struct unix_file_info *ufo; + int found; + reiser4_key key; + + ufo = unix_file_inode_data(inode); + 
init_lh(&lh); + result = 0; + found = 0; + inode_file_plugin(inode)->key_by_inode(inode, *offset, &key); + do { + init_lh(&lh); + result = find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, inode); + + if (result == CBK_COORD_FOUND) { + if (coord.between == AT_UNIT) { + /*coord_clear_iplug(&coord); */ + result = zload(coord.node); + if (result == 0) { + if (item_id_by_coord(&coord) == id) + found = 1; + else + item_plugin_by_coord(&coord)->s. + file.append_key(&coord, + &key); + zrelse(coord.node); + } + } else + result = RETERR(-ENOENT); + } + done_lh(&lh); + } while (result == 0 && !found); + *offset = get_key_offset(&key); + return result; +} + +/** + * tail2extent + * @uf_info: + * + * + */ +int tail2extent(struct unix_file_info *uf_info) +{ + int result; + reiser4_key key; /* key of next byte to be moved to page */ + char *p_data; /* data of page */ + unsigned page_off = 0, /* offset within the page where to copy data */ + count; /* number of bytes of item which can be + * copied to page */ + struct page *pages[TAIL2EXTENT_PAGE_NUM]; + struct page *page; + int done; /* set to 1 when all file is read */ + char *item; + int i; + struct inode *inode; + int first_iteration; + int bytes; + __u64 offset; + + assert("nikita-3362", ea_obtained(uf_info)); + inode = unix_file_info_to_inode(uf_info); + assert("nikita-3412", !IS_RDONLY(inode)); + assert("vs-1649", uf_info->container != UF_CONTAINER_EXTENTS); + assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); + + offset = 0; + first_iteration = 1; + result = 0; + if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * file is marked on disk as there was a conversion which did + * not complete due to either crash or some error. 
Find which + * offset tail conversion stopped at + */ + result = find_start(inode, FORMATTING_ID, &offset); + if (result == -ENOENT) { + /* no tail items found, everything is converted */ + uf_info->container = UF_CONTAINER_EXTENTS; + complete_conversion(inode); + return 0; + } else if (result != 0) + /* some other error */ + return result; + first_iteration = 0; + } + + reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV); + + /* get key of first byte of a file */ + inode_file_plugin(inode)->key_by_inode(inode, offset, &key); + + done = 0; + while (done == 0) { + memset(pages, 0, sizeof(pages)); + result = reserve_tail2extent_iteration(inode); + if (result != 0) { + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + goto out; + } + if (first_iteration) { + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + reiser4_update_sd(inode); + first_iteration = 0; + } + bytes = 0; + for (i = 0; i < sizeof_array(pages) && done == 0; i++) { + assert("vs-598", + (get_key_offset(&key) & ~PAGE_MASK) == 0); + page = alloc_page(reiser4_ctx_gfp_mask_get()); + if (!page) { + result = RETERR(-ENOMEM); + goto error; + } + + page->index = + (unsigned long)(get_key_offset(&key) >> + PAGE_SHIFT); + /* + * usually when one is going to longterm lock znode (as + * find_file_item does, for instance) he must not hold + * locked pages. However, there is an exception for + * case tail2extent. 
Pages appearing here are not + * reachable to everyone else, they are clean, they do + * not have jnodes attached so keeping them locked do + * not risk deadlock appearance + */ + assert("vs-983", !PagePrivate(page)); + reiser4_invalidate_pages(inode->i_mapping, page->index, + 1, 0); + + for (page_off = 0; page_off < PAGE_SIZE;) { + coord_t coord; + lock_handle lh; + + /* get next item */ + /* FIXME: we might want to readahead here */ + init_lh(&lh); + result = + find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, + inode); + if (result != CBK_COORD_FOUND) { + /* + * error happened of not items of file + * were found + */ + done_lh(&lh); + put_page(page); + goto error; + } + + if (coord.between == AFTER_UNIT) { + /* + * end of file is reached. Padd page + * with zeros + */ + done_lh(&lh); + done = 1; + p_data = kmap_atomic(page); + memset(p_data + page_off, 0, + PAGE_SIZE - page_off); + kunmap_atomic(p_data); + break; + } + + result = zload(coord.node); + if (result) { + put_page(page); + done_lh(&lh); + goto error; + } + assert("vs-856", coord.between == AT_UNIT); + item = ((char *)item_body_by_coord(&coord)) + + coord.unit_pos; + + /* how many bytes to copy */ + count = + item_length_by_coord(&coord) - + coord.unit_pos; + /* limit length of copy to end of page */ + if (count > PAGE_SIZE - page_off) + count = PAGE_SIZE - page_off; + + /* + * copy item (as much as will fit starting from + * the beginning of the item) into the page + */ + p_data = kmap_atomic(page); + memcpy(p_data + page_off, item, count); + kunmap_atomic(p_data); + + page_off += count; + bytes += count; + set_key_offset(&key, + get_key_offset(&key) + count); + + zrelse(coord.node); + done_lh(&lh); + } /* end of loop which fills one page by content of + * formatting items */ + + if (page_off) { + /* something was copied into page */ + pages[i] = page; + } else { + put_page(page); + assert("vs-1648", done == 1); + break; + } + } /* end of loop through pages of one conversion iteration */ + + 
if (i > 0) { + result = replace(inode, pages, i, bytes); + release_all_pages(pages, sizeof_array(pages)); + if (result) + goto error; + /* + * We have to drop exclusive access to avoid deadlock + * which may happen because called by reiser4_writepages + * capture_unix_file requires to get non-exclusive + * access to a file. It is safe to drop EA in the middle + * of tail2extent conversion because write_unix_file, + * setattr_unix_file(truncate), mmap_unix_file, + * release_unix_file(extent2tail) checks if conversion + * is not in progress (see comments before + * get_exclusive_access_careful(). + * Other processes that acquire non-exclusive access + * (read_unix_file, reiser4_writepages, etc) should work + * on partially converted files. + */ + drop_exclusive_access(uf_info); + /* throttle the conversion */ + reiser4_throttle_write(inode); + get_exclusive_access(uf_info); + + /* + * nobody is allowed to complete conversion but a + * process which started it + */ + assert("", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + } + } + if (result == 0) { + /* file is converted to extent items */ + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + assert("vs-1697", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + + uf_info->container = UF_CONTAINER_EXTENTS; + complete_conversion(inode); + } else { + /* + * conversion is not complete. Inode was already marked as + * REISER4_PART_MIXED and stat-data were updated at the first + * iteration of the loop above. 
+ */ + error: + release_all_pages(pages, sizeof_array(pages)); + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + warning("edward-1548", "Partial conversion of %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + } + + out: + /* this flag should be cleared, otherwise get_exclusive_access_careful() + will fall into infinite loop */ + assert("edward-1549", !reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)); + return result; +} + +static int reserve_extent2tail_iteration(struct inode *inode) +{ + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + /* + * reserve blocks for (in this order): + * + * 1. removal of extent item + * + * 2. insertion of tail by insert_flow() + * + * 3. drilling to the leaf level by coord_by_key() + * + * 4. possible update of stat-data + */ + grab_space_enable(); + return reiser4_grab_space + (estimate_one_item_removal(tree) + + estimate_insert_flow(tree->height) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), BA_CAN_COMMIT); +} + +/* for every page of file: read page, cut part of extent pointing to this page, + put data of page tree by tail item */ +int extent2tail(struct file * file, struct unix_file_info *uf_info) +{ + int result; + struct inode *inode; + struct page *page; + unsigned long num_pages, i; + unsigned long start_page; + reiser4_key from; + reiser4_key to; + unsigned count; + __u64 offset; + + assert("nikita-3362", ea_obtained(uf_info)); + inode = unix_file_info_to_inode(uf_info); + assert("nikita-3412", !IS_RDONLY(inode)); + assert("vs-1649", uf_info->container != UF_CONTAINER_TAILS); + assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); + + offset = 0; + if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * file is marked on disk as there was a conversion which did + * not complete due to either crash or some error. 
Find which + * offset tail conversion stopped at + */ + result = find_start(inode, EXTENT_POINTER_ID, &offset); + if (result == -ENOENT) { + /* no extent found, everything is converted */ + uf_info->container = UF_CONTAINER_TAILS; + complete_conversion(inode); + return 0; + } else if (result != 0) + /* some other error */ + return result; + } + reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV); + + /* number of pages in the file */ + num_pages = + (inode->i_size + - offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + start_page = offset >> PAGE_SHIFT; + + inode_file_plugin(inode)->key_by_inode(inode, offset, &from); + to = from; + + result = 0; + for (i = 0; i < num_pages; i++) { + __u64 start_byte; + + result = reserve_extent2tail_iteration(inode); + if (result != 0) + break; + if (i == 0 && offset == 0) { + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + reiser4_update_sd(inode); + } + + page = read_mapping_page(inode->i_mapping, + (unsigned)(i + start_page), NULL); + if (IS_ERR(page)) { + result = PTR_ERR(page); + warning("edward-1569", + "Can not read page %lu of %lu: %i", + i, num_pages, result); + break; + } + + wait_on_page_locked(page); + + if (!PageUptodate(page)) { + put_page(page); + result = RETERR(-EIO); + break; + } + + /* cut part of file we have read */ + start_byte = (__u64) ((i + start_page) << PAGE_SHIFT); + set_key_offset(&from, start_byte); + set_key_offset(&to, start_byte + PAGE_SIZE - 1); + /* + * reiser4_cut_tree_object() returns -E_REPEAT to allow atom + * commits during over-long truncates. But + * extent->tail conversion should be performed in one + * transaction. 
+ */ + result = reiser4_cut_tree(reiser4_tree_by_inode(inode), &from, + &to, inode, 0); + + if (result) { + put_page(page); + warning("edward-1570", + "Can not delete converted chunk: %i", + result); + break; + } + + /* put page data into tree via tail_write */ + count = PAGE_SIZE; + if ((i == (num_pages - 1)) && + (inode->i_size & ~PAGE_MASK)) + /* last page can be incompleted */ + count = (inode->i_size & ~PAGE_MASK); + while (count) { + loff_t pos = start_byte; + + assert("edward-1537", + file != NULL && file->f_path.dentry != NULL); + assert("edward-1538", + file_inode(file) == inode); + + result = reiser4_write_tail_noreserve(file, inode, + (char __user *)kmap(page), + count, &pos); + kunmap(page); + /* FIXME: + may be put_file_hint() instead ? */ + reiser4_free_file_fsdata(file); + if (result <= 0) { + /* + * Unsuccess in critical place: + * extent has been removed, + * but tail hasn't been created + */ + warning("edward-1571", + "Report the error code %i to developers. Run FSCK", + result); + put_page(page); + reiser4_inode_clr_flag(inode, + REISER4_PART_IN_CONV); + return result; + } + count -= result; + } + + /* release page */ + lock_page(page); + /* page is already detached from jnode and mapping. */ + assert("vs-1086", page->mapping == NULL); + assert("nikita-2690", + (!PagePrivate(page) && jprivate(page) == 0)); + /* waiting for writeback completion with page lock held is + * perfectly valid. 
*/ + wait_on_page_writeback(page); + reiser4_drop_page(page); + /* release reference taken by read_cache_page() above */ + put_page(page); + + drop_exclusive_access(uf_info); + /* throttle the conversion */ + reiser4_throttle_write(inode); + get_exclusive_access(uf_info); + /* + * nobody is allowed to complete conversion but a process which + * started it + */ + assert("", reiser4_inode_get_flag(inode, REISER4_PART_MIXED)); + } + + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + + if (i == num_pages) { + /* file is converted to formatted items */ + assert("vs-1698", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + assert("vs-1260", + inode_has_no_jnodes(reiser4_inode_data(inode))); + + uf_info->container = UF_CONTAINER_TAILS; + complete_conversion(inode); + return 0; + } + /* + * conversion is not complete. Inode was already marked as + * REISER4_PART_MIXED and stat-data were updated at the first + * iteration of the loop above. + */ + warning("nikita-2282", + "Partial conversion of %llu: %lu of %lu: %i", + (unsigned long long)get_inode_oid(inode), i, + num_pages, result); + + /* this flag should be cleared, otherwise get_exclusive_access_careful() + will fall into infinite loop */ + assert("edward-1550", !reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)); + return result; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file_ops.c b/fs/reiser4/plugin/file_ops.c new file mode 100644 index 000000000000..fbdb8c365e5a --- /dev/null +++ b/fs/reiser4/plugin/file_ops.c @@ -0,0 +1,119 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for some of methods of + struct file_operations and of struct address_space_operations +*/ + +#include "../inode.h" +#include "object.h" + +/* file operations */ + +/* implementation of vfs's 
llseek method of struct file_operations for + typical directory can be found in file_ops_readdir.c +*/ +loff_t reiser4_llseek_dir_common(struct file *, loff_t, int origin); + +/* implementation of vfs's iterate method of struct file_operations for + typical directory can be found in file_ops_readdir.c +*/ +int reiser4_iterate_common(struct file *, struct dir_context *); + +/** + * reiser4_release_dir_common - release of struct file_operations + * @inode: inode of released file + * @file: file to release + * + * Implementation of release method of struct file_operations for typical + * directory. All it does is freeing of reiser4 specific file data. +*/ +int reiser4_release_dir_common(struct inode *inode, struct file *file) +{ + reiser4_context *ctx; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + reiser4_free_file_fsdata(file); + reiser4_exit_context(ctx); + return 0; +} + +/* this is common implementation of vfs's fsync method of struct + file_operations +*/ +int reiser4_sync_common(struct file *file, loff_t start, + loff_t end, int datasync) +{ + reiser4_context *ctx; + int result; + struct dentry *dentry = file->f_path.dentry; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + result = txnmgr_force_commit_all(dentry->d_inode->i_sb, 0); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* + * common sync method for regular files. + * + * We are trying to be smart here. Instead of committing all atoms (original + * solution), we scan dirty pages of this file and commit all atoms they are + * part of. + * + * Situation is complicated by anonymous pages: i.e., extent-less pages + * dirtied through mmap. Fortunately sys_fsync() first calls + * filemap_fdatawrite() that will ultimately call reiser4_writepages_dispatch, + * insert all missing extents and capture anonymous pages. 
+ */ +int reiser4_sync_file_common(struct file *file, loff_t start, loff_t end, int datasync) +{ + reiser4_context *ctx; + txn_atom *atom; + reiser4_block_nr reserve; + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = file->f_mapping->host; + + int err = filemap_write_and_wait_range(file->f_mapping->host->i_mapping, start, end); + if (err) + return err; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + inode_lock(inode); + + reserve = estimate_update_common(dentry->d_inode); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + reiser4_exit_context(ctx); + inode_unlock(inode); + return RETERR(-ENOSPC); + } + write_sd_by_inode_common(dentry->d_inode); + + atom = get_current_atom_locked(); + spin_lock_txnh(ctx->trans); + force_commit_atom(ctx->trans); + reiser4_exit_context(ctx); + inode_unlock(inode); + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file_ops_readdir.c b/fs/reiser4/plugin/file_ops_readdir.c new file mode 100644 index 000000000000..0cde411eab18 --- /dev/null +++ b/fs/reiser4/plugin/file_ops_readdir.c @@ -0,0 +1,658 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../inode.h" + +/* return true, iff @coord points to the valid directory item that is part of + * @inode directory. 
*/ +static int is_valid_dir_coord(struct inode *inode, coord_t *coord) +{ + return plugin_of_group(item_plugin_by_coord(coord), + DIR_ENTRY_ITEM_TYPE) && + inode_file_plugin(inode)->owns_item(inode, coord); +} + +/* compare two logical positions within the same directory */ +static cmp_t dir_pos_cmp(const struct dir_pos *p1, const struct dir_pos *p2) +{ + cmp_t result; + + assert("nikita-2534", p1 != NULL); + assert("nikita-2535", p2 != NULL); + + result = de_id_cmp(&p1->dir_entry_key, &p2->dir_entry_key); + if (result == EQUAL_TO) { + int diff; + + diff = p1->pos - p2->pos; + result = + (diff < 0) ? LESS_THAN : (diff ? GREATER_THAN : EQUAL_TO); + } + return result; +} + +/* see comment before reiser4_readdir_common() for overview of why "adjustment" + * is necessary. */ +static void +adjust_dir_pos(struct file *dir, struct readdir_pos *readdir_spot, + const struct dir_pos *mod_point, int adj) +{ + struct dir_pos *pos; + + /* + * new directory entry was added (adj == +1) or removed (adj == -1) at + * the @mod_point. Directory file descriptor @dir is doing readdir and + * is currently positioned at @readdir_spot. Latter has to be updated + * to maintain stable readdir. + */ + /* directory is positioned to the beginning. */ + if (readdir_spot->entry_no == 0) + return; + + pos = &readdir_spot->position; + switch (dir_pos_cmp(mod_point, pos)) { + case LESS_THAN: + /* @mod_pos is _before_ @readdir_spot, that is, entry was + * added/removed on the left (in key order) of current + * position. */ + /* logical number of directory entry readdir is "looking" at + * changes */ + readdir_spot->entry_no += adj; + assert("nikita-2577", + ergo(dir != NULL, + reiser4_get_dir_fpos(dir, dir->f_pos) + adj >= 0)); + if (de_id_cmp(&pos->dir_entry_key, + &mod_point->dir_entry_key) == EQUAL_TO) { + assert("nikita-2575", mod_point->pos < pos->pos); + /* + * if entry added/removed has the same key as current + * for readdir, update counter of duplicate keys in + * @readdir_spot. 
+ */ + pos->pos += adj; + } + break; + case GREATER_THAN: + /* directory is modified after @pos: nothing to do. */ + break; + case EQUAL_TO: + /* cannot insert an entry readdir is looking at, because it + already exists. */ + assert("nikita-2576", adj < 0); + /* directory entry to which @pos points to is being + removed. + + NOTE-NIKITA: Right thing to do is to update @pos to point + to the next entry. This is complex (we are under spin-lock + for one thing). Just rewind it to the beginning. Next + readdir will have to scan the beginning of + directory. Proper solution is to use semaphore in + spin lock's stead and use rewind_right() here. + + NOTE-NIKITA: now, semaphore is used, so... + */ + memset(readdir_spot, 0, sizeof *readdir_spot); + } +} + +/* scan all file-descriptors for this directory and adjust their + positions respectively. Should be used by implementations of + add_entry and rem_entry of dir plugin */ +void reiser4_adjust_dir_file(struct inode *dir, const struct dentry *de, + int offset, int adj) +{ + reiser4_file_fsdata *scan; + struct dir_pos mod_point; + + assert("nikita-2536", dir != NULL); + assert("nikita-2538", de != NULL); + assert("nikita-2539", adj != 0); + + build_de_id(dir, &de->d_name, &mod_point.dir_entry_key); + mod_point.pos = offset; + + spin_lock_inode(dir); + + /* + * new entry was added/removed in directory @dir. Scan all file + * descriptors for @dir that are currently involved into @readdir and + * update them. + */ + + list_for_each_entry(scan, get_readdir_list(dir), dir.linkage) + adjust_dir_pos(scan->back, &scan->dir.readdir, &mod_point, adj); + + spin_unlock_inode(dir); +} + +/* + * traverse tree to start/continue readdir from the readdir position @pos. 
+ */ +static int dir_go_to(struct file *dir, struct readdir_pos *pos, tap_t *tap) +{ + reiser4_key key; + int result; + struct inode *inode; + + assert("nikita-2554", pos != NULL); + + inode = file_inode(dir); + result = inode_dir_plugin(inode)->build_readdir_key(dir, &key); + if (result != 0) + return result; + result = reiser4_object_lookup(inode, + &key, + tap->coord, + tap->lh, + tap->mode, + FIND_EXACT, + LEAF_LEVEL, LEAF_LEVEL, + 0, &tap->ra_info); + if (result == CBK_COORD_FOUND) + result = rewind_right(tap, (int)pos->position.pos); + else { + tap->coord->node = NULL; + done_lh(tap->lh); + result = RETERR(-EIO); + } + return result; +} + +/* + * handling of non-unique keys: calculate at what ordinal position within + * sequence of directory items with identical keys @pos is. + */ +static int set_pos(struct inode *inode, struct readdir_pos *pos, tap_t *tap) +{ + int result; + coord_t coord; + lock_handle lh; + tap_t scan; + de_id *did; + reiser4_key de_key; + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&scan, &coord, &lh, ZNODE_READ_LOCK); + reiser4_tap_copy(&scan, tap); + reiser4_tap_load(&scan); + pos->position.pos = 0; + + did = &pos->position.dir_entry_key; + + if (is_valid_dir_coord(inode, scan.coord)) { + + build_de_id_by_key(unit_key_by_coord(scan.coord, &de_key), did); + + while (1) { + + result = go_prev_unit(&scan); + if (result != 0) + break; + + if (!is_valid_dir_coord(inode, scan.coord)) { + result = -EINVAL; + break; + } + + /* get key of directory entry */ + unit_key_by_coord(scan.coord, &de_key); + if (de_id_key_cmp(did, &de_key) != EQUAL_TO) { + /* duplicate-sequence is over */ + break; + } + pos->position.pos++; + } + } else + result = RETERR(-ENOENT); + reiser4_tap_relse(&scan); + reiser4_tap_done(&scan); + return result; +} + +/* + * "rewind" directory to @offset, i.e., set @pos and @tap correspondingly. 
+ */ +static int dir_rewind(struct file *dir, loff_t *fpos, struct readdir_pos *pos, tap_t *tap) +{ + __u64 destination; + __s64 shift; + int result; + struct inode *inode; + loff_t dirpos; + + assert("nikita-2553", dir != NULL); + assert("nikita-2548", pos != NULL); + assert("nikita-2551", tap->coord != NULL); + assert("nikita-2552", tap->lh != NULL); + + dirpos = reiser4_get_dir_fpos(dir, *fpos); + shift = dirpos - pos->fpos; + /* this is logical directory entry within @dir which we are rewinding + * to */ + destination = pos->entry_no + shift; + + inode = file_inode(dir); + if (dirpos < 0) + return RETERR(-EINVAL); + else if (destination == 0ll || dirpos == 0) { + /* rewind to the beginning of directory */ + memset(pos, 0, sizeof *pos); + return dir_go_to(dir, pos, tap); + } else if (destination >= inode->i_size) + return RETERR(-ENOENT); + + if (shift < 0) { + /* I am afraid of negative numbers */ + shift = -shift; + /* rewinding to the left */ + if (shift <= (int)pos->position.pos) { + /* destination is within sequence of entries with + duplicate keys. */ + result = dir_go_to(dir, pos, tap); + } else { + shift -= pos->position.pos; + while (1) { + /* repetitions: deadlock is possible when + going to the left. */ + result = dir_go_to(dir, pos, tap); + if (result == 0) { + result = rewind_left(tap, shift); + if (result == -E_DEADLOCK) { + reiser4_tap_done(tap); + continue; + } + } + break; + } + } + } else { + /* rewinding to the right */ + result = dir_go_to(dir, pos, tap); + if (result == 0) + result = rewind_right(tap, shift); + } + if (result == 0) { + result = set_pos(inode, pos, tap); + if (result == 0) { + /* update pos->position.pos */ + pos->entry_no = destination; + pos->fpos = dirpos; + } + } + return result; +} + +/* + * Function that is called by common_readdir() on each directory entry while + * doing readdir. ->filldir callback may block, so we had to release long term + * lock while calling it. To avoid repeating tree traversal, seal is used. 
If + * seal is broken, we return -E_REPEAT. Node is unlocked in this case. + * + * Whether node is unlocked in case of any other error is undefined. It is + * guaranteed to be still locked if success (0) is returned. + * + * When ->filldir() wants no more, feed_entry() returns 1, and node is + * unlocked. + */ +static int +feed_entry(tap_t *tap, struct dir_context *context) +{ + item_plugin *iplug; + char *name; + reiser4_key sd_key; + int result; + char buf[DE_NAME_BUF_LEN]; + char name_buf[32]; + char *local_name; + unsigned file_type; + seal_t seal; + coord_t *coord; + reiser4_key entry_key; + + coord = tap->coord; + iplug = item_plugin_by_coord(coord); + + /* pointer to name within the node */ + name = iplug->s.dir.extract_name(coord, buf); + assert("nikita-1371", name != NULL); + + /* key of object the entry points to */ + if (iplug->s.dir.extract_key(coord, &sd_key) != 0) + return RETERR(-EIO); + + /* we must release longterm znode lock before calling filldir to avoid + deadlock which may happen if filldir causes page fault. So, copy + name to intermediate buffer */ + if (strlen(name) + 1 > sizeof(name_buf)) { + local_name = kmalloc(strlen(name) + 1, + reiser4_ctx_gfp_mask_get()); + if (local_name == NULL) + return RETERR(-ENOMEM); + } else + local_name = name_buf; + + strcpy(local_name, name); + file_type = iplug->s.dir.extract_file_type(coord); + + unit_key_by_coord(coord, &entry_key); + reiser4_seal_init(&seal, coord, &entry_key); + + longterm_unlock_znode(tap->lh); + + /* + * send information about directory entry to the ->filldir() filler + * supplied to us by caller (VFS). + * + * ->filldir is entitled to do weird things. For example, ->filldir + * supplied by knfsd re-enters file system. Make sure no locks are + * held. 
+ */ + assert("nikita-3436", lock_stack_isclean(get_current_lock_stack())); + + reiser4_txn_restart_current(); + if (!dir_emit(context, name, (int)strlen(name), + /* inode number of object bounden by this entry */ + oid_to_uino(get_key_objectid(&sd_key)), file_type)) + /* ->filldir() is satisfied. (no space in buffer, IOW) */ + result = 1; + else + result = reiser4_seal_validate(&seal, coord, &entry_key, + tap->lh, tap->mode, + ZNODE_LOCK_HIPRI); + + if (local_name != name_buf) + kfree(local_name); + + return result; +} + +static void move_entry(struct readdir_pos *pos, coord_t *coord) +{ + reiser4_key de_key; + de_id *did; + + /* update @pos */ + ++pos->entry_no; + did = &pos->position.dir_entry_key; + + /* get key of directory entry */ + unit_key_by_coord(coord, &de_key); + + if (de_id_key_cmp(did, &de_key) == EQUAL_TO) + /* we are within sequence of directory entries + with duplicate keys. */ + ++pos->position.pos; + else { + pos->position.pos = 0; + build_de_id_by_key(&de_key, did); + } + ++pos->fpos; +} + +/* + * STATELESS READDIR + * + * readdir support in reiser4 relies on ability to update readdir_pos embedded + * into reiser4_file_fsdata on each directory modification (name insertion and + * removal), see reiser4_readdir_common() function below. This obviously doesn't + * work when reiser4 is accessed over NFS, because NFS doesn't keep any state + * across client READDIR requests for the same directory. + * + * To address this we maintain a "pool" of detached reiser4_file_fsdata + * (d_cursor). Whenever NFS readdir request comes, we detect this, and try to + * find detached reiser4_file_fsdata corresponding to previous readdir + * request. In other words, additional state is maintained on the + * server. (This is somewhat contrary to the design goals of NFS protocol.) 
+ * + * To efficiently detect when our ->readdir() method is called by NFS server, + * dentry is marked as "stateless" in reiser4_decode_fh() (this is checked by + * file_is_stateless() function). + * + * To find out d_cursor in the pool, we encode client id (cid) in the highest + * bits of NFS readdir cookie: when first readdir request comes to the given + * directory from the given client, cookie is set to 0. This situation is + * detected, global cid_counter is incremented, and stored in highest bits of + * all direntry offsets returned to the client, including last one. As the + * only valid readdir cookie is one obtained as direntry->offset, we are + * guaranteed that next readdir request (continuing current one) will have + * current cid in the highest bits of starting readdir cookie. All d_cursors + * are hashed into per-super-block hash table by (oid, cid) key. + * + * In addition d_cursors are placed into per-super-block radix tree where they + * are keyed by oid alone. This is necessary to efficiently remove them during + * rmdir. + * + * Finally, currently unused d_cursors are linked into a special list. This list + * is used by d_cursor_shrink to reclaim d_cursors on memory pressure. + * + */ + +/* + * prepare for readdir. + * + * NOTE: @f->f_pos may be out-of-date (iterate() vs readdir()). + * @fpos is the effective position.
+ */ +static int dir_readdir_init(struct file *f, loff_t* fpos, tap_t *tap, + struct readdir_pos **pos) +{ + struct inode *inode; + reiser4_file_fsdata *fsdata; + int result; + + assert("nikita-1359", f != NULL); + inode = file_inode(f); + assert("nikita-1360", inode != NULL); + + if (!S_ISDIR(inode->i_mode)) + return RETERR(-ENOTDIR); + + /* try to find detached readdir state */ + result = reiser4_attach_fsdata(f, fpos, inode); + if (result != 0) + return result; + + fsdata = reiser4_get_file_fsdata(f); + assert("nikita-2571", fsdata != NULL); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + /* add file descriptor to the readdir list hanging of directory + * inode. This list is used to scan "readdirs-in-progress" while + * inserting or removing names in the directory. */ + spin_lock_inode(inode); + if (list_empty_careful(&fsdata->dir.linkage)) + list_add(&fsdata->dir.linkage, get_readdir_list(inode)); + *pos = &fsdata->dir.readdir; + spin_unlock_inode(inode); + + /* move @tap to the current position */ + return dir_rewind(f, fpos, *pos, tap); +} + +/* this is implementation of vfs's llseek method of struct file_operations for + typical directory + See comment before reiser4_iterate_common() for explanation. 
+*/ +loff_t reiser4_llseek_dir_common(struct file *file, loff_t off, int origin) +{ + reiser4_context *ctx; + loff_t result; + struct inode *inode; + + inode = file_inode(file); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + inode_lock(inode); + + /* update ->f_pos */ + result = default_llseek_unlocked(file, off, origin); + if (result >= 0) { + int ff; + coord_t coord; + lock_handle lh; + tap_t tap; + struct readdir_pos *pos; + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + ff = dir_readdir_init(file, &file->f_pos, &tap, &pos); + reiser4_detach_fsdata(file); + if (ff != 0) + result = (loff_t) ff; + reiser4_tap_done(&tap); + } + reiser4_detach_fsdata(file); + inode_unlock(inode); + + reiser4_exit_context(ctx); + return result; +} + +/* this is common implementation of vfs's readdir method of struct + file_operations + + readdir problems: + + readdir(2)/getdents(2) interface is based on implicit assumption that + readdir can be restarted from any particular point by supplying file system + with off_t-full of data. That is, file system fills ->d_off field in struct + dirent and later user passes ->d_off to the seekdir(3), which is, actually, + implemented by glibc as lseek(2) on directory. + + Reiser4 cannot restart readdir from 64 bits of data, because two last + components of the key of directory entry are unknown, which given 128 bits: + locality and type fields in the key of directory entry are always known, to + start readdir() from given point objectid and offset fields have to be + filled. 
+ + Traditional UNIX API for scanning through directory + (readdir/seekdir/telldir/opendir/closedir/rewinddir/getdents) is based on the + assumption that directory is structured very much like regular file, in + particular, it is implied that each name within given directory (directory + entry) can be uniquely identified by scalar offset and that such offset is + stable across the life-time of the name it identifies. + + This is manifestly not so for reiser4. In reiser4 the only stable unique + identifier for a directory entry is its key, which doesn't fit into + seekdir/telldir API. + + solution: + + Within each file descriptor participating in readdir-ing of directory + plugin/dir/dir.h:readdir_pos is maintained. This structure keeps track of + the "current" directory entry that file descriptor looks at. It contains a + key of directory entry (plus some additional info to deal with non-unique + keys that we wouldn't dwell on here) and a logical position of this + directory entry starting from the beginning of the directory, that is, the + ordinal number of this entry in the readdir order. + + Obviously this logical position is not stable in the face of directory + modifications. To work around this, on each addition or removal of a directory + entry all file descriptors for the directory inode are scanned and their + readdir_pos are updated accordingly (adjust_dir_pos()).
+*/ +int reiser4_iterate_common(struct file *f /* directory file being read */, + struct dir_context *context /* callback data passed to us by VFS */) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + coord_t coord; + lock_handle lh; + tap_t tap; + struct readdir_pos *pos; + + assert("nikita-1359", f != NULL); + inode = file_inode(f); + assert("nikita-1360", inode != NULL); + + if (!S_ISDIR(inode->i_mode)) + return RETERR(-ENOTDIR); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + reiser4_readdir_readahead_init(inode, &tap); + +repeat: + result = dir_readdir_init(f, &context->pos, &tap, &pos); + if (result == 0) { + result = reiser4_tap_load(&tap); + /* scan entries one by one feeding them to @filld */ + while (result == 0) { + coord_t *coord; + + coord = tap.coord; + assert("nikita-2572", coord_is_existing_unit(coord)); + assert("nikita-3227", is_valid_dir_coord(inode, coord)); + + result = feed_entry(&tap, context); + if (result > 0) { + break; + } else if (result == 0) { + ++context->pos; + result = go_next_unit(&tap); + if (result == -E_NO_NEIGHBOR || + result == -ENOENT) { + result = 0; + break; + } else if (result == 0) { + if (is_valid_dir_coord(inode, coord)) + move_entry(pos, coord); + else + break; + } + } else if (result == -E_REPEAT) { + /* feed_entry() had to restart. 
*/ + ++context->pos; + reiser4_tap_relse(&tap); + goto repeat; + } else + warning("vs-1617", + "reiser4_readdir_common: unexpected error %d", + result); + } + reiser4_tap_relse(&tap); + + if (result >= 0) + f->f_version = inode->i_version; + } else if (result == -E_NO_NEIGHBOR || result == -ENOENT) + result = 0; + reiser4_tap_done(&tap); + reiser4_detach_fsdata(f); + + /* try to update directory's atime */ + if (reiser4_grab_space_force(inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT) != 0) + warning("", "failed to update atime on readdir: %llu", + get_inode_oid(inode)); + else + file_accessed(f); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return (result <= 0) ? result : 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/file_plugin_common.c b/fs/reiser4/plugin/file_plugin_common.c new file mode 100644 index 000000000000..706732dd393f --- /dev/null +++ b/fs/reiser4/plugin/file_plugin_common.c @@ -0,0 +1,1004 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for most of methods of + file plugin +*/ + +#include "../inode.h" +#include "object.h" +#include "../safe_link.h" + +static int insert_new_sd(struct inode *inode); +static int update_sd(struct inode *inode); + +/* this is common implementation of write_sd_by_inode method of file plugin + either insert stat data or update it + */ +int write_sd_by_inode_common(struct inode *inode/* object to save */) +{ + int result; + + assert("nikita-730", inode != NULL); + + if (reiser4_inode_get_flag(inode, REISER4_NO_SD)) + /* object doesn't have stat-data yet */ + result = insert_new_sd(inode); + else + result = update_sd(inode); + if (result != 0 && result != -ENAMETOOLONG && result != -ENOMEM) + /* Don't issue warnings about "name is too long" */ + 
warning("nikita-2221", "Failed to save sd for %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + return result; +} + +/* this is common implementation of key_by_inode method of file plugin + */ +int +key_by_inode_and_offset_common(struct inode *inode, loff_t off, + reiser4_key * key) +{ + reiser4_key_init(key); + set_key_locality(key, reiser4_inode_data(inode)->locality_id); + set_key_ordering(key, get_inode_ordering(inode)); + set_key_objectid(key, get_inode_oid(inode)); /*FIXME: inode->i_ino */ + set_key_type(key, KEY_BODY_MINOR); + set_key_offset(key, (__u64) off); + return 0; +} + +/* this is common implementation of set_plug_in_inode method of file plugin + */ +int set_plug_in_inode_common(struct inode *object /* inode to set plugin on */ , + struct inode *parent /* parent object */ , + reiser4_object_create_data * data /* creational + * data */ ) +{ + __u64 mask; + + object->i_mode = data->mode; + /* this should be plugin decision */ + object->i_uid = current_fsuid(); + object->i_mtime = object->i_atime = object->i_ctime = current_time(object); + + /* support for BSD style group-id assignment. 
See mount's manual page + description of bsdgroups ext2 mount options for more details */ + if (reiser4_is_set(object->i_sb, REISER4_BSD_GID)) + object->i_gid = parent->i_gid; + else if (parent->i_mode & S_ISGID) { + /* parent directory has sguid bit */ + object->i_gid = parent->i_gid; + if (S_ISDIR(object->i_mode)) + /* sguid is inherited by sub-directories */ + object->i_mode |= S_ISGID; + } else + object->i_gid = current_fsgid(); + + /* this object doesn't have stat-data yet */ + reiser4_inode_set_flag(object, REISER4_NO_SD); +#if 0 + /* this is now called after all inode plugins are initialized: + do_create_vfs_child after adjust_to_parent */ + /* setup inode and file-operations for this inode */ + setup_inode_ops(object, data); +#endif + reiser4_seal_init(&reiser4_inode_data(object)->sd_seal, NULL, NULL); + mask = (1 << UNIX_STAT) | (1 << LIGHT_WEIGHT_STAT); + if (!reiser4_is_set(object->i_sb, REISER4_32_BIT_TIMES)) + mask |= (1 << LARGE_TIMES_STAT); + + reiser4_inode_data(object)->extmask = mask; + return 0; +} + +/* this is common implementation of adjust_to_parent method of file plugin for + regular files + */ +int adjust_to_parent_common(struct inode *object /* new object */ , + struct inode *parent /* parent directory */ , + struct inode *root/* root directory */) +{ + assert("nikita-2165", object != NULL); + if (parent == NULL) + parent = root; + assert("nikita-2069", parent != NULL); + + /* + * inherit missing plugins from parent + */ + + grab_plugin_pset(object, parent, PSET_FILE); + grab_plugin_pset(object, parent, PSET_SD); + grab_plugin_pset(object, parent, PSET_FORMATTING); + grab_plugin_pset(object, parent, PSET_PERM); + return 0; +} + +/* this is common implementation of adjust_to_parent method of file plugin for + typical directories + */ +int adjust_to_parent_common_dir(struct inode *object /* new object */ , + struct inode *parent /* parent directory */ , + struct inode *root/* root directory */) +{ + int result = 0; + pset_member memb; + + 
assert("nikita-2166", object != NULL); + if (parent == NULL) + parent = root; + assert("nikita-2167", parent != NULL); + + /* + * inherit missing plugins from parent + */ + for (memb = 0; memb < PSET_LAST; ++memb) { + result = grab_plugin_pset(object, parent, memb); + if (result != 0) + break; + } + return result; +} + +int adjust_to_parent_cryptcompress(struct inode *object /* new object */ , + struct inode *parent /* parent directory */, + struct inode *root/* root directory */) +{ + int result; + result = adjust_to_parent_common(object, parent, root); + if (result) + return result; + assert("edward-1416", parent != NULL); + + grab_plugin_pset(object, parent, PSET_CLUSTER); + grab_plugin_pset(object, parent, PSET_CIPHER); + grab_plugin_pset(object, parent, PSET_DIGEST); + grab_plugin_pset(object, parent, PSET_COMPRESSION); + grab_plugin_pset(object, parent, PSET_COMPRESSION_MODE); + + return 0; +} + +/* this is common implementation of create_object method of file plugin + */ +int reiser4_create_object_common(struct inode *object, struct inode *parent, + reiser4_object_create_data * data) +{ + reiser4_block_nr reserve; + assert("nikita-744", object != NULL); + assert("nikita-745", parent != NULL); + assert("nikita-747", data != NULL); + assert("nikita-748", reiser4_inode_get_flag(object, REISER4_NO_SD)); + + reserve = estimate_create_common(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + return write_sd_by_inode_common(object); +} + +static int common_object_delete_no_reserve(struct inode *inode); + +/** + * reiser4_delete_object_common - delete_object of file_plugin + * @inode: inode to be deleted + * + * This is common implementation of delete_object method of file_plugin. It + * applies to object its deletion consists of removing two items - stat data + * and safe-link. 
+ */ +int reiser4_delete_object_common(struct inode *inode) +{ + int result; + + assert("nikita-1477", inode != NULL); + /* FIXME: if file body deletion failed (i/o error, for instance), + inode->i_size can be != 0 here */ + assert("nikita-3420", inode->i_size == 0 || S_ISLNK(inode->i_mode)); + assert("nikita-3421", inode->i_nlink == 0); + + if (!reiser4_inode_get_flag(inode, REISER4_NO_SD)) { + reiser4_block_nr reserve; + + /* grab space which is needed to remove 2 items from the tree: + stat data and safe-link */ + reserve = 2 * + estimate_one_item_removal(reiser4_tree_by_inode(inode)); + if (reiser4_grab_space_force(reserve, + BA_RESERVED | BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + result = common_object_delete_no_reserve(inode); + } else + result = 0; + return result; +} + +/** + * reiser4_delete_dir_common - delete_object of file_plugin + * @inode: inode to be deleted + * + * This is common implementation of delete_object method of file_plugin for + * typical directory. It calls done method of dir_plugin to remove "." and + * removes stat data and safe-link. 
+ */ +int reiser4_delete_dir_common(struct inode *inode) +{ + int result; + dir_plugin *dplug; + + assert("", (get_current_context() && + get_current_context()->trans->atom == NULL)); + + dplug = inode_dir_plugin(inode); + assert("vs-1101", dplug && dplug->done); + + /* kill cursors which might be attached to inode */ + reiser4_kill_cursors(inode); + + /* grab space enough for removing two items */ + if (reiser4_grab_space + (2 * estimate_one_item_removal(reiser4_tree_by_inode(inode)), + BA_RESERVED | BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + result = dplug->done(inode); + if (!result) + result = common_object_delete_no_reserve(inode); + return result; +} + +/* this is common implementation of add_link method of file plugin + */ +int reiser4_add_link_common(struct inode *object, struct inode *parent) +{ + /* + * increment ->i_nlink and update ->i_ctime + */ + + INODE_INC_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of rem_link method of file plugin + */ +int reiser4_rem_link_common(struct inode *object, struct inode *parent) +{ + assert("nikita-2021", object != NULL); + assert("nikita-2163", object->i_nlink > 0); + + /* + * decrement ->i_nlink and update ->i_ctime + */ + + INODE_DROP_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of rem_link method of file plugin for typical + directory +*/ +int rem_link_common_dir(struct inode *object, struct inode *parent UNUSED_ARG) +{ + assert("nikita-20211", object != NULL); + assert("nikita-21631", object->i_nlink > 0); + + /* + * decrement ->i_nlink and update ->i_ctime + */ + if(object->i_nlink == 2) + INODE_SET_NLINK(object, 0); + + else + INODE_DROP_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of owns_item method of file plugin + compare objectids of keys in inode and coord */ +int owns_item_common(const struct inode *inode, /* object 
to check + * against */ + const coord_t *coord/* coord to check */) +{ + reiser4_key item_key; + reiser4_key file_key; + + assert("nikita-760", inode != NULL); + assert("nikita-761", coord != NULL); + + return coord_is_existing_item(coord) && + (get_key_objectid(build_sd_key(inode, &file_key)) == + get_key_objectid(item_key_by_coord(coord, &item_key))); +} + +/* this is common implementation of owns_item method of file plugin + for typical directory +*/ +int owns_item_common_dir(const struct inode *inode,/* object to check against */ + const coord_t *coord/* coord of item to check */) +{ + reiser4_key item_key; + + assert("nikita-1335", inode != NULL); + assert("nikita-1334", coord != NULL); + + if (plugin_of_group(item_plugin_by_coord(coord), DIR_ENTRY_ITEM_TYPE)) + return get_key_locality(item_key_by_coord(coord, &item_key)) == + get_inode_oid(inode); + else + return owns_item_common(inode, coord); +} + +/* this is common implementation of can_add_link method of file plugin + checks whether yet another hard links to this object can be added +*/ +int can_add_link_common(const struct inode *object/* object to check */) +{ + assert("nikita-732", object != NULL); + + /* inode->i_nlink is unsigned int, so just check for integer + overflow */ + return object->i_nlink + 1 != 0; +} + +/* this is common implementation of can_rem_link method of file plugin for + typical directory +*/ +int can_rem_link_common_dir(const struct inode *inode) +{ + /* is_dir_empty() returns 0 is dir is empty */ + return !is_dir_empty(inode); +} + +/* this is common implementation of detach method of file plugin for typical + directory +*/ +int reiser4_detach_common_dir(struct inode *child, struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(child); + assert("nikita-2883", dplug != NULL); + assert("nikita-2884", dplug->detach != NULL); + return dplug->detach(child, parent); +} + +/* this is common implementation of bind method of file plugin for typical + directory +*/ 
+int reiser4_bind_common_dir(struct inode *child, struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(child); + assert("nikita-2646", dplug != NULL); + return dplug->attach(child, parent); +} + +static int process_truncate(struct inode *, __u64 size); + +/* this is common implementation of safelink method of file plugin + */ +int safelink_common(struct inode *object, reiser4_safe_link_t link, __u64 value) +{ + int result; + + assert("vs-1705", get_current_context()->trans->atom == NULL); + if (link == SAFE_UNLINK) + /* nothing to do. iput() in the caller (process_safelink) will + * finish with file */ + result = 0; + else if (link == SAFE_TRUNCATE) + result = process_truncate(object, value); + else { + warning("nikita-3438", "Unrecognized safe-link type: %i", link); + result = RETERR(-EIO); + } + return result; +} + +/* this is common implementation of estimate.create method of file plugin + can be used when object creation involves insertion of one item (usually stat + data) into tree +*/ +reiser4_block_nr estimate_create_common(const struct inode *object) +{ + return estimate_one_insert_item(reiser4_tree_by_inode(object)); +} + +/* this is common implementation of estimate.create method of file plugin for + typical directory + can be used when directory creation involves insertion of two items (usually + stat data and item containing "." 
and "..") into tree +*/ +reiser4_block_nr estimate_create_common_dir(const struct inode *object) +{ + return 2 * estimate_one_insert_item(reiser4_tree_by_inode(object)); +} + +/* this is common implementation of estimate.update method of file plugin + can be used when stat data update does not do more than inserting a unit + into a stat data item which is probably true for most cases +*/ +reiser4_block_nr estimate_update_common(const struct inode *inode) +{ + return estimate_one_insert_into_item(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.unlink method of file plugin + */ +reiser4_block_nr +estimate_unlink_common(const struct inode *object UNUSED_ARG, + const struct inode *parent UNUSED_ARG) +{ + return 0; +} + +/* this is common implementation of estimate.unlink method of file plugin for + typical directory +*/ +reiser4_block_nr +estimate_unlink_common_dir(const struct inode *object, + const struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(object); + assert("nikita-2888", dplug != NULL); + assert("nikita-2887", dplug->estimate.unlink != NULL); + return dplug->estimate.unlink(object, parent); +} + +char *wire_write_common(struct inode *inode, char *start) +{ + return build_inode_onwire(inode, start); +} + +char *wire_read_common(char *addr, reiser4_object_on_wire * obj) +{ + if (!obj) + return locate_obj_key_id_onwire(addr); + return extract_obj_key_id_from_onwire(addr, &obj->u.std.key_id); +} + +struct dentry *wire_get_common(struct super_block *sb, + reiser4_object_on_wire * obj) +{ + struct inode *inode; + struct dentry *dentry; + reiser4_key key; + + extract_key_from_id(&obj->u.std.key_id, &key); + inode = reiser4_iget(sb, &key, 1); + if (!IS_ERR(inode)) { + reiser4_iget_complete(inode); + dentry = d_obtain_alias(inode); + if (!IS_ERR(dentry)) + dentry->d_op = &get_super_private(sb)->ops.dentry; + } else if (PTR_ERR(inode) == -ENOENT) + /* + * inode wasn't found at the key encoded in the file + * 
handle. Hence, file handle is stale. + */ + dentry = ERR_PTR(RETERR(-ESTALE)); + else + dentry = (void *)inode; + return dentry; +} + +int wire_size_common(struct inode *inode) +{ + return inode_onwire_size(inode); +} + +void wire_done_common(reiser4_object_on_wire * obj) +{ + /* nothing to do */ +} + +/* helper function to print errors */ +static void key_warning(const reiser4_key * key /* key to print */ , + const struct inode *inode, + int code/* error code to print */) +{ + assert("nikita-716", key != NULL); + + if (code != -ENOMEM) { + warning("nikita-717", "Error for inode %llu (%i)", + (unsigned long long)get_key_objectid(key), code); + reiser4_print_key("for key", key); + } +} + +/* NIKITA-FIXME-HANS: perhaps this function belongs in another file? */ +#if REISER4_DEBUG +static void +check_inode_seal(const struct inode *inode, + const coord_t *coord, const reiser4_key * key) +{ + reiser4_key unit_key; + + unit_key_by_coord(coord, &unit_key); + assert("nikita-2752", + WITH_DATA_RET(coord->node, 1, keyeq(key, &unit_key))); + assert("nikita-2753", get_inode_oid(inode) == get_key_objectid(key)); +} + +static void check_sd_coord(coord_t *coord, const reiser4_key * key) +{ + reiser4_key ukey; + + coord_clear_iplug(coord); + if (zload(coord->node)) + return; + + if (!coord_is_existing_unit(coord) || + !item_plugin_by_coord(coord) || + !keyeq(unit_key_by_coord(coord, &ukey), key) || + (znode_get_level(coord->node) != LEAF_LEVEL) || + !item_is_statdata(coord)) { + warning("nikita-1901", "Conspicuous seal"); + reiser4_print_key("key", key); + print_coord("coord", coord, 1); + impossible("nikita-2877", "no way"); + } + zrelse(coord->node); +} + +#else +#define check_inode_seal(inode, coord, key) noop +#define check_sd_coord(coord, key) noop +#endif + +/* insert new stat-data into tree. Called with inode state + locked. Return inode state locked. 
*/ +static int insert_new_sd(struct inode *inode/* inode to create sd for */) +{ + int result; + reiser4_key key; + coord_t coord; + reiser4_item_data data; + char *area; + reiser4_inode *ref; + lock_handle lh; + oid_t oid; + + assert("nikita-723", inode != NULL); + assert("nikita-3406", reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + ref = reiser4_inode_data(inode); + spin_lock_inode(inode); + + if (ref->plugin_mask != 0) + /* inode has non-standard plugins */ + inode_set_extension(inode, PLUGIN_STAT); + /* + * prepare specification of new item to be inserted + */ + + data.iplug = inode_sd_plugin(inode); + data.length = data.iplug->s.sd.save_len(inode); + spin_unlock_inode(inode); + + data.data = NULL; + data.user = 0; +/* could be optimized for case where there is only one node format in + * use in the filesystem, probably there are lots of such + * places we could optimize for only one node layout.... -Hans */ + if (data.length > reiser4_tree_by_inode(inode)->nplug->max_item_size()) { + /* This is silly check, but we don't know actual node where + insertion will go into. */ + return RETERR(-ENAMETOOLONG); + } + oid = oid_allocate(inode->i_sb); +/* NIKITA-FIXME-HANS: what is your opinion on whether this error check should be + * encapsulated into oid_allocate? */ + if (oid == ABSOLUTE_MAX_OID) + return RETERR(-EOVERFLOW); + + set_inode_oid(inode, oid); + + coord_init_zero(&coord); + init_lh(&lh); + + result = insert_by_key(reiser4_tree_by_inode(inode), + build_sd_key(inode, &key), &data, &coord, &lh, + /* stat data lives on a leaf level */ + LEAF_LEVEL, CBK_UNIQUE); + + /* we don't want to re-check that somebody didn't insert + stat-data while we were doing io, because if it did, + insert_by_key() returned error. */ + /* but what _is_ possible is that plugin for inode's stat-data, + list of non-standard plugins or their state would change + during io, so that stat-data wouldn't fit into sd. To avoid + this race we keep inode_state lock. 
This lock has to be + taken each time you access inode in a way that would cause + changes in sd size: changing plugins etc. + */ + + if (result == IBK_INSERT_OK) { + coord_clear_iplug(&coord); + result = zload(coord.node); + if (result == 0) { + /* have we really inserted stat data? */ + assert("nikita-725", item_is_statdata(&coord)); + + /* inode was just created. It is inserted into hash + table, but no directory entry was yet inserted into + parent. So, inode is inaccessible through + ->lookup(). All places that directly grab inode + from hash-table (like old knfsd), should check + IMMUTABLE flag that is set by common_create_child. + */ + assert("nikita-3240", data.iplug != NULL); + assert("nikita-3241", data.iplug->s.sd.save != NULL); + area = item_body_by_coord(&coord); + result = data.iplug->s.sd.save(inode, &area); + znode_make_dirty(coord.node); + if (result == 0) { + /* object has stat-data now */ + reiser4_inode_clr_flag(inode, REISER4_NO_SD); + reiser4_inode_set_flag(inode, + REISER4_SDLEN_KNOWN); + /* initialise stat-data seal */ + reiser4_seal_init(&ref->sd_seal, &coord, &key); + ref->sd_coord = coord; + check_inode_seal(inode, &coord, &key); + } else if (result != -ENOMEM) + /* + * convert any other error code to -EIO to + * avoid confusing user level with unexpected + * errors. 
+ */ + result = RETERR(-EIO); + zrelse(coord.node); + } + } + done_lh(&lh); + + if (result != 0) + key_warning(&key, inode, result); + else + oid_count_allocated(); + + return result; +} + +/* find sd of inode in a tree, deal with errors */ +int lookup_sd(struct inode *inode /* inode to look sd for */ , + znode_lock_mode lock_mode /* lock mode */ , + coord_t *coord /* resulting coord */ , + lock_handle * lh /* resulting lock handle */ , + const reiser4_key * key /* resulting key */ , + int silent) +{ + int result; + __u32 flags; + + assert("nikita-1692", inode != NULL); + assert("nikita-1693", coord != NULL); + assert("nikita-1694", key != NULL); + + /* look for the object's stat data in a tree. + This returns in "node" pointer to a locked znode and in "pos" + position of an item found in node. Both are only valid if + coord_found is returned. */ + flags = (lock_mode == ZNODE_WRITE_LOCK) ? CBK_FOR_INSERT : 0; + flags |= CBK_UNIQUE; + /* + * traverse tree to find stat data. We cannot use vroot here, because + * it only covers _body_ of the file, and stat data don't belong + * there. 
+ */ + result = coord_by_key(reiser4_tree_by_inode(inode), + key, + coord, + lh, + lock_mode, + FIND_EXACT, LEAF_LEVEL, LEAF_LEVEL, flags, NULL); + if (REISER4_DEBUG && result == 0) + check_sd_coord(coord, key); + + if (result != 0 && !silent) + key_warning(key, inode, result); + return result; +} + +static int +locate_inode_sd(struct inode *inode, + reiser4_key * key, coord_t *coord, lock_handle * lh) +{ + reiser4_inode *state; + seal_t seal; + int result; + + assert("nikita-3483", inode != NULL); + + state = reiser4_inode_data(inode); + spin_lock_inode(inode); + *coord = state->sd_coord; + coord_clear_iplug(coord); + seal = state->sd_seal; + spin_unlock_inode(inode); + + build_sd_key(inode, key); + /* first, try to use seal */ + if (reiser4_seal_is_set(&seal)) { + result = reiser4_seal_validate(&seal, + coord, + key, + lh, ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + check_sd_coord(coord, key); + return 0; + } + } + /* hint is invalid, + * so traverse tree + */ + coord_init_zero(coord); + return lookup_sd(inode, ZNODE_WRITE_LOCK, coord, lh, key, 0); +} + +#if REISER4_DEBUG +static int all_but_offset_key_eq(const reiser4_key * k1, const reiser4_key * k2) +{ + return (get_key_locality(k1) == get_key_locality(k2) && + get_key_type(k1) == get_key_type(k2) && + get_key_band(k1) == get_key_band(k2) && + get_key_ordering(k1) == get_key_ordering(k2) && + get_key_objectid(k1) == get_key_objectid(k2)); +} + +#include "../tree_walk.h" + +/* make some checks before and after stat-data resize operation */ +static int check_sd_resize(struct inode *inode, coord_t *coord, + int length, int progress/* 1 means after resize */) +{ + int ret = 0; + lock_handle left_lock; + coord_t left_coord; + reiser4_key left_key; + reiser4_key key; + + if (inode_file_plugin(inode) != + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) + return 0; + if (!length) + return 0; + if (coord->item_pos != 0) + return 0; + + init_lh(&left_lock); + ret = 
reiser4_get_left_neighbor(&left_lock, + coord->node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (ret == -E_REPEAT || ret == -E_NO_NEIGHBOR || + ret == -ENOENT || ret == -EINVAL + || ret == -E_DEADLOCK) { + ret = 0; + goto exit; + } + ret = zload(left_lock.node); + if (ret) + goto exit; + coord_init_last_unit(&left_coord, left_lock.node); + item_key_by_coord(&left_coord, &left_key); + item_key_by_coord(coord, &key); + + if (all_but_offset_key_eq(&key, &left_key)) + /* corruption occured */ + ret = 1; + zrelse(left_lock.node); + exit: + done_lh(&left_lock); + return ret; +} +#endif + +/* update stat-data at @coord */ +static int +update_sd_at(struct inode *inode, coord_t *coord, reiser4_key * key, + lock_handle * lh) +{ + int result; + reiser4_item_data data; + char *area; + reiser4_inode *state; + znode *loaded; + + state = reiser4_inode_data(inode); + + coord_clear_iplug(coord); + result = zload(coord->node); + if (result != 0) + return result; + loaded = coord->node; + + spin_lock_inode(inode); + assert("nikita-728", inode_sd_plugin(inode) != NULL); + data.iplug = inode_sd_plugin(inode); + + /* if inode has non-standard plugins, add appropriate stat data + * extension */ + if (state->extmask & (1 << PLUGIN_STAT)) { + if (state->plugin_mask == 0) + inode_clr_extension(inode, PLUGIN_STAT); + } else if (state->plugin_mask != 0) + inode_set_extension(inode, PLUGIN_STAT); + + if (state->extmask & (1 << HEIR_STAT)) { + if (state->heir_mask == 0) + inode_clr_extension(inode, HEIR_STAT); + } else if (state->heir_mask != 0) + inode_set_extension(inode, HEIR_STAT); + + /* data.length is how much space to add to (or remove + from if negative) sd */ + if (!reiser4_inode_get_flag(inode, REISER4_SDLEN_KNOWN)) { + /* recalculate stat-data length */ + data.length = + data.iplug->s.sd.save_len(inode) - + item_length_by_coord(coord); + reiser4_inode_set_flag(inode, REISER4_SDLEN_KNOWN); + } else + data.length = 0; + spin_unlock_inode(inode); + + /* if on-disk stat data is 
of different length than required + for this inode, resize it */ + + if (data.length != 0) { + data.data = NULL; + data.user = 0; + + assert("edward-1441", + !check_sd_resize(inode, coord, + data.length, 0/* before resize */)); + + /* insertion code requires that insertion point (coord) was + * between units. */ + coord->between = AFTER_UNIT; + result = reiser4_resize_item(coord, &data, key, lh, + COPI_DONT_SHIFT_LEFT); + if (result != 0) { + key_warning(key, inode, result); + zrelse(loaded); + return result; + } + if (loaded != coord->node) { + /* reiser4_resize_item moved coord to another node. + Zload it */ + zrelse(loaded); + coord_clear_iplug(coord); + result = zload(coord->node); + if (result != 0) + return result; + loaded = coord->node; + } + assert("edward-1442", + !check_sd_resize(inode, coord, + data.length, 1/* after resize */)); + } + area = item_body_by_coord(coord); + spin_lock_inode(inode); + result = data.iplug->s.sd.save(inode, &area); + znode_make_dirty(coord->node); + + /* re-initialise stat-data seal */ + + /* + * coord.between was possibly skewed from AT_UNIT when stat-data size + * was changed and new extensions were pasted into item. + */ + coord->between = AT_UNIT; + reiser4_seal_init(&state->sd_seal, coord, key); + state->sd_coord = *coord; + spin_unlock_inode(inode); + check_inode_seal(inode, coord, key); + zrelse(loaded); + return result; +} + +/* Update existing stat-data in a tree. Called with inode state locked. Return + inode state locked. */ +static int update_sd(struct inode *inode/* inode to update sd for */) +{ + int result; + reiser4_key key; + coord_t coord; + lock_handle lh; + + assert("nikita-726", inode != NULL); + + /* no stat-data, nothing to update?! 
*/ + assert("nikita-3482", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + init_lh(&lh); + + result = locate_inode_sd(inode, &key, &coord, &lh); + if (result == 0) + result = update_sd_at(inode, &coord, &key, &lh); + done_lh(&lh); + + return result; +} + +/* helper for reiser4_delete_object_common and reiser4_delete_dir_common. + Remove object stat data. Space for that must be reserved by caller before +*/ +static int +common_object_delete_no_reserve(struct inode *inode/* object to remove */) +{ + int result; + + assert("nikita-1477", inode != NULL); + + if (!reiser4_inode_get_flag(inode, REISER4_NO_SD)) { + reiser4_key sd_key; + + build_sd_key(inode, &sd_key); + result = + reiser4_cut_tree(reiser4_tree_by_inode(inode), + &sd_key, &sd_key, NULL, 0); + if (result == 0) { + reiser4_inode_set_flag(inode, REISER4_NO_SD); + result = oid_release(inode->i_sb, get_inode_oid(inode)); + if (result == 0) { + oid_count_released(); + + result = safe_link_del(reiser4_tree_by_inode(inode), + get_inode_oid(inode), + SAFE_UNLINK); + } + } + } else + result = 0; + return result; +} + +/* helper for safelink_common */ +static int process_truncate(struct inode *inode, __u64 size) +{ + int result; + struct iattr attr; + file_plugin *fplug; + reiser4_context *ctx; + struct dentry dentry; + + assert("vs-21", is_in_reiser4_context()); + ctx = reiser4_init_context(inode->i_sb); + assert("vs-22", !IS_ERR(ctx)); + + attr.ia_size = size; + attr.ia_valid = ATTR_SIZE | ATTR_CTIME; + fplug = inode_file_plugin(inode); + + inode_lock(inode); + assert("vs-1704", get_current_context()->trans->atom == NULL); + dentry.d_inode = inode; + result = inode->i_op->setattr(&dentry, &attr); + inode_unlock(inode); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return result; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/hash.c 
b/fs/reiser4/plugin/hash.c new file mode 100644 index 000000000000..999f7b1eca4d --- /dev/null +++ b/fs/reiser4/plugin/hash.c @@ -0,0 +1,352 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Hash functions */ + +#include "../debug.h" +#include "plugin_header.h" +#include "plugin.h" +#include "../super.h" +#include "../inode.h" + +#include + +/* old rupasov (yura) hash */ +static __u64 hash_rupasov(const unsigned char *name /* name to hash */ , + int len/* @name's length */) +{ + int i; + int j; + int pow; + __u64 a; + __u64 c; + + assert("nikita-672", name != NULL); + assert("nikita-673", len >= 0); + + for (pow = 1, i = 1; i < len; ++i) + pow = pow * 10; + + if (len == 1) + a = name[0] - 48; + else + a = (name[0] - 48) * pow; + + for (i = 1; i < len; ++i) { + c = name[i] - 48; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + for (; i < 40; ++i) { + c = '0' - 48; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + + for (; i < 256; ++i) { + c = i; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + + a = a << 7; + return a; +} + +/* r5 hash */ +static __u64 hash_r5(const unsigned char *name /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + __u64 a = 0; + + assert("nikita-674", name != NULL); + assert("nikita-675", len >= 0); + + while (*name) { + a += *name << 4; + a += *name >> 4; + a *= 11; + name++; + } + return a; +} + +/* Keyed 32-bit hash function using TEA in a Davis-Meyer function + H0 = Key + Hi = E Mi(Hi-1) + Hi-1 + + (see Applied Cryptography, 2nd edition, p448). + + Jeremy Fitzhardinge 1998 + + Jeremy has agreed to the contents of reiserfs/README. -Hans + + This code was blindly upgraded to __u64 by s/__u32/__u64/g. 
+*/ +static __u64 hash_tea(const unsigned char *name /* name to hash */ , + int len/* @name's length */) +{ + __u64 k[] = { 0x9464a485u, 0x542e1a94u, 0x3e846bffu, 0xb75bcfc3u }; + + __u64 h0 = k[0], h1 = k[1]; + __u64 a, b, c, d; + __u64 pad; + int i; + + assert("nikita-676", name != NULL); + assert("nikita-677", len >= 0); + +#define DELTA 0x9E3779B9u +#define FULLROUNDS 10 /* 32 is overkill, 16 is strong crypto */ +#define PARTROUNDS 6 /* 6 gets complete mixing */ + +/* a, b, c, d - data; h0, h1 - accumulated hash */ +#define TEACORE(rounds) \ + do { \ + __u64 sum = 0; \ + int n = rounds; \ + __u64 b0, b1; \ + \ + b0 = h0; \ + b1 = h1; \ + \ + do { \ + sum += DELTA; \ + b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \ + b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ + } while (--n); \ + \ + h0 += b0; \ + h1 += b1; \ + } while (0) + + pad = (__u64) len | ((__u64) len << 8); + pad |= pad << 16; + + while (len >= 16) { + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + c = (__u64) name[8] | (__u64) name[9] << 8 | (__u64) name[10] << + 16 | (__u64) name[11] << 24; + d = (__u64) name[12] | (__u64) name[13] << 8 | (__u64) name[14] + << 16 | (__u64) name[15] << 24; + + TEACORE(PARTROUNDS); + + len -= 16; + name += 16; + } + + if (len >= 12) { + /* assert(len < 16); */ + if (len >= 16) + *(int *)0 = 0; + + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + c = (__u64) name[8] | (__u64) name[9] << 8 | (__u64) name[10] << + 16 | (__u64) name[11] << 24; + + d = pad; + for (i = 12; i < len; i++) { + d <<= 8; + d |= name[i]; + } + } else if (len >= 8) { + /* assert(len < 12); */ + if (len >= 12) + *(int *)0 = 0; + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) 
name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + + c = d = pad; + for (i = 8; i < len; i++) { + c <<= 8; + c |= name[i]; + } + } else if (len >= 4) { + /* assert(len < 8); */ + if (len >= 8) + *(int *)0 = 0; + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + + b = c = d = pad; + for (i = 4; i < len; i++) { + b <<= 8; + b |= name[i]; + } + } else { + /* assert(len < 4); */ + if (len >= 4) + *(int *)0 = 0; + a = b = c = d = pad; + for (i = 0; i < len; i++) { + a <<= 8; + a |= name[i]; + } + } + + TEACORE(FULLROUNDS); + +/* return 0;*/ + return h0 ^ h1; + +} + +/* classical 64 bit Fowler/Noll/Vo-1 (FNV-1) hash. + + See http://www.isthe.com/chongo/tech/comp/fnv/ for details. + + Excerpts: + + FNV hashes are designed to be fast while maintaining a low collision + rate. + + [This version also seems to preserve lexicographical order locally.] + + FNV hash algorithms and source code have been released into the public + domain. 
+ +*/ +static __u64 hash_fnv1(const unsigned char *name /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + unsigned long long a = 0xcbf29ce484222325ull; + const unsigned long long fnv_64_prime = 0x100000001b3ull; + + assert("nikita-678", name != NULL); + assert("nikita-679", len >= 0); + + /* FNV-1 hash each octet in the buffer */ + for (; *name; ++name) { + /* multiply by the 32 bit FNV magic prime mod 2^64 */ + a *= fnv_64_prime; + /* xor the bottom with the current octet */ + a ^= (unsigned long long)(*name); + } + /* return our new hash value */ + return a; +} + +/* degenerate hash function used to simplify testing of non-unique key + handling */ +static __u64 hash_deg(const unsigned char *name UNUSED_ARG /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + return 0xc0c0c0c010101010ull; +} + +static int change_hash(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + int result; + + assert("nikita-3503", inode != NULL); + assert("nikita-3504", plugin != NULL); + + assert("nikita-3505", is_reiser4_inode(inode)); + assert("nikita-3507", plugin->h.type_id == REISER4_HASH_PLUGIN_TYPE); + + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + result = 0; + if (inode_hash_plugin(inode) == NULL || + inode_hash_plugin(inode)->h.id != plugin->h.id) { + if (is_dir_empty(inode) == 0) + result = aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_HASH, plugin); + else + result = RETERR(-ENOTEMPTY); + + } + return result; +} + +static reiser4_plugin_ops hash_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_hash +}; + +/* hash plugins */ +hash_plugin hash_plugins[LAST_HASH_ID] = { + [RUPASOV_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = RUPASOV_HASH_ID, + .pops = &hash_plugin_ops, + .label = "rupasov", + .desc = "Original Yura's hash", + .linkage = {NULL, NULL} + }, + .hash = hash_rupasov + }, + 
[R5_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = R5_HASH_ID, + .pops = &hash_plugin_ops, + .label = "r5", + .desc = "r5 hash", + .linkage = {NULL, NULL} + }, + .hash = hash_r5 + }, + [TEA_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = TEA_HASH_ID, + .pops = &hash_plugin_ops, + .label = "tea", + .desc = "tea hash", + .linkage = {NULL, NULL} + }, + .hash = hash_tea + }, + [FNV1_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = FNV1_HASH_ID, + .pops = &hash_plugin_ops, + .label = "fnv1", + .desc = "fnv1 hash", + .linkage = {NULL, NULL} + }, + .hash = hash_fnv1 + }, + [DEGENERATE_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = DEGENERATE_HASH_ID, + .pops = &hash_plugin_ops, + .label = "degenerate hash", + .desc = "Degenerate hash: only for testing", + .linkage = {NULL, NULL} + }, + .hash = hash_deg + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/inode_ops.c b/fs/reiser4/plugin/inode_ops.c new file mode 100644 index 000000000000..64b50723fd76 --- /dev/null +++ b/fs/reiser4/plugin/inode_ops.c @@ -0,0 +1,891 @@ +/* + * Copyright 2005 by Hans Reiser, licensing governed by reiser4/README + */ + +/* + * this file contains typical implementations for most of methods of struct + * inode_operations + */ + +#include "../inode.h" +#include "../safe_link.h" + +#include + +static int create_vfs_object(struct inode *parent, struct dentry *dentry, + reiser4_object_create_data *data); + +/** + * reiser4_create_common - create of inode operations + * @parent: inode of parent directory + * @dentry: dentry of new object to create + * @mode: the permissions to use + * @exclusive: + * + * This is common implementation of vfs's create method of struct + * inode_operations. + * Creates regular file using file plugin from parent directory plugin set. 
+ */ +int reiser4_create_common(struct inode *parent, struct dentry *dentry, + umode_t mode, bool exclusive) +{ + reiser4_object_create_data data; + file_plugin *fplug; + + memset(&data, 0, sizeof data); + data.mode = S_IFREG | mode; + fplug = child_create_plugin(parent) ? : inode_create_plugin(parent); + if (!plugin_of_group(fplug, REISER4_REGULAR_FILE)) { + warning("vpf-1900", "'%s' is not a regular file plugin.", + fplug->h.label); + return RETERR(-EIO); + } + data.id = fplug->h.id; + return create_vfs_object(parent, dentry, &data); +} + +int reiser4_lookup_name(struct inode *dir, struct dentry *, reiser4_key *); +void check_light_weight(struct inode *inode, struct inode *parent); + +/** + * reiser4_lookup_common - lookup of inode operations + * @parent: inode of directory to lookup into + * @dentry: name to look for + * @flags: + * + * This is common implementation of vfs's lookup method of struct + * inode_operations. + */ +struct dentry *reiser4_lookup_common(struct inode *parent, + struct dentry *dentry, + unsigned int flags) +{ + reiser4_context *ctx; + int result; + struct dentry *new; + struct inode *inode; + reiser4_dir_entry_desc entry; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + + /* set up operations on dentry. 
*/ + dentry->d_op = &get_super_private(parent->i_sb)->ops.dentry; + + result = reiser4_lookup_name(parent, dentry, &entry.key); + if (result) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + if (result == -ENOENT) { + /* object not found */ + if (!IS_DEADDIR(parent)) + d_add(dentry, NULL); + return NULL; + } + return ERR_PTR(result); + } + + inode = reiser4_iget(parent->i_sb, &entry.key, 0); + if (IS_ERR(inode)) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return ERR_PTR(PTR_ERR(inode)); + } + + /* success */ + check_light_weight(inode, parent); + new = d_splice_alias(inode, dentry); + reiser4_iget_complete(inode); + + /* prevent balance_dirty_pages() from being called: we don't want to + * do this under directory i_mutex. */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return new; +} + +static reiser4_block_nr common_estimate_link(struct inode *parent, + struct inode *object); +int reiser4_update_dir(struct inode *); + +static inline void reiser4_check_immutable(struct inode *inode) +{ + do { + if (!reiser4_inode_get_flag(inode, REISER4_IMMUTABLE)) + break; + yield(); + } while (1); +} + +/** + * reiser4_link_common - link of inode operations + * @existing: dentry of object which is to get new name + * @parent: directory where new name is to be created + * @newname: new name + * + * This is common implementation of vfs's link method of struct + * inode_operations. 
+ */ +int reiser4_link_common(struct dentry *existing, struct inode *parent, + struct dentry *newname) +{ + reiser4_context *ctx; + int result; + struct inode *object; + dir_plugin *parent_dplug; + reiser4_dir_entry_desc entry; + reiser4_object_create_data data; + reiser4_block_nr reserve; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-1431", existing != NULL); + assert("nikita-1432", parent != NULL); + assert("nikita-1433", newname != NULL); + + object = existing->d_inode; + assert("nikita-1434", object != NULL); + + /* check for race with create_object() */ + reiser4_check_immutable(object); + + parent_dplug = inode_dir_plugin(parent); + + memset(&entry, 0, sizeof entry); + entry.obj = object; + + data.mode = object->i_mode; + data.id = inode_file_plugin(object)->h.id; + + reserve = common_estimate_link(parent, existing->d_inode); + if ((__s64) reserve < 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return reserve; + } + + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOSPC); + } + + /* + * Subtle race handling: sys_link() doesn't take i_mutex on @parent. It + * means that link(2) can race against unlink(2) or rename(2), and + * inode is dead (->i_nlink == 0) when reiser4_link() is entered. + * + * For such inode we have to undo special processing done in + * reiser4_unlink() viz. creation of safe-link. 
+ */ + if (unlikely(object->i_nlink == 0)) { + result = safe_link_del(reiser4_tree_by_inode(object), + get_inode_oid(object), SAFE_UNLINK); + if (result != 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + } + + /* increment nlink of @existing and update its stat data */ + result = reiser4_add_nlink(object, parent, 1); + if (result == 0) { + /* add entry to the parent */ + result = + parent_dplug->add_entry(parent, newname, &data, &entry); + if (result != 0) { + /* failed to add entry to the parent, decrement nlink + of @existing */ + reiser4_del_nlink(object, parent, 1); + /* + * now, if that failed, we have a file with too big + * nlink---space leak, much better than directory + * entry pointing to nowhere + */ + } + } + if (result == 0) { + atomic_inc(&object->i_count); + /* + * Upon successful completion, link() shall mark for update + * the st_ctime field of the file. Also, the st_ctime and + * st_mtime fields of the directory that contains the new + * entry shall be marked for update. --SUS + */ + result = reiser4_update_dir(parent); + } + if (result == 0) + d_instantiate(newname, existing->d_inode); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +static int unlink_check_and_grab(struct inode *parent, struct dentry *victim); + +/** + * reiser4_unlink_common - unlink of inode operations + * @parent: inode of directory to remove name from + * @victim: name to be removed + * + * This is common implementation of vfs's unlink method of struct + * inode_operations. 
+ */ +int reiser4_unlink_common(struct inode *parent, struct dentry *victim) +{ + reiser4_context *ctx; + int result; + struct inode *object; + file_plugin *fplug; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + object = victim->d_inode; + fplug = inode_file_plugin(object); + assert("nikita-2882", fplug->detach != NULL); + + result = unlink_check_and_grab(parent, victim); + if (result != 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + result = fplug->detach(object, parent); + if (result == 0) { + dir_plugin *parent_dplug; + reiser4_dir_entry_desc entry; + + parent_dplug = inode_dir_plugin(parent); + memset(&entry, 0, sizeof entry); + + /* first, delete directory entry */ + result = parent_dplug->rem_entry(parent, victim, &entry); + if (result == 0) { + /* + * if name was removed successfully, we _have_ to + * return 0 from this function, because upper level + * caller (vfs_{rmdir,unlink}) expect this. + * + * now that directory entry is removed, update + * stat-data + */ + reiser4_del_nlink(object, parent, 1); + /* + * Upon successful completion, unlink() shall mark for + * update the st_ctime and st_mtime fields of the + * parent directory. Also, if the file's link count is + * not 0, the st_ctime field of the file shall be + * marked for update. --SUS + */ + reiser4_update_dir(parent); + /* add safe-link for this file */ + if (object->i_nlink == 0) + safe_link_add(object, SAFE_UNLINK); + } + } + + if (unlikely(result != 0)) { + if (result != -ENOMEM) + warning("nikita-3398", "Cannot unlink %llu (%i)", + (unsigned long long)get_inode_oid(object), + result); + /* if operation failed commit pending inode modifications to + * the stat-data */ + reiser4_update_sd(object); + reiser4_update_sd(parent); + } + + reiser4_release_reserved(object->i_sb); + + /* @object's i_ctime was updated by ->rem_link() method(). */ + + /* @victim can be already removed from the disk by this time. 
Inode is + then marked so that iput() wouldn't try to remove stat data. But + inode itself is still there. + */ + + /* + * we cannot release directory semaphore here, because name has + * already been deleted, but dentry (@victim) still exists. Prevent + * balance_dirty_pages() from being called on exiting this context: we + * don't want to do this under directory i_mutex. + */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_symlink_common - symlink of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @linkname: string symlink is to contain + * + * This is common implementation of vfs's symlink method of struct + * inode_operations. + * Creates object using file plugin SYMLINK_FILE_PLUGIN_ID. + */ +int reiser4_symlink_common(struct inode *parent, struct dentry *dentry, + const char *linkname) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.name = linkname; + data.id = SYMLINK_FILE_PLUGIN_ID; + data.mode = S_IFLNK | S_IRWXUGO; + return create_vfs_object(parent, dentry, &data); +} + +/** + * reiser4_mkdir_common - mkdir of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @mode: the permissions to use + * + * This is common implementation of vfs's mkdir method of struct + * inode_operations. + * Creates object using file plugin DIRECTORY_FILE_PLUGIN_ID. 
+ */ +int reiser4_mkdir_common(struct inode *parent, struct dentry *dentry, umode_t mode) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.mode = S_IFDIR | mode; + data.id = DIRECTORY_FILE_PLUGIN_ID; + return create_vfs_object(parent, dentry, &data); +} + +/** + * reiser4_mknod_common - mknod of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @mode: the permissions to use and file type + * @rdev: minor and major of new device file + * + * This is common implementation of vfs's mknod method of struct + * inode_operations. + * Creates object using file plugin SPECIAL_FILE_PLUGIN_ID. + */ +int reiser4_mknod_common(struct inode *parent, struct dentry *dentry, + umode_t mode, dev_t rdev) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.mode = mode; + data.rdev = rdev; + data.id = SPECIAL_FILE_PLUGIN_ID; + return create_vfs_object(parent, dentry, &data); +} + +/* + * implementation of vfs's rename method of struct inode_operations for typical + * directory is in inode_ops_rename.c + */ + +/** + * reiser4_get_link_common: ->get_link() of inode_operations + * @dentry: dentry of symlink + * + * Assumes that inode's i_private points to the content of symbolic link. + */ +const char *reiser4_get_link_common(struct dentry *dentry, + struct inode *inode, + struct delayed_call *done) +{ + if (!dentry) + return ERR_PTR(-ECHILD); + + assert("vs-851", S_ISLNK(dentry->d_inode->i_mode)); + + if (!dentry->d_inode->i_private || + !reiser4_inode_get_flag(dentry->d_inode, REISER4_GENERIC_PTR_USED)) + return ERR_PTR(RETERR(-EINVAL)); + + return dentry->d_inode->i_private; +} + +/** + * reiser4_permission_common - permission of inode operations + * @inode: inode to check permissions for + * @mask: mode bits to check permissions for + * @flags: + * + * Uses generic function to check for rwx permissions. 
+ */ +int reiser4_permission_common(struct inode *inode, int mask) +{ + // generic_permission() says that it's rcu-aware... +#if 0 + if (mask & MAY_NOT_BLOCK) + return -ECHILD; +#endif + return generic_permission(inode, mask); +} + +static int setattr_reserve(reiser4_tree *); + +/* this is common implementation of vfs's setattr method of struct + inode_operations +*/ +int reiser4_setattr_common(struct dentry *dentry, struct iattr *attr) +{ + reiser4_context *ctx; + struct inode *inode; + int result; + + inode = dentry->d_inode; + result = setattr_prepare(dentry, attr); + if (result) + return result; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-3119", !(attr->ia_valid & ATTR_SIZE)); + + /* + * grab disk space and call standard + * setattr_copy(); + * mark_inode_dirty(). + */ + result = setattr_reserve(reiser4_tree_by_inode(inode)); + if (!result) { + setattr_copy(inode, attr); + mark_inode_dirty(inode); + result = reiser4_update_sd(inode); + } + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* this is common implementation of vfs's getattr method of struct + inode_operations +*/ +int reiser4_getattr_common(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) +{ + struct inode *obj; + + assert("nikita-2298", path != NULL); + assert("nikita-2299", stat != NULL); + + obj = d_inode(path->dentry); + + stat->dev = obj->i_sb->s_dev; + stat->ino = oid_to_uino(get_inode_oid(obj)); + stat->mode = obj->i_mode; + /* don't confuse userland with huge nlink. This is not entirely + * correct, because nlink_t is not necessary 16 bit signed. 
*/ + stat->nlink = min(obj->i_nlink, (typeof(obj->i_nlink)) 0x7fff); + stat->uid = obj->i_uid; + stat->gid = obj->i_gid; + stat->rdev = obj->i_rdev; + stat->atime = obj->i_atime; + stat->mtime = obj->i_mtime; + stat->ctime = obj->i_ctime; + stat->size = obj->i_size; + stat->blocks = + (inode_get_bytes(obj) + VFS_BLKSIZE - 1) >> VFS_BLKSIZE_BITS; + /* "preferred" blocksize for efficient file system I/O */ + stat->blksize = get_super_private(obj->i_sb)->optimal_io_size; + + return 0; +} + +/* Estimate the maximum amount of nodes which might be allocated or changed on + typical new object creation. Typical creation consists of calling create + method of file plugin, adding directory entry to parent and update parent + directory's stat data. +*/ +static reiser4_block_nr estimate_create_vfs_object(struct inode *parent, + /* parent object */ + struct inode *object + /* object */) +{ + assert("vpf-309", parent != NULL); + assert("vpf-307", object != NULL); + + return + /* object creation estimation */ + inode_file_plugin(object)->estimate.create(object) + + /* stat data of parent directory estimation */ + inode_file_plugin(parent)->estimate.update(parent) + + /* adding entry estimation */ + inode_dir_plugin(parent)->estimate.add_entry(parent) + + /* to undo in the case of failure */ + inode_dir_plugin(parent)->estimate.rem_entry(parent); +} + +/* Create child in directory. + + . get object's plugin + . get fresh inode + . initialize inode + . add object's stat-data + . initialize object's directory + . add entry to the parent + . 
instantiate dentry + +*/ +static int do_create_vfs_child(reiser4_object_create_data * data,/* parameters + of new + object */ + struct inode **retobj) +{ + int result; + + struct dentry *dentry; /* parent object */ + struct inode *parent; /* new name */ + + dir_plugin *par_dir; /* directory plugin on the parent */ + dir_plugin *obj_dir; /* directory plugin on the new object */ + file_plugin *obj_plug; /* object plugin on the new object */ + struct inode *object; /* new object */ + reiser4_block_nr reserve; + + reiser4_dir_entry_desc entry; /* new directory entry */ + + assert("nikita-1420", data != NULL); + parent = data->parent; + dentry = data->dentry; + + assert("nikita-1418", parent != NULL); + assert("nikita-1419", dentry != NULL); + + /* check, that name is acceptable for parent */ + par_dir = inode_dir_plugin(parent); + if (par_dir->is_name_acceptable && + !par_dir->is_name_acceptable(parent, + dentry->d_name.name, + (int)dentry->d_name.len)) + return RETERR(-ENAMETOOLONG); + + result = 0; + obj_plug = file_plugin_by_id((int)data->id); + if (obj_plug == NULL) { + warning("nikita-430", "Cannot find plugin %i", data->id); + return RETERR(-ENOENT); + } + object = new_inode(parent->i_sb); + if (object == NULL) + return RETERR(-ENOMEM); + /* new_inode() initializes i_ino to "arbitrary" value. Reset it to 0, + * to simplify error handling: if some error occurs before i_ino is + * initialized with oid, i_ino should already be set to some + * distinguished value. */ + object->i_ino = 0; + + /* So that on error iput will be called. 
*/ + *retobj = object; + + memset(&entry, 0, sizeof entry); + entry.obj = object; + + set_plugin(&reiser4_inode_data(object)->pset, PSET_FILE, + file_plugin_to_plugin(obj_plug)); + result = obj_plug->set_plug_in_inode(object, parent, data); + if (result) { + warning("nikita-431", "Cannot install plugin %i on %llx", + data->id, (unsigned long long)get_inode_oid(object)); + return result; + } + + /* reget plugin after installation */ + obj_plug = inode_file_plugin(object); + + if (obj_plug->create_object == NULL) { + return RETERR(-EPERM); + } + + /* if any of hash, tail, sd or permission plugins for newly created + object are not set yet set them here inheriting them from parent + directory + */ + assert("nikita-2070", obj_plug->adjust_to_parent != NULL); + result = obj_plug->adjust_to_parent(object, + parent, + object->i_sb->s_root->d_inode); + if (result == 0) + result = finish_pset(object); + if (result != 0) { + warning("nikita-432", "Cannot inherit from %llx to %llx", + (unsigned long long)get_inode_oid(parent), + (unsigned long long)get_inode_oid(object)); + return result; + } + + /* setup inode and file-operations for this inode */ + setup_inode_ops(object, data); + + /* call file plugin's method to initialize plugin specific part of + * inode */ + if (obj_plug->init_inode_data) + obj_plug->init_inode_data(object, data, 1/*create */); + + /* obtain directory plugin (if any) for new object. */ + obj_dir = inode_dir_plugin(object); + if (obj_dir != NULL && obj_dir->init == NULL) { + return RETERR(-EPERM); + } + + reiser4_inode_data(object)->locality_id = get_inode_oid(parent); + + reserve = estimate_create_vfs_object(parent, object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + return RETERR(-ENOSPC); + } + + /* mark inode `immutable'. We disable changes to the file being + created until valid directory entry for it is inserted. 
Otherwise, + if file were expanded and insertion of directory entry fails, we + have to remove file, but we only alloted enough space in + transaction to remove _empty_ file. 3.x code used to remove stat + data in different transaction thus possibly leaking disk space on + crash. This all only matters if it's possible to access file + without name, for example, by inode number + */ + reiser4_inode_set_flag(object, REISER4_IMMUTABLE); + + /* create empty object, this includes allocation of new objectid. For + directories this implies creation of dot and dotdot */ + assert("nikita-2265", reiser4_inode_get_flag(object, REISER4_NO_SD)); + + /* mark inode as `loaded'. From this point onward + reiser4_delete_inode() will try to remove its stat-data. */ + reiser4_inode_set_flag(object, REISER4_LOADED); + + result = obj_plug->create_object(object, parent, data); + if (result != 0) { + reiser4_inode_clr_flag(object, REISER4_IMMUTABLE); + if (result != -ENAMETOOLONG && result != -ENOMEM) + warning("nikita-2219", + "Failed to create sd for %llu", + (unsigned long long)get_inode_oid(object)); + return result; + } + + if (obj_dir != NULL) + result = obj_dir->init(object, parent, data); + if (result == 0) { + assert("nikita-434", !reiser4_inode_get_flag(object, + REISER4_NO_SD)); + /* insert inode into VFS hash table */ + insert_inode_hash(object); + /* create entry */ + result = par_dir->add_entry(parent, dentry, data, &entry); + if (result == 0) { + /* If O_CREAT is set and the file did not previously + exist, upon successful completion, open() shall + mark for update the st_atime, st_ctime, and + st_mtime fields of the file and the st_ctime and + st_mtime fields of the parent directory. 
--SUS + */ + object->i_ctime = current_time(object); + reiser4_update_dir(parent); + } + if (result != 0) + /* cleanup failure to add entry */ + obj_plug->detach(object, parent); + } else if (result != -ENOMEM) + warning("nikita-2219", "Failed to initialize dir for %llu: %i", + (unsigned long long)get_inode_oid(object), result); + + /* + * update stat-data, committing all pending modifications to the inode + * fields. + */ + reiser4_update_sd(object); + if (result != 0) { + /* if everything was ok (result == 0), parent stat-data is + * already updated above (update_parent_dir()) */ + reiser4_update_sd(parent); + /* failure to create entry, remove object */ + obj_plug->delete_object(object); + } + + /* file has name now, clear immutable flag */ + reiser4_inode_clr_flag(object, REISER4_IMMUTABLE); + + /* on error, iput() will call ->delete_inode(). We should keep track + of the existence of stat-data for this inode and avoid attempt to + remove it in reiser4_delete_inode(). This is accomplished through + REISER4_NO_SD bit in inode.u.reiser4_i.plugin.flags + */ + return result; +} + +/* this is helper for common implementations of reiser4_mkdir, reiser4_create, + reiser4_mknod and reiser4_symlink +*/ +static int +create_vfs_object(struct inode *parent, + struct dentry *dentry, reiser4_object_create_data * data) +{ + reiser4_context *ctx; + int result; + struct inode *child; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + context_set_commit_async(ctx); + + data->parent = parent; + data->dentry = dentry; + child = NULL; + result = do_create_vfs_child(data, &child); + if (unlikely(result != 0)) { + if (child != NULL) { + /* for unlinked inode accounting in iput() */ + clear_nlink(child); + reiser4_make_bad_inode(child); + iput(child); + } + } else + d_instantiate(dentry, child); + + reiser4_exit_context(ctx); + return result; +} + +/** + * helper for link_common. 
Estimate disk space necessary to add a link + * from @parent to @object + */ +static reiser4_block_nr common_estimate_link(struct inode *parent /* parent + * directory + */, + struct inode *object /* object to + * which new + * link is + * being + * created */) +{ + reiser4_block_nr res = 0; + file_plugin *fplug; + dir_plugin *dplug; + + assert("vpf-317", object != NULL); + assert("vpf-318", parent != NULL); + + fplug = inode_file_plugin(object); + dplug = inode_dir_plugin(parent); + /* VS-FIXME-HANS: why do we do fplug->estimate.update(object) twice + * instead of multiplying by 2? */ + /* reiser4_add_nlink(object) */ + res += fplug->estimate.update(object); + /* add_entry(parent) */ + res += dplug->estimate.add_entry(parent); + /* reiser4_del_nlink(object) */ + res += fplug->estimate.update(object); + /* update_dir(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + /* safe-link */ + res += estimate_one_item_removal(reiser4_tree_by_inode(object)); + + return res; +} + +/* Estimate disk space necessary to remove a link between @parent and + @object. +*/ +static reiser4_block_nr estimate_unlink(struct inode *parent /* parent + * directory */, + struct inode *object /* object to which + * new link is + * being created + */) +{ + reiser4_block_nr res = 0; + file_plugin *fplug; + dir_plugin *dplug; + + assert("vpf-317", object != NULL); + assert("vpf-318", parent != NULL); + + fplug = inode_file_plugin(object); + dplug = inode_dir_plugin(parent); + + /* rem_entry(parent) */ + res += dplug->estimate.rem_entry(parent); + /* reiser4_del_nlink(object) */ + res += fplug->estimate.update(object); + /* update_dir(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + /* fplug->unlink */ + res += fplug->estimate.unlink(object, parent); + /* safe-link */ + res += estimate_one_insert_item(reiser4_tree_by_inode(object)); + + return res; +} + +/* helper for reiser4_unlink_common. Estimate and grab space for unlink. 
*/ +static int unlink_check_and_grab(struct inode *parent, struct dentry *victim) +{ + file_plugin *fplug; + struct inode *child; + int result; + + result = 0; + child = victim->d_inode; + fplug = inode_file_plugin(child); + + /* check for race with create_object() */ + reiser4_check_immutable(child); + + /* object being deleted should have stat data */ + assert("vs-949", !reiser4_inode_get_flag(child, REISER4_NO_SD)); + + /* ask object plugin */ + if (fplug->can_rem_link != NULL && !fplug->can_rem_link(child)) + return RETERR(-ENOTEMPTY); + + result = (int)estimate_unlink(parent, child); + if (result < 0) + return result; + + return reiser4_grab_reserved(child->i_sb, result, BA_CAN_COMMIT); +} + +/* helper for reiser4_setattr_common */ +static int setattr_reserve(reiser4_tree * tree) +{ + assert("vs-1096", is_grab_enabled(get_current_context())); + return reiser4_grab_space(estimate_one_insert_into_item(tree), + BA_CAN_COMMIT); +} + +/* helper function. Standards require that for many file-system operations + on success ctime and mtime of parent directory is to be updated. */ +int reiser4_update_dir(struct inode *dir) +{ + assert("nikita-2525", dir != NULL); + + dir->i_ctime = dir->i_mtime = current_time(dir); + return reiser4_update_sd(dir); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/inode_ops_rename.c b/fs/reiser4/plugin/inode_ops_rename.c new file mode 100644 index 000000000000..bedc86fd69ae --- /dev/null +++ b/fs/reiser4/plugin/inode_ops_rename.c @@ -0,0 +1,958 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../inode.h" +#include "../safe_link.h" + +static const char *possible_leak = "Possible disk space leak."; + +/* re-bind existing name at @from_coord in @from_dir to point to @to_inode. 
+ + Helper function called from hashed_rename() */ +static int replace_name(struct inode *to_inode, /* inode where @from_coord is + * to be re-targeted at */ + struct inode *from_dir, /* directory where @from_coord + * lives */ + struct inode *from_inode, /* inode @from_coord + * originally point to */ + coord_t *from_coord, /* where directory entry is in + * the tree */ + lock_handle * from_lh/* lock handle on @from_coord */) +{ + item_plugin *from_item; + int result; + znode *node; + + coord_clear_iplug(from_coord); + node = from_coord->node; + result = zload(node); + if (result != 0) + return result; + from_item = item_plugin_by_coord(from_coord); + if (plugin_of_group(item_plugin_by_coord(from_coord), + DIR_ENTRY_ITEM_TYPE)) { + reiser4_key to_key; + + build_sd_key(to_inode, &to_key); + + /* everything is found and prepared to change directory entry + at @from_coord to point to @to_inode. + + @to_inode is just about to get new name, so bump its link + counter. + + */ + result = reiser4_add_nlink(to_inode, from_dir, 0); + if (result != 0) { + /* Don't issue warning: this may be plain -EMLINK */ + zrelse(node); + return result; + } + + result = + from_item->s.dir.update_key(from_coord, &to_key, from_lh); + if (result != 0) { + reiser4_del_nlink(to_inode, from_dir, 0); + zrelse(node); + return result; + } + + /* @from_inode just lost its name, he-he. + + If @from_inode was directory, it contained dotdot pointing + to @from_dir. @from_dir i_nlink will be decreased when + iput() will be called on @from_inode. + + If file-system is not ADG (hard-links are + supported on directories), iput(from_inode) will not remove + @from_inode, and thus above is incorrect, but hard-links on + directories are problematic in many other respects. + */ + result = reiser4_del_nlink(from_inode, from_dir, 0); + if (result != 0) { + warning("nikita-2330", + "Cannot remove link from source: %i. 
%s", + result, possible_leak); + } + /* Has to return success, because entry is already + * modified. */ + result = 0; + + /* NOTE-NIKITA consider calling plugin method in stead of + accessing inode fields directly. */ + from_dir->i_mtime = current_time(from_dir); + } else { + warning("nikita-2326", "Unexpected item type"); + result = RETERR(-EIO); + } + zrelse(node); + return result; +} + +/* add new entry pointing to @inode into @dir at @coord, locked by @lh + + Helper function used by hashed_rename(). */ +static int add_name(struct inode *inode, /* inode where @coord is to be + * re-targeted at */ + struct inode *dir, /* directory where @coord lives */ + struct dentry *name, /* new name */ + coord_t *coord, /* where directory entry is in the tree + */ + lock_handle * lh, /* lock handle on @coord */ + int is_dir/* true, if @inode is directory */) +{ + int result; + reiser4_dir_entry_desc entry; + + assert("nikita-2333", lh->node == coord->node); + assert("nikita-2334", is_dir == S_ISDIR(inode->i_mode)); + + memset(&entry, 0, sizeof entry); + entry.obj = inode; + /* build key of directory entry description */ + inode_dir_plugin(dir)->build_entry_key(dir, &name->d_name, &entry.key); + + /* ext2 does this in different order: first inserts new entry, + then increases directory nlink. We don't want do this, + because reiser4_add_nlink() calls ->add_link() plugin + method that can fail for whatever reason, leaving as with + cleanup problems. + */ + /* @inode is getting new name */ + reiser4_add_nlink(inode, dir, 0); + /* create @new_name in @new_dir pointing to + @old_inode */ + result = WITH_COORD(coord, + inode_dir_item_plugin(dir)->s.dir.add_entry(dir, + coord, + lh, + name, + &entry)); + if (result != 0) { + int result2; + result2 = reiser4_del_nlink(inode, dir, 0); + if (result2 != 0) { + warning("nikita-2327", + "Cannot drop link on %lli %i. 
%s", + (unsigned long long)get_inode_oid(inode), + result2, possible_leak); + } + } else + INODE_INC_FIELD(dir, i_size); + return result; +} + +static reiser4_block_nr estimate_rename(struct inode *old_dir, /* directory + * where @old is + * located */ + struct dentry *old_name,/* old name */ + struct inode *new_dir, /* directory + * where @new is + * located */ + struct dentry *new_name /* new name */) +{ + reiser4_block_nr res1, res2; + dir_plugin * p_parent_old, *p_parent_new; + file_plugin * p_child_old, *p_child_new; + + assert("vpf-311", old_dir != NULL); + assert("vpf-312", new_dir != NULL); + assert("vpf-313", old_name != NULL); + assert("vpf-314", new_name != NULL); + + p_parent_old = inode_dir_plugin(old_dir); + p_parent_new = inode_dir_plugin(new_dir); + p_child_old = inode_file_plugin(old_name->d_inode); + if (new_name->d_inode) + p_child_new = inode_file_plugin(new_name->d_inode); + else + p_child_new = NULL; + + /* find_entry - can insert one leaf. */ + res1 = res2 = 1; + + /* replace_name */ + { + /* reiser4_add_nlink(p_child_old) and + * reiser4_del_nlink(p_child_old) */ + res1 += 2 * p_child_old->estimate.update(old_name->d_inode); + /* update key */ + res1 += 1; + /* reiser4_del_nlink(p_child_new) */ + if (p_child_new) + res1 += p_child_new->estimate.update(new_name->d_inode); + } + + /* else add_name */ + { + /* reiser4_add_nlink(p_parent_new) and + * reiser4_del_nlink(p_parent_new) */ + res2 += + 2 * inode_file_plugin(new_dir)->estimate.update(new_dir); + /* reiser4_add_nlink(p_parent_old) */ + res2 += p_child_old->estimate.update(old_name->d_inode); + /* add_entry(p_parent_new) */ + res2 += p_parent_new->estimate.add_entry(new_dir); + /* reiser4_del_nlink(p_parent_old) */ + res2 += p_child_old->estimate.update(old_name->d_inode); + } + + res1 = res1 < res2 ? 
res2 : res1; + + /* reiser4_write_sd(p_parent_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + + /* reiser4_write_sd(p_child_new) */ + if (p_child_new) + res1 += p_child_new->estimate.update(new_name->d_inode); + + /* hashed_rem_entry(p_parent_old) */ + res1 += p_parent_old->estimate.rem_entry(old_dir); + + /* reiser4_del_nlink(p_child_old) */ + res1 += p_child_old->estimate.update(old_name->d_inode); + + /* replace_name */ + { + /* reiser4_add_nlink(p_parent_dir_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + /* update_key */ + res1 += 1; + /* reiser4_del_nlink(p_parent_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + /* reiser4_del_nlink(p_parent_old) */ + res1 += inode_file_plugin(old_dir)->estimate.update(old_dir); + } + + /* reiser4_write_sd(p_parent_old) */ + res1 += inode_file_plugin(old_dir)->estimate.update(old_dir); + + /* reiser4_write_sd(p_child_old) */ + res1 += p_child_old->estimate.update(old_name->d_inode); + + return res1; +} + +static int hashed_rename_estimate_and_grab(struct inode *old_dir, /* directory + * where @old + * is located + */ + struct dentry *old_name,/* old name + */ + struct inode *new_dir, /* directory + * where @new + * is located + */ + struct dentry *new_name /* new name + */) +{ + reiser4_block_nr reserve; + + reserve = estimate_rename(old_dir, old_name, new_dir, new_name); + + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + return 0; +} + +/* check whether @old_inode and @new_inode can be moved within file system + * tree. This singles out attempts to rename pseudo-files, for example. 
*/ +static int can_rename(struct inode *old_dir, struct inode *old_inode, + struct inode *new_dir, struct inode *new_inode) +{ + file_plugin *fplug; + dir_plugin *dplug; + + assert("nikita-3370", old_inode != NULL); + + dplug = inode_dir_plugin(new_dir); + fplug = inode_file_plugin(old_inode); + + if (dplug == NULL) + return RETERR(-ENOTDIR); + else if (new_dir->i_op->create == NULL) + return RETERR(-EPERM); + else if (!fplug->can_add_link(old_inode)) + return RETERR(-EMLINK); + else if (new_inode != NULL) { + fplug = inode_file_plugin(new_inode); + if (fplug->can_rem_link != NULL && + !fplug->can_rem_link(new_inode)) + return RETERR(-EBUSY); + } + return 0; +} + +int reiser4_find_entry(struct inode *, struct dentry *, lock_handle * , + znode_lock_mode, reiser4_dir_entry_desc *); +int reiser4_update_dir(struct inode *); + +/* this is common implementation of vfs's rename2 method of struct + inode_operations + See comments in the body. + + It is arguable that this function can be made generic so, that it + will be applicable to any kind of directory plugin that deals with + directories composed out of directory entries. The only obstacle + here is that we don't have any data-type to represent directory + entry. This should be re-considered when more than one different + directory plugin will be implemented. +*/ +int reiser4_rename2_common(struct inode *old_dir /* directory where @old + * is located */ , + struct dentry *old_name /* old name */ , + struct inode *new_dir /* directory where @new + * is located */ , + struct dentry *new_name /* new name */ , + unsigned flags /* specific flags */) +{ + /* From `The Open Group Base Specifications Issue 6' + + If either the old or new argument names a symbolic link, rename() + shall operate on the symbolic link itself, and shall not resolve + the last component of the argument. If the old argument and the new + argument resolve to the same existing file, rename() shall return + successfully and perform no other action. 
+ + [this is done by VFS: vfs_rename()] + + If the old argument points to the pathname of a file that is not a + directory, the new argument shall not point to the pathname of a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the link named by the new argument exists, it shall + be removed and old renamed to new. In this case, a link named new + shall remain visible to other processes throughout the renaming + operation and refer either to the file referred to by new or old + before the operation began. + + [we should assure this] + + Write access permission is required for + both the directory containing old and the directory containing new. + + [checked by VFS: vfs_rename->may_delete(), may_create()] + + If the old argument points to the pathname of a directory, the new + argument shall not point to the pathname of a file that is not a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the directory named by the new argument exists, it + shall be removed and old renamed to new. In this case, a link named + new shall exist throughout the renaming operation and shall refer + either to the directory referred to by new or old before the + operation began. + + [we should assure this] + + If new names an existing directory, it shall be + required to be an empty directory. + + [we should check this] + + If the old argument points to a pathname of a symbolic link, the + symbolic link shall be renamed. If the new argument points to a + pathname of a symbolic link, the symbolic link shall be removed. + + The new pathname shall not contain a path prefix that names + old. Write access permission is required for the directory + containing old and the directory containing new. If the old + argument points to the pathname of a directory, write access + permission may be required for the directory named by old, and, if + it exists, the directory named by new. 
+ + [checked by VFS: vfs_rename(), vfs_rename_dir()] + + If the link named by the new argument exists and the file's link + count becomes 0 when it is removed and no process has the file + open, the space occupied by the file shall be freed and the file + shall no longer be accessible. If one or more processes have the + file open when the last link is removed, the link shall be removed + before rename() returns, but the removal of the file contents shall + be postponed until all references to the file are closed. + + [iput() handles this, but we can do this manually, a la + reiser4_unlink()] + + Upon successful completion, rename() shall mark for update the + st_ctime and st_mtime fields of the parent directory of each file. + + [N/A] + + */ + + /* From Documentation/filesystems/vfs.txt: + + rename2: this has an additional flags argument compared to rename. + f no flags are supported by the filesystem then this method + need not be implemented. If some flags are supported then the + filesystem must return -EINVAL for any unsupported or unknown + flags. Currently the following flags are implemented: + (1) RENAME_NOREPLACE: this flag indicates that if the target + of the rename exists the rename should fail with -EEXIST + instead of replacing the target. The VFS already checks for + existence, so for local filesystems the RENAME_NOREPLACE + implementation is equivalent to plain rename. + (2) RENAME_EXCHANGE: exchange source and target. Both must + exist; this is checked by the VFS. Unlike plain rename, + source and target may be of different type. 
+ */ + + static const unsigned supported_flags = RENAME_NOREPLACE; + + reiser4_context *ctx; + int result; + int is_dir; /* is @old_name directory */ + + struct inode *old_inode; + struct inode *new_inode; + coord_t *new_coord; + + struct reiser4_dentry_fsdata *new_fsdata; + dir_plugin *dplug; + file_plugin *fplug; + + reiser4_dir_entry_desc *old_entry, *new_entry, *dotdot_entry; + lock_handle * new_lh, *dotdot_lh; + struct dentry *dotdot_name; + struct reiser4_dentry_fsdata *dataonstack; + + ctx = reiser4_init_context(old_dir->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + /* + * Check rename2() flags. + * + * "If some flags are supported then the filesystem must return + * -EINVAL for any unsupported or unknown flags." + * + * We support: + * - RENAME_NOREPLACE (no-op) + */ + if ((flags & supported_flags) != flags) + return RETERR(-EINVAL); + + old_entry = kzalloc(3 * sizeof(*old_entry) + 2 * sizeof(*new_lh) + + sizeof(*dotdot_name) + sizeof(*dataonstack), + reiser4_ctx_gfp_mask_get()); + if (!old_entry) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOMEM); + } + + new_entry = old_entry + 1; + dotdot_entry = old_entry + 2; + new_lh = (lock_handle *)(old_entry + 3); + dotdot_lh = new_lh + 1; + dotdot_name = (struct dentry *)(new_lh + 2); + dataonstack = (struct reiser4_dentry_fsdata *)(dotdot_name + 1); + + assert("nikita-2318", old_dir != NULL); + assert("nikita-2319", new_dir != NULL); + assert("nikita-2320", old_name != NULL); + assert("nikita-2321", new_name != NULL); + + old_inode = old_name->d_inode; + new_inode = new_name->d_inode; + + dplug = inode_dir_plugin(old_dir); + fplug = NULL; + + new_fsdata = reiser4_get_dentry_fsdata(new_name); + if (IS_ERR(new_fsdata)) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return PTR_ERR(new_fsdata); + } + + new_coord = &new_fsdata->dec.entry_coord; + coord_clear_iplug(new_coord); + + is_dir = S_ISDIR(old_inode->i_mode); + + 
assert("nikita-3461", old_inode->i_nlink >= 1 + !!is_dir); + + /* if target is existing directory and it's not empty---return error. + + This check is done specifically, because is_dir_empty() requires + tree traversal and have to be done before locks are taken. + */ + if (is_dir && new_inode != NULL && is_dir_empty(new_inode) != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOTEMPTY); + } + + result = can_rename(old_dir, old_inode, new_dir, new_inode); + if (result != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + result = hashed_rename_estimate_and_grab(old_dir, old_name, + new_dir, new_name); + if (result != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + init_lh(new_lh); + + /* find entry for @new_name */ + result = reiser4_find_entry(new_dir, new_name, new_lh, ZNODE_WRITE_LOCK, + new_entry); + + if (IS_CBKERR(result)) { + done_lh(new_lh); + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + reiser4_seal_done(&new_fsdata->dec.entry_seal); + + /* add or replace name for @old_inode as @new_name */ + if (new_inode != NULL) { + /* target (@new_name) exists. */ + /* Not clear what to do with objects that are + both directories and files at the same time. */ + if (result == CBK_COORD_FOUND) { + result = replace_name(old_inode, + new_dir, + new_inode, new_coord, new_lh); + if (result == 0) + fplug = inode_file_plugin(new_inode); + } else if (result == CBK_COORD_NOTFOUND) { + /* VFS told us that @new_name is bound to existing + inode, but we failed to find directory entry. */ + warning("nikita-2324", "Target not found"); + result = RETERR(-ENOENT); + } + } else { + /* target (@new_name) doesn't exists. 
*/ + if (result == CBK_COORD_NOTFOUND) + result = add_name(old_inode, + new_dir, + new_name, new_coord, new_lh, is_dir); + else if (result == CBK_COORD_FOUND) { + /* VFS told us that @new_name is "negative" dentry, + but we found directory entry. */ + warning("nikita-2331", "Target found unexpectedly"); + result = RETERR(-EIO); + } + } + + assert("nikita-3462", ergo(result == 0, + old_inode->i_nlink >= 2 + !!is_dir)); + + /* We are done with all modifications to the @new_dir, release lock on + node. */ + done_lh(new_lh); + + if (fplug != NULL) { + /* detach @new_inode from name-space */ + result = fplug->detach(new_inode, new_dir); + if (result != 0) + warning("nikita-2330", "Cannot detach %lli: %i. %s", + (unsigned long long)get_inode_oid(new_inode), + result, possible_leak); + } + + if (new_inode != NULL) + reiser4_update_sd(new_inode); + + if (result == 0) { + old_entry->obj = old_inode; + + dplug->build_entry_key(old_dir, + &old_name->d_name, &old_entry->key); + + /* At this stage new name was introduced for + @old_inode. @old_inode, @new_dir, and @new_inode i_nlink + counters were updated. + + We want to remove @old_name now. If @old_inode wasn't + directory this is simple. + */ + result = dplug->rem_entry(old_dir, old_name, old_entry); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2335", + "Cannot remove old name: %i", result); + } else { + result = reiser4_del_nlink(old_inode, old_dir, 0); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2337", + "Cannot drop link on old: %i", result); + } + } + + if (result == 0 && is_dir) { + /* @old_inode is directory. We also have to update + dotdot entry. 
*/ + coord_t *dotdot_coord; + + memset(dataonstack, 0, sizeof(*dataonstack)); + memset(dotdot_entry, 0, sizeof(*dotdot_entry)); + dotdot_entry->obj = old_dir; + memset(dotdot_name, 0, sizeof(*dotdot_name)); + dotdot_name->d_name.name = ".."; + dotdot_name->d_name.len = 2; + /* + * allocate ->d_fsdata on the stack to avoid using + * reiser4_get_dentry_fsdata(). Locking is not needed, + * because dentry is private to the current thread. + */ + dotdot_name->d_fsdata = dataonstack; + init_lh(dotdot_lh); + + dotdot_coord = &dataonstack->dec.entry_coord; + coord_clear_iplug(dotdot_coord); + + result = reiser4_find_entry(old_inode, dotdot_name, + dotdot_lh, ZNODE_WRITE_LOCK, + dotdot_entry); + if (result == 0) { + /* replace_name() decreases i_nlink on + * @old_dir */ + result = replace_name(new_dir, + old_inode, + old_dir, + dotdot_coord, dotdot_lh); + } else + result = RETERR(-EIO); + done_lh(dotdot_lh); + } + } + reiser4_update_dir(new_dir); + reiser4_update_dir(old_dir); + reiser4_update_sd(old_inode); + if (result == 0) { + file_plugin *fplug; + + if (new_inode != NULL) { + /* add safe-link for target file (in case we removed + * last reference to the poor fellow */ + fplug = inode_file_plugin(new_inode); + if (new_inode->i_nlink == 0) + result = safe_link_add(new_inode, SAFE_UNLINK); + } + } + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +#if 0 +int reiser4_rename_common(struct inode *old_dir /* directory where @old + * is located */ , + struct dentry *old_name /* old name */ , + struct inode *new_dir /* directory where @new + * is located */ , + struct dentry *new_name/* new name */) +{ + /* From `The Open Group Base Specifications Issue 6' + + If either the old or new argument names a symbolic link, rename() + shall operate on the symbolic link itself, and shall not resolve + the last component of the argument. 
If the old argument and the new + argument resolve to the same existing file, rename() shall return + successfully and perform no other action. + + [this is done by VFS: vfs_rename()] + + If the old argument points to the pathname of a file that is not a + directory, the new argument shall not point to the pathname of a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the link named by the new argument exists, it shall + be removed and old renamed to new. In this case, a link named new + shall remain visible to other processes throughout the renaming + operation and refer either to the file referred to by new or old + before the operation began. + + [we should assure this] + + Write access permission is required for + both the directory containing old and the directory containing new. + + [checked by VFS: vfs_rename->may_delete(), may_create()] + + If the old argument points to the pathname of a directory, the new + argument shall not point to the pathname of a file that is not a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the directory named by the new argument exists, it + shall be removed and old renamed to new. In this case, a link named + new shall exist throughout the renaming operation and shall refer + either to the directory referred to by new or old before the + operation began. + + [we should assure this] + + If new names an existing directory, it shall be + required to be an empty directory. + + [we should check this] + + If the old argument points to a pathname of a symbolic link, the + symbolic link shall be renamed. If the new argument points to a + pathname of a symbolic link, the symbolic link shall be removed. + + The new pathname shall not contain a path prefix that names + old. Write access permission is required for the directory + containing old and the directory containing new. 
If the old + argument points to the pathname of a directory, write access + permission may be required for the directory named by old, and, if + it exists, the directory named by new. + + [checked by VFS: vfs_rename(), vfs_rename_dir()] + + If the link named by the new argument exists and the file's link + count becomes 0 when it is removed and no process has the file + open, the space occupied by the file shall be freed and the file + shall no longer be accessible. If one or more processes have the + file open when the last link is removed, the link shall be removed + before rename() returns, but the removal of the file contents shall + be postponed until all references to the file are closed. + + [iput() handles this, but we can do this manually, a la + reiser4_unlink()] + + Upon successful completion, rename() shall mark for update the + st_ctime and st_mtime fields of the parent directory of each file. + + [N/A] + + */ + reiser4_context *ctx; + int result; + int is_dir; /* is @old_name directory */ + struct inode *old_inode; + struct inode *new_inode; + reiser4_dir_entry_desc old_entry; + reiser4_dir_entry_desc new_entry; + coord_t *new_coord; + struct reiser4_dentry_fsdata *new_fsdata; + lock_handle new_lh; + dir_plugin *dplug; + file_plugin *fplug; + + ctx = reiser4_init_context(old_dir->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-2318", old_dir != NULL); + assert("nikita-2319", new_dir != NULL); + assert("nikita-2320", old_name != NULL); + assert("nikita-2321", new_name != NULL); + + old_inode = old_name->d_inode; + new_inode = new_name->d_inode; + + dplug = inode_dir_plugin(old_dir); + fplug = NULL; + + new_fsdata = reiser4_get_dentry_fsdata(new_name); + if (IS_ERR(new_fsdata)) { + result = PTR_ERR(new_fsdata); + goto exit; + } + + new_coord = &new_fsdata->dec.entry_coord; + coord_clear_iplug(new_coord); + + is_dir = S_ISDIR(old_inode->i_mode); + + assert("nikita-3461", old_inode->i_nlink >= 1 + !!is_dir); + + /* if target is 
existing directory and it's not empty---return error. + + This check is done specifically, because is_dir_empty() requires + tree traversal and have to be done before locks are taken. + */ + if (is_dir && new_inode != NULL && is_dir_empty(new_inode) != 0) + return RETERR(-ENOTEMPTY); + + result = can_rename(old_dir, old_inode, new_dir, new_inode); + if (result != 0) + goto exit; + + result = hashed_rename_estimate_and_grab(old_dir, old_name, + new_dir, new_name); + if (result != 0) + goto exit; + + init_lh(&new_lh); + + /* find entry for @new_name */ + result = reiser4_find_entry(new_dir, new_name, &new_lh, + ZNODE_WRITE_LOCK, &new_entry); + + if (IS_CBKERR(result)) { + done_lh(&new_lh); + goto exit; + } + + reiser4_seal_done(&new_fsdata->dec.entry_seal); + + /* add or replace name for @old_inode as @new_name */ + if (new_inode != NULL) { + /* target (@new_name) exists. */ + /* Not clear what to do with objects that are + both directories and files at the same time. */ + if (result == CBK_COORD_FOUND) { + result = replace_name(old_inode, + new_dir, + new_inode, new_coord, &new_lh); + if (result == 0) + fplug = inode_file_plugin(new_inode); + } else if (result == CBK_COORD_NOTFOUND) { + /* VFS told us that @new_name is bound to existing + inode, but we failed to find directory entry. */ + warning("nikita-2324", "Target not found"); + result = RETERR(-ENOENT); + } + } else { + /* target (@new_name) doesn't exists. */ + if (result == CBK_COORD_NOTFOUND) + result = add_name(old_inode, + new_dir, + new_name, new_coord, &new_lh, is_dir); + else if (result == CBK_COORD_FOUND) { + /* VFS told us that @new_name is "negative" dentry, + but we found directory entry. */ + warning("nikita-2331", "Target found unexpectedly"); + result = RETERR(-EIO); + } + } + + assert("nikita-3462", ergo(result == 0, + old_inode->i_nlink >= 2 + !!is_dir)); + + /* We are done with all modifications to the @new_dir, release lock on + node. 
*/ + done_lh(&new_lh); + + if (fplug != NULL) { + /* detach @new_inode from name-space */ + result = fplug->detach(new_inode, new_dir); + if (result != 0) + warning("nikita-2330", "Cannot detach %lli: %i. %s", + (unsigned long long)get_inode_oid(new_inode), + result, possible_leak); + } + + if (new_inode != NULL) + reiser4_update_sd(new_inode); + + if (result == 0) { + memset(&old_entry, 0, sizeof old_entry); + old_entry.obj = old_inode; + + dplug->build_entry_key(old_dir, + &old_name->d_name, &old_entry.key); + + /* At this stage new name was introduced for + @old_inode. @old_inode, @new_dir, and @new_inode i_nlink + counters were updated. + + We want to remove @old_name now. If @old_inode wasn't + directory this is simple. + */ + result = dplug->rem_entry(old_dir, old_name, &old_entry); + /*result = rem_entry_hashed(old_dir, old_name, &old_entry); */ + if (result != 0 && result != -ENOMEM) { + warning("nikita-2335", + "Cannot remove old name: %i", result); + } else { + result = reiser4_del_nlink(old_inode, old_dir, 0); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2337", + "Cannot drop link on old: %i", result); + } + } + + if (result == 0 && is_dir) { + /* @old_inode is directory. We also have to update + dotdot entry. */ + coord_t *dotdot_coord; + lock_handle dotdot_lh; + struct dentry dotdot_name; + reiser4_dir_entry_desc dotdot_entry; + struct reiser4_dentry_fsdata dataonstack; + struct reiser4_dentry_fsdata *fsdata; + + memset(&dataonstack, 0, sizeof dataonstack); + memset(&dotdot_entry, 0, sizeof dotdot_entry); + dotdot_entry.obj = old_dir; + memset(&dotdot_name, 0, sizeof dotdot_name); + dotdot_name.d_name.name = ".."; + dotdot_name.d_name.len = 2; + /* + * allocate ->d_fsdata on the stack to avoid using + * reiser4_get_dentry_fsdata(). Locking is not needed, + * because dentry is private to the current thread. 
+ */ + dotdot_name.d_fsdata = &dataonstack; + init_lh(&dotdot_lh); + + fsdata = &dataonstack; + dotdot_coord = &fsdata->dec.entry_coord; + coord_clear_iplug(dotdot_coord); + + result = reiser4_find_entry(old_inode, + &dotdot_name, + &dotdot_lh, + ZNODE_WRITE_LOCK, + &dotdot_entry); + if (result == 0) { + /* replace_name() decreases i_nlink on + * @old_dir */ + result = replace_name(new_dir, + old_inode, + old_dir, + dotdot_coord, &dotdot_lh); + } else + result = RETERR(-EIO); + done_lh(&dotdot_lh); + } + } + reiser4_update_dir(new_dir); + reiser4_update_dir(old_dir); + reiser4_update_sd(old_inode); + if (result == 0) { + file_plugin *fplug; + + if (new_inode != NULL) { + /* add safe-link for target file (in case we removed + * last reference to the poor fellow */ + fplug = inode_file_plugin(new_inode); + if (new_inode->i_nlink == 0) + result = safe_link_add(new_inode, SAFE_UNLINK); + } + } +exit: + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} +#endif diff --git a/fs/reiser4/plugin/item/Makefile b/fs/reiser4/plugin/item/Makefile new file mode 100644 index 000000000000..1bae6238722e --- /dev/null +++ b/fs/reiser4/plugin/item/Makefile @@ -0,0 +1,18 @@ +obj-$(CONFIG_REISER4_FS) += item_plugins.o + +item_plugins-objs := \ + item.o \ + static_stat.o \ + sde.o \ + cde.o \ + blackbox.o \ + internal.o \ + tail.o \ + ctail.o \ + extent.o \ + extent_item_ops.o \ + extent_file_ops.o \ + extent_flush_ops.o + + + diff --git a/fs/reiser4/plugin/item/acl.h b/fs/reiser4/plugin/item/acl.h new file mode 100644 index 000000000000..f26762a1c287 --- /dev/null +++ b/fs/reiser4/plugin/item/acl.h @@ -0,0 +1,66 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry. 
*/ + +#if !defined( __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ ) +#define __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +#include +#include /* for struct dentry */ + +typedef struct directory_entry_format { + /* key of object stat-data. It's not necessary to store whole + key here, because it's always key of stat-data, so minor + packing locality and offset can be omitted here. But this + relies on particular key allocation scheme for stat-data, so, + for extensibility sake, whole key can be stored here. + + We store key as array of bytes, because we don't want 8-byte + alignment of dir entries. + */ + obj_key_id id; + /* file name. Null terminated string. */ + d8 name[0]; +} directory_entry_format; + +void print_de(const char *prefix, coord_t * coord); +int extract_key_de(const coord_t * coord, reiser4_key * key); +int update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_de(const coord_t * coord, char *buf); +unsigned extract_file_type_de(const coord_t * coord); +int add_entry_de(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int rem_entry_de(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_de(const struct inode *dir); + +int de_rem_and_shrink(struct inode *dir, coord_t * coord, int length); + +char *extract_dent_name(const coord_t * coord, + directory_entry_format * dent, char *buf); + +#if REISER4_LARGE_KEY +#define DE_NAME_BUF_LEN (24) +#else +#define DE_NAME_BUF_LEN (16) +#endif + +/* __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/blackbox.c b/fs/reiser4/plugin/item/blackbox.c new file mode 100644 index 000000000000..f13ff64572c5 --- /dev/null +++ b/fs/reiser4/plugin/item/blackbox.c @@ -0,0 +1,142 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Black box item implementation */ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../coord.h" +#include "../../tree.h" +#include "../../lock.h" + +#include "blackbox.h" +#include "item.h" +#include "../plugin.h" + +int +store_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length) +{ + int result; + reiser4_item_data idata; + coord_t coord; + lock_handle lh; + + memset(&idata, 0, sizeof idata); + + idata.data = data; + idata.user = 0; + idata.length = length; + idata.iplug = item_plugin_by_id(BLACK_BOX_ID); + + init_lh(&lh); + result = insert_by_key(tree, key, + &idata, &coord, &lh, LEAF_LEVEL, CBK_UNIQUE); + + assert("nikita-3413", + ergo(result == 0, + WITH_COORD(&coord, + item_length_by_coord(&coord) == length))); + + done_lh(&lh); + return result; +} + +int +load_black_box(reiser4_tree * tree, + reiser4_key * key, void *data, int length, int exact) +{ + int result; + coord_t coord; + lock_handle lh; + + init_lh(&lh); + result = coord_by_key(tree, key, + &coord, &lh, ZNODE_READ_LOCK, + exact ? FIND_EXACT : FIND_MAX_NOT_MORE_THAN, + LEAF_LEVEL, LEAF_LEVEL, CBK_UNIQUE, NULL); + + if (result == 0) { + int ilen; + + result = zload(coord.node); + if (result == 0) { + ilen = item_length_by_coord(&coord); + if (ilen <= length) { + memcpy(data, item_body_by_coord(&coord), ilen); + unit_key_by_coord(&coord, key); + } else if (exact) { + /* + * item is larger than buffer provided by the + * user. Only issue a warning if @exact is + * set. 
If @exact is false, we are iterating + * over all safe-links and here we are reaching + * the end of the iteration. + */ + warning("nikita-3415", + "Wrong black box length: %i > %i", + ilen, length); + result = RETERR(-EIO); + } + zrelse(coord.node); + } + } + + done_lh(&lh); + return result; + +} + +int +update_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length) +{ + int result; + coord_t coord; + lock_handle lh; + + init_lh(&lh); + result = coord_by_key(tree, key, + &coord, &lh, ZNODE_READ_LOCK, + FIND_EXACT, + LEAF_LEVEL, LEAF_LEVEL, CBK_UNIQUE, NULL); + if (result == 0) { + int ilen; + + result = zload(coord.node); + if (result == 0) { + ilen = item_length_by_coord(&coord); + if (length <= ilen) { + memcpy(item_body_by_coord(&coord), data, + length); + } else { + warning("nikita-3437", + "Wrong black box length: %i < %i", + ilen, length); + result = RETERR(-EIO); + } + zrelse(coord.node); + } + } + + done_lh(&lh); + return result; + +} + +int kill_black_box(reiser4_tree * tree, const reiser4_key * key) +{ + return reiser4_cut_tree(tree, key, key, NULL, 1); +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/blackbox.h b/fs/reiser4/plugin/item/blackbox.h new file mode 100644 index 000000000000..f5b7af382dc7 --- /dev/null +++ b/fs/reiser4/plugin/item/blackbox.h @@ -0,0 +1,33 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* "Black box" entry to fixed-width contain user supplied data */ + +#if !defined( __FS_REISER4_BLACK_BOX_H__ ) +#define __FS_REISER4_BLACK_BOX_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +extern int store_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length); +extern int load_black_box(reiser4_tree * tree, + reiser4_key * key, void *data, int length, int exact); +extern int kill_black_box(reiser4_tree * tree, const reiser4_key * key); +extern int update_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length); + +/* __FS_REISER4_BLACK_BOX_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/cde.c b/fs/reiser4/plugin/item/cde.c new file mode 100644 index 000000000000..e9afd144c39c --- /dev/null +++ b/fs/reiser4/plugin/item/cde.c @@ -0,0 +1,1004 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry implementation */ + +/* DESCRIPTION: + + This is "compound" directory item plugin implementation. This directory + item type is compound (as opposed to the "simple directory item" in + fs/reiser4/plugin/item/sde.[ch]), because it consists of several directory + entries. + + The reason behind this decision is disk space efficiency: all directory + entries inside the same directory have identical fragment in their + keys. 
This, of course, depends on key assignment policy. In our default key + assignment policy, all directory entries have the same locality which is + equal to the object id of their directory. + + Composing directory item out of several directory entries for the same + directory allows us to store said key fragment only once. That is, this is + some ad hoc form of key compression (stem compression) that is implemented + here, because general key compression is not supposed to be implemented in + v4.0. + + Another decision that was made regarding all directory item plugins, is + that they will store entry keys unaligned. This is for that sake of disk + space efficiency again. + + In should be noted, that storing keys unaligned increases CPU consumption, + at least on some architectures. + + Internal on-disk structure of the compound directory item is the following: + + HEADER cde_item_format. Here number of entries is stored. + ENTRY_HEADER_0 cde_unit_header. Here part of entry key and + ENTRY_HEADER_1 offset of entry body are stored. + ENTRY_HEADER_2 (basically two last parts of key) + ... + ENTRY_HEADER_N + ENTRY_BODY_0 directory_entry_format. Here part of stat data key and + ENTRY_BODY_1 NUL-terminated name are stored. + ENTRY_BODY_2 (part of statadta key in the + sence that since all SDs have + zero offset, this offset is not + stored on disk). + ... + ENTRY_BODY_N + + When it comes to the balancing, each directory entry in compound directory + item is unit, that is, something that can be cut from one item and pasted + into another item of the same type. Handling of unit cut and paste is major + reason for the complexity of code below. 
+ +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" +#include "../../coord.h" +#include "sde.h" +#include "cde.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" + +#include /* for struct inode */ +#include /* for struct dentry */ + +#if 0 +#define CHECKME(coord) \ +({ \ + const char *message; \ + coord_t dup; \ + \ + coord_dup_nocheck(&dup, (coord)); \ + dup.unit_pos = 0; \ + assert("nikita-2871", cde_check(&dup, &message) == 0); \ +}) +#else +#define CHECKME(coord) noop +#endif + +/* return body of compound directory item at @coord */ +static inline cde_item_format *formatted_at(const coord_t * coord) +{ + assert("nikita-1282", coord != NULL); + return item_body_by_coord(coord); +} + +/* return entry header at @coord */ +static inline cde_unit_header *header_at(const coord_t * + coord /* coord of item */ , + int idx /* index of unit */ ) +{ + assert("nikita-1283", coord != NULL); + return &formatted_at(coord)->entry[idx]; +} + +/* return number of units in compound directory item at @coord */ +static int units(const coord_t * coord /* coord of item */ ) +{ + return le16_to_cpu(get_unaligned(&formatted_at(coord)->num_of_entries)); +} + +/* return offset of the body of @idx-th entry in @coord */ +static unsigned int offset_of(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ ) +{ + if (idx < units(coord)) + return le16_to_cpu(get_unaligned(&header_at(coord, idx)->offset)); + else if (idx == units(coord)) + return item_length_by_coord(coord); + else + impossible("nikita-1308", "Wrong idx"); + return 0; +} + +/* set offset of the body of @idx-th entry in @coord */ +static void set_offset(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ , + unsigned int offset /* new offset */ ) +{ + 
put_unaligned(cpu_to_le16((__u16) offset), &header_at(coord, idx)->offset); +} + +static void adj_offset(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ , + int delta /* offset change */ ) +{ + d16 *doffset; + __u16 offset; + + doffset = &header_at(coord, idx)->offset; + offset = le16_to_cpu(get_unaligned(doffset)); + offset += delta; + put_unaligned(cpu_to_le16((__u16) offset), doffset); +} + +/* return pointer to @offset-th byte from the beginning of @coord */ +static char *address(const coord_t * coord /* coord of item */ , + int offset) +{ + return ((char *)item_body_by_coord(coord)) + offset; +} + +/* return pointer to the body of @idx-th entry in @coord */ +static directory_entry_format *entry_at(const coord_t * coord /* coord of + * item */ , + int idx /* index of unit */ ) +{ + return (directory_entry_format *) address(coord, + (int)offset_of(coord, idx)); +} + +/* return number of unit referenced by @coord */ +static int idx_of(const coord_t * coord /* coord of item */ ) +{ + assert("nikita-1285", coord != NULL); + return coord->unit_pos; +} + +/* find position where entry with @entry_key would be inserted into @coord */ +static int find(const coord_t * coord /* coord of item */ , + const reiser4_key * entry_key /* key to look for */ , + cmp_t * last /* result of last comparison */ ) +{ + int entries; + + int left; + int right; + + cde_unit_header *header; + + assert("nikita-1295", coord != NULL); + assert("nikita-1296", entry_key != NULL); + assert("nikita-1297", last != NULL); + + entries = units(coord); + left = 0; + right = entries - 1; + while (right - left >= REISER4_SEQ_SEARCH_BREAK) { + int median; + + median = (left + right) >> 1; + + header = header_at(coord, median); + *last = de_id_key_cmp(&header->hash, entry_key); + switch (*last) { + case LESS_THAN: + left = median; + break; + case GREATER_THAN: + right = median; + break; + case EQUAL_TO:{ + do { + median--; + header--; + } while (median >= 0 && + 
de_id_key_cmp(&header->hash, + entry_key) == EQUAL_TO); + return median + 1; + } + } + } + header = header_at(coord, left); + for (; left < entries; ++left, ++header) { + prefetch(header + 1); + *last = de_id_key_cmp(&header->hash, entry_key); + if (*last != LESS_THAN) + break; + } + if (left < entries) + return left; + else + return RETERR(-ENOENT); + +} + +/* expand @coord as to accommodate for insertion of @no new entries starting + from @pos, with total bodies size @size. */ +static int expand_item(const coord_t * coord /* coord of item */ , + int pos /* unit position */ , int no /* number of new + * units*/ , + int size /* total size of new units' data */ , + unsigned int data_size /* free space already reserved + * in the item for insertion */ ) +{ + int entries; + cde_unit_header *header; + char *dent; + int i; + + assert("nikita-1310", coord != NULL); + assert("nikita-1311", pos >= 0); + assert("nikita-1312", no > 0); + assert("nikita-1313", data_size >= no * sizeof(directory_entry_format)); + assert("nikita-1343", + item_length_by_coord(coord) >= + (int)(size + data_size + no * sizeof *header)); + + entries = units(coord); + + if (pos == entries) + dent = address(coord, size); + else + dent = (char *)entry_at(coord, pos); + /* place where new header will be in */ + header = header_at(coord, pos); + /* free space for new entry headers */ + memmove(header + no, header, + (unsigned)(address(coord, size) - (char *)header)); + /* if adding to the end initialise first new header */ + if (pos == entries) { + set_offset(coord, pos, (unsigned)size); + } + + /* adjust entry pointer and size */ + dent = dent + no * sizeof *header; + size += no * sizeof *header; + /* free space for new entries */ + memmove(dent + data_size, dent, + (unsigned)(address(coord, size) - dent)); + + /* increase counter */ + entries += no; + put_unaligned(cpu_to_le16((__u16) entries), &formatted_at(coord)->num_of_entries); + + /* [ 0 ... 
pos ] entries were shifted by no * ( sizeof *header ) + bytes. */ + for (i = 0; i <= pos; ++i) + adj_offset(coord, i, no * sizeof *header); + /* [ pos + no ... +\infty ) entries were shifted by ( no * + sizeof *header + data_size ) bytes */ + for (i = pos + no; i < entries; ++i) + adj_offset(coord, i, no * sizeof *header + data_size); + return 0; +} + +/* insert new @entry into item */ +static int expand(const coord_t * coord /* coord of item */ , + struct cde_entry * entry /* entry to insert */ , + int len /* length of @entry data */ , + int *pos /* position to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters for new + * entry */ ) +{ + cmp_t cmp_res; + int datasize; + + *pos = find(coord, &dir_entry->key, &cmp_res); + if (*pos < 0) + *pos = units(coord); + + datasize = sizeof(directory_entry_format); + if (is_longname(entry->name->name, entry->name->len)) + datasize += entry->name->len + 1; + + expand_item(coord, *pos, 1, item_length_by_coord(coord) - len, + datasize); + return 0; +} + +/* paste body of @entry into item */ +static int paste_entry(const coord_t * coord /* coord of item */ , + struct cde_entry * entry /* new entry */ , + int pos /* position to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters for + * new entry */ ) +{ + cde_unit_header *header; + directory_entry_format *dent; + const char *name; + int len; + + header = header_at(coord, pos); + dent = entry_at(coord, pos); + + build_de_id_by_key(&dir_entry->key, &header->hash); + build_inode_key_id(entry->obj, &dent->id); + /* AUDIT unsafe strcpy() operation! 
It should be replaced with + much less CPU hungry + memcpy( ( char * ) dent -> name, entry -> name -> name , entry -> name -> len ); + + Also a more major thing is that there should be a way to figure out + amount of space in dent -> name and be able to check that we are + not going to overwrite more than we supposed to */ + name = entry->name->name; + len = entry->name->len; + if (is_longname(name, len)) { + strcpy((unsigned char *)dent->name, name); + put_unaligned(0, &dent->name[len]); + } + return 0; +} + +/* estimate how much space is necessary in item to insert/paste set of entries + described in @data. */ +int estimate_cde(const coord_t * coord /* coord of item */ , + const reiser4_item_data * data /* parameters for new item */ ) +{ + struct cde_entry_data *e; + int result; + int i; + + e = (struct cde_entry_data *) data->data; + + assert("nikita-1288", e != NULL); + assert("nikita-1289", e->num_of_entries >= 0); + + if (coord == NULL) + /* insert */ + result = sizeof(cde_item_format); + else + /* paste */ + result = 0; + + result += e->num_of_entries * + (sizeof(cde_unit_header) + sizeof(directory_entry_format)); + for (i = 0; i < e->num_of_entries; ++i) { + const char *name; + int len; + + name = e->entry[i].name->name; + len = e->entry[i].name->len; + assert("nikita-2054", strlen(name) == len); + if (is_longname(name, len)) + result += len + 1; + } + ((reiser4_item_data *) data)->length = result; + return result; +} + +/* ->nr_units() method for this item plugin. */ +pos_in_node_t nr_units_cde(const coord_t * coord /* coord of item */ ) +{ + return units(coord); +} + +/* ->unit_key() method for this item plugin. 
*/ +reiser4_key *unit_key_cde(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + assert("nikita-1452", coord != NULL); + assert("nikita-1345", idx_of(coord) < units(coord)); + assert("nikita-1346", key != NULL); + + item_key_by_coord(coord, key); + extract_key_from_de_id(extract_dir_id_from_key(key), + &header_at(coord, idx_of(coord))->hash, key); + return key; +} + +/* mergeable_cde(): implementation of ->mergeable() item method. + + Two directory items are mergeable iff they are from the same + directory. That simple. + +*/ +int mergeable_cde(const coord_t * p1 /* coord of first item */ , + const coord_t * p2 /* coord of second item */ ) +{ + reiser4_key k1; + reiser4_key k2; + + assert("nikita-1339", p1 != NULL); + assert("nikita-1340", p2 != NULL); + + return + (item_plugin_by_coord(p1) == item_plugin_by_coord(p2)) && + (extract_dir_id_from_key(item_key_by_coord(p1, &k1)) == + extract_dir_id_from_key(item_key_by_coord(p2, &k2))); + +} + +/* ->max_key_inside() method for this item plugin. */ +reiser4_key *max_key_inside_cde(const coord_t * coord /* coord of item */ , + reiser4_key * result /* resulting key */ ) +{ + assert("nikita-1342", coord != NULL); + + item_key_by_coord(coord, result); + set_key_ordering(result, get_key_ordering(reiser4_max_key())); + set_key_fulloid(result, get_key_fulloid(reiser4_max_key())); + set_key_offset(result, get_key_offset(reiser4_max_key())); + return result; +} + +/* @data contains data which are to be put into tree */ +int can_contain_key_cde(const coord_t * coord /* coord of item */ , + const reiser4_key * key /* key to check */ , + const reiser4_item_data * data /* parameters of new + * item/unit being + * created */ ) +{ + reiser4_key item_key; + + /* FIXME-VS: do not rely on anything but iplug field of @data. 
Only + data->iplug is initialized */ + assert("vs-457", data && data->iplug); +/* assert( "vs-553", data -> user == 0 );*/ + item_key_by_coord(coord, &item_key); + + return (item_plugin_by_coord(coord) == data->iplug) && + (extract_dir_id_from_key(&item_key) == + extract_dir_id_from_key(key)); +} + +#if REISER4_DEBUG +/* cde_check ->check() method for compressed directory items + + used for debugging, every item should have here the most complete + possible check of the consistency of the item that the inventor can + construct +*/ +int reiser4_check_cde(const coord_t * coord /* coord of item to check */, + const char **error /* where to store error message */) +{ + int i; + int result; + char *item_start; + char *item_end; + reiser4_key key; + + coord_t c; + + assert("nikita-1357", coord != NULL); + assert("nikita-1358", error != NULL); + + if (!ergo(coord->item_pos != 0, + is_dot_key(item_key_by_coord(coord, &key)))) { + *error = "CDE doesn't start with dot"; + return -1; + } + item_start = item_body_by_coord(coord); + item_end = item_start + item_length_by_coord(coord); + + coord_dup(&c, coord); + result = 0; + for (i = 0; i < units(coord); ++i) { + directory_entry_format *entry; + + if ((char *)(header_at(coord, i) + 1) > + item_end - units(coord) * sizeof *entry) { + *error = "CDE header is out of bounds"; + result = -1; + break; + } + entry = entry_at(coord, i); + if ((char *)entry < item_start + sizeof(cde_item_format)) { + *error = "CDE header is too low"; + result = -1; + break; + } + if ((char *)(entry + 1) > item_end) { + *error = "CDE header is too high"; + result = -1; + break; + } + } + + return result; +} +#endif + +/* ->init() method for this item plugin. */ +int init_cde(coord_t * coord /* coord of item */ , + coord_t * from UNUSED_ARG, reiser4_item_data * data /* structure used for insertion */ + UNUSED_ARG) +{ + put_unaligned(cpu_to_le16(0), &formatted_at(coord)->num_of_entries); + return 0; +} + +/* ->lookup() method for this item plugin. 
*/ +lookup_result lookup_cde(const reiser4_key * key /* key to search for */ , + lookup_bias bias /* search bias */ , + coord_t * coord /* coord of item to lookup in */ ) +{ + cmp_t last_comp; + int pos; + + reiser4_key utmost_key; + + assert("nikita-1293", coord != NULL); + assert("nikita-1294", key != NULL); + + CHECKME(coord); + + if (keygt(item_key_by_coord(coord, &utmost_key), key)) { + coord->unit_pos = 0; + coord->between = BEFORE_UNIT; + return CBK_COORD_NOTFOUND; + } + pos = find(coord, key, &last_comp); + if (pos >= 0) { + coord->unit_pos = (int)pos; + switch (last_comp) { + case EQUAL_TO: + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + case GREATER_THAN: + coord->between = BEFORE_UNIT; + return RETERR(-ENOENT); + case LESS_THAN: + default: + impossible("nikita-1298", "Broken find"); + return RETERR(-EIO); + } + } else { + coord->unit_pos = units(coord) - 1; + coord->between = AFTER_UNIT; + return (bias == + FIND_MAX_NOT_MORE_THAN) ? CBK_COORD_FOUND : + CBK_COORD_NOTFOUND; + } +} + +/* ->paste() method for this item plugin. */ +int paste_cde(coord_t * coord /* coord of item */ , + reiser4_item_data * data /* parameters of new unit being + * inserted */ , + carry_plugin_info * info UNUSED_ARG /* todo carry queue */ ) +{ + struct cde_entry_data *e; + int result; + int i; + + CHECKME(coord); + e = (struct cde_entry_data *) data->data; + + result = 0; + for (i = 0; i < e->num_of_entries; ++i) { + int pos; + int phantom_size; + + phantom_size = data->length; + if (units(coord) == 0) + phantom_size -= sizeof(cde_item_format); + + result = + expand(coord, e->entry + i, phantom_size, &pos, data->arg); + if (result != 0) + break; + result = paste_entry(coord, e->entry + i, pos, data->arg); + if (result != 0) + break; + } + CHECKME(coord); + return result; +} + +/* amount of space occupied by all entries starting from @idx both headers and + bodies. 
*/ +static unsigned int part_size(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ ) +{ + assert("nikita-1299", coord != NULL); + assert("nikita-1300", idx < (int)units(coord)); + + return sizeof(cde_item_format) + + (idx + 1) * sizeof(cde_unit_header) + offset_of(coord, + idx + 1) - + offset_of(coord, 0); +} + +/* how many but not more than @want units of @source can be merged with + item in @target node. If pend == append - we try to append last item + of @target by first units of @source. If pend == prepend - we try to + "prepend" first item in @target by last units of @source. @target + node has @free_space bytes of free space. Total size of those units + are returned via @size */ +int can_shift_cde(unsigned free_space /* free space in item */ , + coord_t * coord /* coord of source item */ , + znode * target /* target node */ , + shift_direction pend /* shift direction */ , + unsigned *size /* resulting number of shifted bytes */ , + unsigned want /* maximal number of bytes to shift */ ) +{ + int shift; + + CHECKME(coord); + if (want == 0) { + *size = 0; + return 0; + } + + /* pend == SHIFT_LEFT <==> shifting to the left */ + if (pend == SHIFT_LEFT) { + for (shift = min((int)want - 1, units(coord)); shift >= 0; + --shift) { + *size = part_size(coord, shift); + if (target != NULL) + *size -= sizeof(cde_item_format); + if (*size <= free_space) + break; + } + shift = shift + 1; + } else { + int total_size; + + assert("nikita-1301", pend == SHIFT_RIGHT); + + total_size = item_length_by_coord(coord); + for (shift = units(coord) - want - 1; shift < units(coord) - 1; + ++shift) { + *size = total_size - part_size(coord, shift); + if (target == NULL) + *size += sizeof(cde_item_format); + if (*size <= free_space) + break; + } + shift = units(coord) - shift - 1; + } + if (shift == 0) + *size = 0; + CHECKME(coord); + return shift; +} + +/* ->copy_units() method for this item plugin. 
*/ +void copy_units_cde(coord_t * target /* coord of target item */ , + coord_t * source /* coord of source item */ , + unsigned from /* starting unit */ , + unsigned count /* how many units to copy */ , + shift_direction where_is_free_space /* shift direction */ , + unsigned free_space /* free space in item */ ) +{ + char *header_from; + char *header_to; + + char *entry_from; + char *entry_to; + + int pos_in_target; + int data_size; + int data_delta; + int i; + + assert("nikita-1303", target != NULL); + assert("nikita-1304", source != NULL); + assert("nikita-1305", (int)from < units(source)); + assert("nikita-1307", (int)(from + count) <= units(source)); + + if (where_is_free_space == SHIFT_LEFT) { + assert("nikita-1453", from == 0); + pos_in_target = units(target); + } else { + assert("nikita-1309", (int)(from + count) == units(source)); + pos_in_target = 0; + memmove(item_body_by_coord(target), + (char *)item_body_by_coord(target) + free_space, + item_length_by_coord(target) - free_space); + } + + CHECKME(target); + CHECKME(source); + + /* expand @target */ + data_size = + offset_of(source, (int)(from + count)) - offset_of(source, + (int)from); + + if (units(target) == 0) + free_space -= sizeof(cde_item_format); + + expand_item(target, pos_in_target, (int)count, + (int)(item_length_by_coord(target) - free_space), + (unsigned)data_size); + + /* copy first @count units of @source into @target */ + data_delta = + offset_of(target, pos_in_target) - offset_of(source, (int)from); + + /* copy entries */ + entry_from = (char *)entry_at(source, (int)from); + entry_to = (char *)entry_at(source, (int)(from + count)); + memmove(entry_at(target, pos_in_target), entry_from, + (unsigned)(entry_to - entry_from)); + + /* copy headers */ + header_from = (char *)header_at(source, (int)from); + header_to = (char *)header_at(source, (int)(from + count)); + memmove(header_at(target, pos_in_target), header_from, + (unsigned)(header_to - header_from)); + + /* update offsets */ + for (i 
= pos_in_target; i < (int)(pos_in_target + count); ++i) + adj_offset(target, i, data_delta); + CHECKME(target); + CHECKME(source); +} + +/* ->cut_units() method for this item plugin. */ +int cut_units_cde(coord_t * coord /* coord of item */ , + pos_in_node_t from /* start unit pos */ , + pos_in_node_t to /* stop unit pos */ , + struct carry_cut_data *cdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + char *header_from; + char *header_to; + + char *entry_from; + char *entry_to; + + int size; + int entry_delta; + int header_delta; + int i; + + unsigned count; + + CHECKME(coord); + + count = to - from + 1; + + assert("nikita-1454", coord != NULL); + assert("nikita-1455", (int)(from + count) <= units(coord)); + + if (smallest_removed) + unit_key_by_coord(coord, smallest_removed); + + if (new_first) { + coord_t next; + + /* not everything is cut from item head */ + assert("vs-1527", from == 0); + assert("vs-1528", to < units(coord) - 1); + + coord_dup(&next, coord); + next.unit_pos++; + unit_key_by_coord(&next, new_first); + } + + size = item_length_by_coord(coord); + if (count == (unsigned)units(coord)) { + return size; + } + + header_from = (char *)header_at(coord, (int)from); + header_to = (char *)header_at(coord, (int)(from + count)); + + entry_from = (char *)entry_at(coord, (int)from); + entry_to = (char *)entry_at(coord, (int)(from + count)); + + /* move headers */ + memmove(header_from, header_to, + (unsigned)(address(coord, size) - header_to)); + + header_delta = header_to - header_from; + + entry_from -= header_delta; + entry_to -= header_delta; + size -= header_delta; + + /* copy entries */ + memmove(entry_from, entry_to, + (unsigned)(address(coord, size) - entry_to)); + + entry_delta = entry_to - entry_from; + size -= entry_delta; + + /* update offsets */ + + for (i = 0; i < (int)from; ++i) + adj_offset(coord, i, -header_delta); + + for (i = from; i < units(coord) - (int)count; ++i) + adj_offset(coord, i, -header_delta - 
entry_delta); + + put_unaligned(cpu_to_le16((__u16) units(coord) - count), + &formatted_at(coord)->num_of_entries); + + if (from == 0) { + /* entries from head was removed - move remaining to right */ + memmove((char *)item_body_by_coord(coord) + + header_delta + entry_delta, item_body_by_coord(coord), + (unsigned)size); + if (REISER4_DEBUG) + memset(item_body_by_coord(coord), 0, + (unsigned)header_delta + entry_delta); + } else { + /* freed space is already at the end of item */ + if (REISER4_DEBUG) + memset((char *)item_body_by_coord(coord) + size, 0, + (unsigned)header_delta + entry_delta); + } + + return header_delta + entry_delta; +} + +int kill_units_cde(coord_t * coord /* coord of item */ , + pos_in_node_t from /* start unit pos */ , + pos_in_node_t to /* stop unit pos */ , + struct carry_kill_data *kdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + return cut_units_cde(coord, from, to, NULL, smallest_removed, new_first); +} + +/* ->s.dir.extract_key() method for this item plugin. */ +int extract_key_cde(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + directory_entry_format *dent; + + assert("nikita-1155", coord != NULL); + assert("nikita-1156", key != NULL); + + dent = entry_at(coord, idx_of(coord)); + return extract_key_from_id(&dent->id, key); +} + +int +update_key_cde(const coord_t * coord, const reiser4_key * key, + lock_handle * lh UNUSED_ARG) +{ + directory_entry_format *dent; + obj_key_id obj_id; + int result; + + assert("nikita-2344", coord != NULL); + assert("nikita-2345", key != NULL); + + dent = entry_at(coord, idx_of(coord)); + result = build_obj_key_id(key, &obj_id); + if (result == 0) { + dent->id = obj_id; + znode_make_dirty(coord->node); + } + return 0; +} + +/* ->s.dir.extract_name() method for this item plugin. 
*/ +char *extract_name_cde(const coord_t * coord /* coord of item */ , char *buf) +{ + directory_entry_format *dent; + + assert("nikita-1157", coord != NULL); + + dent = entry_at(coord, idx_of(coord)); + return extract_dent_name(coord, dent, buf); +} + +static int cde_bytes(int pasting, const reiser4_item_data * data) +{ + int result; + + result = data->length; + if (!pasting) + result -= sizeof(cde_item_format); + return result; +} + +/* ->s.dir.add_entry() method for this item plugin */ +int add_entry_cde(struct inode *dir /* directory object */ , + coord_t * coord /* coord of item */ , + lock_handle * lh /* lock handle for insertion */ , + const struct dentry *name /* name to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters of new + * directory entry */ ) +{ + reiser4_item_data data; + struct cde_entry entry; + struct cde_entry_data edata; + int result; + + assert("nikita-1656", coord->node == lh->node); + assert("nikita-1657", znode_is_write_locked(coord->node)); + + edata.num_of_entries = 1; + edata.entry = &entry; + + entry.dir = dir; + entry.obj = dir_entry->obj; + entry.name = &name->d_name; + + data.data = (char *)&edata; + data.user = 0; /* &edata is not user space */ + data.iplug = item_plugin_by_id(COMPOUND_DIR_ID); + data.arg = dir_entry; + assert("nikita-1302", data.iplug != NULL); + + result = is_dot_key(&dir_entry->key); + data.length = estimate_cde(result ? 
coord : NULL, &data); + + inode_add_bytes(dir, cde_bytes(result, &data)); + + if (result) + result = insert_by_coord(coord, &data, &dir_entry->key, lh, 0); + else + result = reiser4_resize_item(coord, &data, &dir_entry->key, + lh, 0); + return result; +} + +/* ->s.dir.rem_entry() */ +int rem_entry_cde(struct inode *dir /* directory of item */ , + const struct qstr *name, coord_t * coord /* coord of item */ , + lock_handle * lh UNUSED_ARG /* lock handle for + * removal */ , + reiser4_dir_entry_desc * entry UNUSED_ARG /* parameters of + * directory entry + * being removed */ ) +{ + coord_t shadow; + int result; + int length; + ON_DEBUG(char buf[DE_NAME_BUF_LEN]); + + assert("nikita-2870", strlen(name->name) == name->len); + assert("nikita-2869", + !strcmp(name->name, extract_name_cde(coord, buf))); + + length = sizeof(directory_entry_format) + sizeof(cde_unit_header); + if (is_longname(name->name, name->len)) + length += name->len + 1; + + if (inode_get_bytes(dir) < length) { + warning("nikita-2628", "Dir is broke: %llu: %llu", + (unsigned long long)get_inode_oid(dir), + inode_get_bytes(dir)); + + return RETERR(-EIO); + } + + /* cut_node() is supposed to take pointers to _different_ + coords, because it will modify them without respect to + possible aliasing. To work around this, create temporary copy + of @coord. + */ + coord_dup(&shadow, coord); + result = + kill_node_content(coord, &shadow, NULL, NULL, NULL, NULL, NULL, 0); + if (result == 0) { + inode_sub_bytes(dir, length); + } + return result; +} + +/* ->s.dir.max_name_len() method for this item plugin */ +int max_name_len_cde(const struct inode *dir /* directory */ ) +{ + return + reiser4_tree_by_inode(dir)->nplug->max_item_size() - + sizeof(directory_entry_format) - sizeof(cde_item_format) - + sizeof(cde_unit_header) - 2; +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/cde.h b/fs/reiser4/plugin/item/cde.h new file mode 100644 index 000000000000..f599714415c8 --- /dev/null +++ b/fs/reiser4/plugin/item/cde.h @@ -0,0 +1,87 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Compound directory item. See cde.c for description. */ + +#if !defined( __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ ) +#define __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ + +#include "../../forward.h" +#include "../../kassign.h" +#include "../../dformat.h" + +#include /* for struct inode */ +#include /* for struct dentry, etc */ + +typedef struct cde_unit_header { + de_id hash; + d16 offset; +} cde_unit_header; + +typedef struct cde_item_format { + d16 num_of_entries; + cde_unit_header entry[0]; +} cde_item_format; + +struct cde_entry { + const struct inode *dir; + const struct inode *obj; + const struct qstr *name; +}; + +struct cde_entry_data { + int num_of_entries; + struct cde_entry *entry; +}; + +/* plugin->item.b.* */ +reiser4_key *max_key_inside_cde(const coord_t * coord, reiser4_key * result); +int can_contain_key_cde(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_cde(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_cde(const coord_t * coord); +reiser4_key *unit_key_cde(const coord_t * coord, reiser4_key * key); +int estimate_cde(const coord_t * coord, const reiser4_item_data * data); +void print_cde(const char *prefix, coord_t * coord); +int init_cde(coord_t * coord, coord_t * from, reiser4_item_data * data); +lookup_result lookup_cde(const reiser4_key * key, lookup_bias bias, + coord_t * coord); +int paste_cde(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG); +int can_shift_cde(unsigned free_space, coord_t * coord, znode * target, + shift_direction pend, unsigned 
*size, unsigned want); +void copy_units_cde(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int cut_units_cde(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_cde(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +void print_cde(const char *prefix, coord_t * coord); +int reiser4_check_cde(const coord_t * coord, const char **error); + +/* plugin->u.item.s.dir.* */ +int extract_key_cde(const coord_t * coord, reiser4_key * key); +int update_key_cde(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_cde(const coord_t * coord, char *buf); +int add_entry_cde(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int rem_entry_cde(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_cde(const struct inode *dir); + +/* __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/ctail.c b/fs/reiser4/plugin/item/ctail.c new file mode 100644 index 000000000000..97f7f93725f5 --- /dev/null +++ b/fs/reiser4/plugin/item/ctail.c @@ -0,0 +1,1769 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* ctails (aka "clustered tails") are items for cryptcompress objects */ + +/* DESCRIPTION: + +Each cryptcompress object is stored on disk as a set of clusters sliced +into ctails. 
+ +Internal on-disk structure: + + HEADER (1) Here stored disk cluster shift + BODY +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" +#include "../../coord.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../object.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" +#include "../../super.h" +#include "../../context.h" +#include "../../page_cache.h" +#include "../cluster.h" +#include "../../flush.h" +#include "../../tree_walk.h" + +#include +#include +#include + +/* return body of ctail item at @coord */ +static ctail_item_format *ctail_formatted_at(const coord_t * coord) +{ + assert("edward-60", coord != NULL); + return item_body_by_coord(coord); +} + +static int cluster_shift_by_coord(const coord_t * coord) +{ + return get_unaligned(&ctail_formatted_at(coord)->cluster_shift); +} + +static inline void dclust_set_extension_shift(hint_t * hint) +{ + assert("edward-1270", + item_id_by_coord(&hint->ext_coord.coord) == CTAIL_ID); + hint->ext_coord.extension.ctail.shift = + cluster_shift_by_coord(&hint->ext_coord.coord); +} + +static loff_t off_by_coord(const coord_t * coord) +{ + reiser4_key key; + return get_key_offset(item_key_by_coord(coord, &key)); +} + +int coord_is_unprepped_ctail(const coord_t * coord) +{ + assert("edward-1233", coord != NULL); + assert("edward-1234", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1235", + ergo((int)cluster_shift_by_coord(coord) == (int)UCTAIL_SHIFT, + nr_units_ctail(coord) == (pos_in_node_t) UCTAIL_NR_UNITS)); + + return (int)cluster_shift_by_coord(coord) == (int)UCTAIL_SHIFT; +} + +static cloff_t clust_by_coord(const coord_t * coord, struct inode *inode) +{ + int shift; + + if (inode != NULL) { + shift = inode_cluster_shift(inode); + assert("edward-1236", + ergo(!coord_is_unprepped_ctail(coord), + shift == cluster_shift_by_coord(coord))); + } 
else { + assert("edward-1237", !coord_is_unprepped_ctail(coord)); + shift = cluster_shift_by_coord(coord); + } + return off_by_coord(coord) >> shift; +} + +static int disk_cluster_size(const coord_t * coord) +{ + assert("edward-1156", + item_plugin_by_coord(coord) == item_plugin_by_id(CTAIL_ID)); + /* calculation of disk cluster size + is meaninless if ctail is unprepped */ + assert("edward-1238", !coord_is_unprepped_ctail(coord)); + + return 1 << cluster_shift_by_coord(coord); +} + +/* true if the key is of first disk cluster item */ +static int is_disk_cluster_key(const reiser4_key * key, const coord_t * coord) +{ + assert("edward-1239", item_id_by_coord(coord) == CTAIL_ID); + + return coord_is_unprepped_ctail(coord) || + ((get_key_offset(key) & + ((loff_t) disk_cluster_size(coord) - 1)) == 0); +} + +static char *first_unit(coord_t * coord) +{ + /* FIXME: warning: pointer of type `void *' used in arithmetic */ + return (char *)item_body_by_coord(coord) + sizeof(ctail_item_format); +} + +/* plugin->u.item.b.max_key_inside : + tail_max_key_inside */ + +/* plugin->u.item.b.can_contain_key */ +int can_contain_key_ctail(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data * data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key)) + return 0; + if (get_key_offset(&item_key) + nr_units_ctail(coord) != + get_key_offset(key)) + return 0; + if (is_disk_cluster_key(key, coord)) + /* + * can not merge at the beginning + * of a logical cluster in a file + */ + return 0; + return 1; +} + +/* plugin->u.item.b.mergeable */ +int mergeable_ctail(const coord_t * p1, const coord_t * p2) +{ + reiser4_key key1, key2; + + assert("edward-62", item_id_by_coord(p1) == CTAIL_ID); + assert("edward-61", plugin_of_group(item_plugin_by_coord(p1), + 
UNIX_FILE_METADATA_ITEM_TYPE)); + + if (item_id_by_coord(p2) != CTAIL_ID) { + /* second item is of another type */ + return 0; + } + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) || + get_key_type(&key1) != get_key_type(&key2)) { + /* items of different objects */ + return 0; + } + if (get_key_offset(&key1) + nr_units_ctail(p1) != get_key_offset(&key2)) + /* not adjacent items */ + return 0; + if (is_disk_cluster_key(&key2, p2)) + /* + * can not merge at the beginning + * of a logical cluster in a file + */ + return 0; + return 1; +} + +/* plugin->u.item.b.nr_units */ +pos_in_node_t nr_units_ctail(const coord_t * coord) +{ + return (item_length_by_coord(coord) - + sizeof(ctail_formatted_at(coord)->cluster_shift)); +} + +/* plugin->u.item.b.estimate: + estimate how much space is needed to insert/paste @data->length bytes + into ctail at @coord */ +int estimate_ctail(const coord_t * coord /* coord of item */ , + const reiser4_item_data * + data /* parameters for new item */ ) +{ + if (coord == NULL) + /* insert */ + return (sizeof(ctail_item_format) + data->length); + else + /* paste */ + return data->length; +} + +/* ->init() method for this item plugin. 
*/ +int init_ctail(coord_t * to /* coord of item */ , + coord_t * from /* old_item */ , + reiser4_item_data * data /* structure used for insertion */ ) +{ + int cluster_shift; /* cpu value to convert */ + + if (data) { + assert("edward-463", data->length > sizeof(ctail_item_format)); + cluster_shift = *((int *)(data->arg)); + data->length -= sizeof(ctail_item_format); + } else { + assert("edward-464", from != NULL); + assert("edward-855", ctail_ok(from)); + cluster_shift = (int)(cluster_shift_by_coord(from)); + } + put_unaligned((d8)cluster_shift, &ctail_formatted_at(to)->cluster_shift); + assert("edward-856", ctail_ok(to)); + return 0; +} + +/* plugin->u.item.b.lookup: + NULL: We are looking for item keys only */ + +#if REISER4_DEBUG +int ctail_ok(const coord_t * coord) +{ + return coord_is_unprepped_ctail(coord) || + cluster_shift_ok(cluster_shift_by_coord(coord)); +} + +/* plugin->u.item.b.check */ +int check_ctail(const coord_t * coord, const char **error) +{ + if (!ctail_ok(coord)) { + if (error) + *error = "bad cluster shift in ctail"; + return 1; + } + return 0; +} +#endif + +/* plugin->u.item.b.paste */ +int +paste_ctail(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG) +{ + unsigned old_nr_units; + + assert("edward-268", data->data != NULL); + /* copy only from kernel space */ + assert("edward-66", data->user == 0); + + old_nr_units = + item_length_by_coord(coord) - sizeof(ctail_item_format) - + data->length; + + /* ctail items never get pasted in the middle */ + + if (coord->unit_pos == 0 && coord->between == AT_UNIT) { + + /* paste at the beginning when create new item */ + assert("edward-450", + item_length_by_coord(coord) == + data->length + sizeof(ctail_item_format)); + assert("edward-451", old_nr_units == 0); + } else if (coord->unit_pos == old_nr_units - 1 + && coord->between == AFTER_UNIT) { + + /* paste at the end */ + coord->unit_pos++; + } else + impossible("edward-453", "bad paste position"); + + 
memcpy(first_unit(coord) + coord->unit_pos, data->data, data->length); + + assert("edward-857", ctail_ok(coord)); + + return 0; +} + +/* plugin->u.item.b.fast_paste */ + +/* + * plugin->u.item.b.can_shift + * + * Return number of units that can be shifted; + * Store space (in bytes) occupied by those units in @size. + */ +int can_shift_ctail(unsigned free_space, coord_t *source, + znode * target, shift_direction direction UNUSED_ARG, + unsigned *size, unsigned want) +{ + /* make sure that that we do not want to shift more than we have */ + assert("edward-68", want > 0 && want <= nr_units_ctail(source)); + + *size = min(want, free_space); + + if (!target) { + /* + * new item will be created + */ + if (*size <= sizeof(ctail_item_format)) { + /* + * can not shift only ctail header + */ + *size = 0; + return 0; + } + return *size - sizeof(ctail_item_format); + } + else + /* + * shifting to the mergeable item + */ + return *size; +} + +/* + * plugin->u.item.b.copy_units + * cooperates with ->can_shift() + */ +void copy_units_ctail(coord_t * target, coord_t * source, + unsigned from, unsigned count /* units */ , + shift_direction where_is_free_space, + unsigned free_space /* bytes */ ) +{ + /* make sure that item @target is expanded already */ + assert("edward-69", (unsigned)item_length_by_coord(target) >= count); + assert("edward-70", free_space == count || free_space == count + 1); + + assert("edward-858", ctail_ok(source)); + + if (where_is_free_space == SHIFT_LEFT) { + /* + * append item @target with @count first bytes + * of @source: this restriction came from ordinary tails + */ + assert("edward-71", from == 0); + assert("edward-860", ctail_ok(target)); + + memcpy(first_unit(target) + nr_units_ctail(target) - count, + first_unit(source), count); + } else { + /* + * target item is moved to right already + */ + reiser4_key key; + + assert("edward-72", nr_units_ctail(source) == from + count); + + if (free_space == count) { + init_ctail(target, source, NULL); + } else 
{ + /* + * shifting to a mergeable item + */ + assert("edward-862", ctail_ok(target)); + } + memcpy(first_unit(target), first_unit(source) + from, count); + + assert("edward-863", ctail_ok(target)); + /* + * new units are inserted before first unit + * in an item, therefore, we have to update + * item key + */ + item_key_by_coord(source, &key); + set_key_offset(&key, get_key_offset(&key) + from); + + node_plugin_by_node(target->node)->update_item_key(target, + &key, + NULL /*info */); + } +} + +/* plugin->u.item.b.create_hook */ +int create_hook_ctail(const coord_t * coord, void *arg) +{ + assert("edward-864", znode_is_loaded(coord->node)); + + znode_set_convertible(coord->node); + return 0; +} + +/* plugin->u.item.b.kill_hook */ +int kill_hook_ctail(const coord_t * coord, pos_in_node_t from, + pos_in_node_t count, carry_kill_data * kdata) +{ + struct inode *inode; + + assert("edward-1157", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-291", znode_is_write_locked(coord->node)); + + inode = kdata->inode; + if (inode) { + reiser4_key key; + struct cryptcompress_info * info; + cloff_t index; + + item_key_by_coord(coord, &key); + info = cryptcompress_inode_data(inode); + index = off_to_clust(get_key_offset(&key), inode); + + if (from == 0) { + info->trunc_index = index; + if (is_disk_cluster_key(&key, coord)) { + /* + * first item of disk cluster is to be killed + */ + truncate_complete_page_cluster( + inode, index, kdata->params.truncate); + inode_sub_bytes(inode, + inode_cluster_size(inode)); + } + } + } + return 0; +} + +/* for shift_hook_ctail(), + return true if the first disk cluster item has dirty child +*/ +static int ctail_convertible(const coord_t * coord) +{ + int result; + reiser4_key key; + jnode *child = NULL; + + assert("edward-477", coord != NULL); + assert("edward-478", item_id_by_coord(coord) == CTAIL_ID); + + if (coord_is_unprepped_ctail(coord)) + /* unprepped ctail should be converted */ + return 1; + + item_key_by_coord(coord, &key); + 
child = jlookup(current_tree, + get_key_objectid(&key), + off_to_pg(off_by_coord(coord))); + if (!child) + return 0; + result = JF_ISSET(child, JNODE_DIRTY); + jput(child); + return result; +} + +/* FIXME-EDWARD */ +/* plugin->u.item.b.shift_hook */ +int shift_hook_ctail(const coord_t * item /* coord of item */ , + unsigned from UNUSED_ARG /* start unit */ , + unsigned count UNUSED_ARG /* stop unit */ , + znode * old_node /* old parent */ ) +{ + assert("edward-479", item != NULL); + assert("edward-480", item->node != old_node); + + if (!znode_convertible(old_node) || znode_convertible(item->node)) + return 0; + if (ctail_convertible(item)) + znode_set_convertible(item->node); + return 0; +} + +static int +cut_or_kill_ctail_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + int cut, void *p, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + pos_in_node_t count; /* number of units to cut */ + char *item; + + count = to - from + 1; + item = item_body_by_coord(coord); + + assert("edward-74", ergo(from != 0, to == coord_last_unit_pos(coord))); + + if (smallest_removed) { + /* store smallest key removed */ + item_key_by_coord(coord, smallest_removed); + set_key_offset(smallest_removed, + get_key_offset(smallest_removed) + from); + } + + if (new_first) { + assert("vs-1531", from == 0); + + item_key_by_coord(coord, new_first); + set_key_offset(new_first, + get_key_offset(new_first) + from + count); + } + + if (!cut) + kill_hook_ctail(coord, from, 0, (struct carry_kill_data *)p); + + if (from == 0) { + if (count != nr_units_ctail(coord)) { + /* part of item is removed, so move free space at the beginning + of the item and update item key */ + reiser4_key key; + memcpy(item + to + 1, item, sizeof(ctail_item_format)); + item_key_by_coord(coord, &key); + set_key_offset(&key, get_key_offset(&key) + count); + node_plugin_by_node(coord->node)->update_item_key(coord, + &key, + NULL); + } else { + /* cut_units should not be called to cut evrything */ + 
assert("vs-1532", ergo(cut, 0));
+			/* whole item is cut, so more than the amount of space
+			   occupied by units got freed */
+			count += sizeof(ctail_item_format);
+		}
+	}
+	return count;
+}
+
+/* plugin->u.item.b.cut_units */
+int
+cut_units_ctail(coord_t * item, pos_in_node_t from, pos_in_node_t to,
+		carry_cut_data * cdata, reiser4_key * smallest_removed,
+		reiser4_key * new_first)
+{
+	return cut_or_kill_ctail_units(item, from, to, 1, NULL,
+				       smallest_removed, new_first);
+}
+
+/* plugin->u.item.b.kill_units */
+int
+kill_units_ctail(coord_t * item, pos_in_node_t from, pos_in_node_t to,
+		 struct carry_kill_data *kdata, reiser4_key * smallest_removed,
+		 reiser4_key * new_first)
+{
+	return cut_or_kill_ctail_units(item, from, to, 0, kdata,
+				       smallest_removed, new_first);
+}
+
+/* plugin->u.item.s.file.read */
+int read_ctail(struct file *file UNUSED_ARG, flow_t * f, hint_t * hint)
+{
+	uf_coord_t *uf_coord;
+	coord_t *coord;
+
+	uf_coord = &hint->ext_coord;
+	coord = &uf_coord->coord;
+	assert("edward-127", f->user == 0);
+	assert("edward-129", coord && coord->node);
+	assert("edward-130", coord_is_existing_unit(coord));
+	assert("edward-132", znode_is_loaded(coord->node));
+
+	/* start read only from the beginning of ctail */
+	assert("edward-133", coord->unit_pos == 0);
+	/* read only whole ctails */
+	assert("edward-135", nr_units_ctail(coord) <= f->length);
+
+	assert("edward-136", reiser4_schedulable());
+	assert("edward-886", ctail_ok(coord));
+
+	if (f->data)
+		memcpy(f->data, (char *)first_unit(coord),
+		       (size_t) nr_units_ctail(coord));
+
+	dclust_set_extension_shift(hint);
+	mark_page_accessed(znode_page(coord->node));
+	move_flow_forward(f, nr_units_ctail(coord));
+
+	return 0;
+}
+
+/**
+ * Prepare transform stream with plain text for page
+ * @page taking into account synchronization issues.
+ */
+static int ctail_read_disk_cluster(struct cluster_handle * clust,
+				   struct inode * inode, struct page * page,
+				   znode_lock_mode mode)
+{
+	int result;
+
+	assert("edward-1450",
+	       mode == ZNODE_READ_LOCK || mode == ZNODE_WRITE_LOCK);
+	assert("edward-671", clust->hint != NULL);
+	assert("edward-140", clust->dstat == INVAL_DISK_CLUSTER);
+	assert("edward-672", cryptcompress_inode_ok(inode));
+	assert("edward-1527", PageLocked(page));
+
+	unlock_page(page);
+
+	/* set input stream */
+	result = grab_tfm_stream(inode, &clust->tc, INPUT_STREAM);
+	if (result) {
+		lock_page(page);
+		return result;
+	}
+	result = find_disk_cluster(clust, inode, 1 /* read items */, mode);
+	lock_page(page);
+	if (result)
+		return result;
+	/*
+	 * at this point we have locked position in the tree
+	 */
+	assert("edward-1528", znode_is_any_locked(clust->hint->lh.node));
+
+	if (page->mapping != inode->i_mapping) {
+		/* page was truncated */
+		reiser4_unset_hint(clust->hint);
+		reset_cluster_params(clust);
+		return AOP_TRUNCATED_PAGE;
+	}
+	if (PageUptodate(page)) {
+		/* disk cluster can be obsolete, don't use it! */
+		reiser4_unset_hint(clust->hint);
+		reset_cluster_params(clust);
+		return 0;
+	}
+	if (clust->dstat == FAKE_DISK_CLUSTER ||
+	    clust->dstat == UNPR_DISK_CLUSTER ||
+	    clust->dstat == TRNC_DISK_CLUSTER) {
+		/*
+		 * this information about disk cluster will be valid
+		 * as long as we keep the position in the tree locked
+		 */
+		tfm_cluster_set_uptodate(&clust->tc);
+		return 0;
+	}
+	/* now prepare output stream.. */
+	result = grab_coa(&clust->tc, inode_compression_plugin(inode));
+	if (result)
+		return result;
+	/* ..and fill this with plain text */
+	result = reiser4_inflate_cluster(clust, inode);
+	if (result)
+		return result;
+	/*
+	 * The stream is ready! It won't be obsolete as
+	 * long as we keep last disk cluster item locked.
+	 */
+	tfm_cluster_set_uptodate(&clust->tc);
+	return 0;
+}
+
+/*
+ * fill one page with plain text.
+ */ +int do_readpage_ctail(struct inode * inode, struct cluster_handle * clust, + struct page *page, znode_lock_mode mode) +{ + int ret; + unsigned cloff; + char *data; + size_t to_page; + struct tfm_cluster * tc = &clust->tc; + + assert("edward-212", PageLocked(page)); + + if (unlikely(page->mapping != inode->i_mapping)) + return AOP_TRUNCATED_PAGE; + if (PageUptodate(page)) + goto exit; + to_page = pbytes(page_index(page), inode); + if (to_page == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + goto exit; + } + if (!tfm_cluster_is_uptodate(&clust->tc)) { + clust->index = pg_to_clust(page->index, inode); + + /* this will unlock/lock the page */ + ret = ctail_read_disk_cluster(clust, inode, page, mode); + + assert("edward-212", PageLocked(page)); + if (ret) + return ret; + + /* refresh bytes */ + to_page = pbytes(page_index(page), inode); + if (to_page == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + goto exit; + } + } + if (PageUptodate(page)) + /* somebody else fill it already */ + goto exit; + + assert("edward-119", tfm_cluster_is_uptodate(tc)); + assert("edward-1529", znode_is_any_locked(clust->hint->lh.node)); + + switch (clust->dstat) { + case UNPR_DISK_CLUSTER: + /* + * Page is not uptodate and item cluster is unprepped: + * this must not ever happen. + */ + warning("edward-1632", + "Bad item cluster %lu (Inode %llu). Fsck?", + clust->index, + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EIO); + case TRNC_DISK_CLUSTER: + /* + * Race with truncate! 
+ * We resolve it in favour of the last one (the only way, + * as in this case plain text is unrecoverable) + */ + case FAKE_DISK_CLUSTER: + /* fill the page by zeroes */ + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + break; + case PREP_DISK_CLUSTER: + /* fill page by transformed stream with plain text */ + assert("edward-1058", !PageUptodate(page)); + assert("edward-120", tc->len <= inode_cluster_size(inode)); + + /* page index in this logical cluster */ + cloff = pg_to_off_to_cloff(page->index, inode); + + data = kmap(page); + memcpy(data, tfm_stream_data(tc, OUTPUT_STREAM) + cloff, to_page); + memset(data + to_page, 0, (size_t) PAGE_SIZE - to_page); + flush_dcache_page(page); + kunmap(page); + SetPageUptodate(page); + break; + default: + impossible("edward-1169", "bad disk cluster state"); + } + exit: + return 0; +} + +/* plugin->u.item.s.file.readpage */ +int readpage_ctail(void *vp, struct page *page) +{ + int result; + hint_t * hint; + struct cluster_handle * clust = vp; + + assert("edward-114", clust != NULL); + assert("edward-115", PageLocked(page)); + assert("edward-116", !PageUptodate(page)); + assert("edward-118", page->mapping && page->mapping->host); + assert("edward-867", !tfm_cluster_is_uptodate(&clust->tc)); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + unlock_page(page); + return RETERR(-ENOMEM); + } + clust->hint = hint; + result = load_file_hint(clust->file, hint); + if (result) { + kfree(hint); + unlock_page(page); + return result; + } + assert("vs-25", hint->ext_coord.lh == &hint->lh); + + result = do_readpage_ctail(page->mapping->host, clust, page, + ZNODE_READ_LOCK); + assert("edward-213", PageLocked(page)); + assert("edward-1163", ergo(!result, PageUptodate(page))); + + unlock_page(page); + done_lh(&hint->lh); + hint->ext_coord.valid = 0; + save_file_hint(clust->file, hint); + kfree(hint); + tfm_cluster_clr_uptodate(&clust->tc); + + return result; +} + +/* Helper function for 
->readpages() */ +static int ctail_read_page_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int i; + int result; + assert("edward-779", clust != NULL); + assert("edward-1059", clust->win == NULL); + assert("edward-780", inode != NULL); + + result = prepare_page_cluster(inode, clust, READ_OP); + if (result) + return result; + + assert("edward-781", !tfm_cluster_is_uptodate(&clust->tc)); + + for (i = 0; i < clust->nr_pages; i++) { + struct page *page = clust->pages[i]; + lock_page(page); + result = do_readpage_ctail(inode, clust, page, ZNODE_READ_LOCK); + unlock_page(page); + if (result) + break; + } + tfm_cluster_clr_uptodate(&clust->tc); + put_page_cluster(clust, inode, READ_OP); + return result; +} + +/* filler for read_cache_pages() */ +static int ctail_readpages_filler(void * data, struct page * page) +{ + int ret = 0; + struct cluster_handle * clust = data; + struct inode * inode = file_inode(clust->file); + + assert("edward-1525", page->mapping == inode->i_mapping); + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + if (pbytes(page_index(page), inode) == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + move_cluster_forward(clust, inode, page->index); + unlock_page(page); + /* + * read the whole page cluster + */ + ret = ctail_read_page_cluster(clust, inode); + + assert("edward-869", !tfm_cluster_is_uptodate(&clust->tc)); + return ret; +} + +/* + * We populate a bit more then upper readahead suggests: + * with each nominated page we read the whole page cluster + * this page belongs to. 
+ */ +int readpages_ctail(struct file *file, struct address_space *mapping, + struct list_head *pages) +{ + int ret = 0; + hint_t *hint; + struct cluster_handle clust; + struct inode *inode = mapping->host; + + assert("edward-1521", inode == file_inode(file)); + + cluster_init_read(&clust, NULL); + clust.file = file; + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + warning("vs-28", "failed to allocate hint"); + ret = RETERR(-ENOMEM); + goto exit1; + } + clust.hint = hint; + ret = load_file_hint(clust.file, hint); + if (ret) { + warning("edward-1522", "failed to load hint"); + goto exit2; + } + assert("vs-26", hint->ext_coord.lh == &hint->lh); + ret = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (ret) { + warning("edward-1523", "failed to alloc pgset"); + goto exit3; + } + ret = read_cache_pages(mapping, pages, ctail_readpages_filler, &clust); + + assert("edward-870", !tfm_cluster_is_uptodate(&clust.tc)); + exit3: + done_lh(&hint->lh); + save_file_hint(file, hint); + hint->ext_coord.valid = 0; + exit2: + kfree(hint); + exit1: + put_cluster_handle(&clust); + return ret; +} + +/* + plugin->u.item.s.file.append_key + key of the first item of the next disk cluster +*/ +reiser4_key *append_key_ctail(const coord_t * coord, reiser4_key * key) +{ + assert("edward-1241", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1242", cluster_shift_ok(cluster_shift_by_coord(coord))); + + item_key_by_coord(coord, key); + set_key_offset(key, ((__u64) (clust_by_coord(coord, NULL)) + 1) + << cluster_shift_by_coord(coord)); + return key; +} + +static int insert_unprepped_ctail(struct cluster_handle * clust, + struct inode *inode) +{ + int result; + char buf[UCTAIL_NR_UNITS]; + reiser4_item_data data; + reiser4_key key; + int shift = (int)UCTAIL_SHIFT; + + memset(buf, 0, (size_t) UCTAIL_NR_UNITS); + result = key_by_inode_cryptcompress(inode, + clust_to_off(clust->index, inode), + &key); + if (result) + return result; + data.user = 0; + 
data.iplug = item_plugin_by_id(CTAIL_ID); + data.arg = &shift; + data.length = sizeof(ctail_item_format) + (size_t) UCTAIL_NR_UNITS; + data.data = buf; + + result = insert_by_coord(&clust->hint->ext_coord.coord, + &data, &key, clust->hint->ext_coord.lh, 0); + return result; +} + +static int +insert_cryptcompress_flow(coord_t * coord, lock_handle * lh, flow_t * f, + int cluster_shift) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + reiser4_item_data *data; + carry_op *op; + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + data = (reiser4_item_data *) (lowest_level + 3); + + assert("edward-466", coord->between == AFTER_ITEM + || coord->between == AFTER_UNIT || coord->between == BEFORE_ITEM + || coord->between == EMPTY_NODE + || coord->between == BEFORE_UNIT); + + if (coord->between == AFTER_UNIT) { + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + } + op = reiser4_post_carry(lowest_level, COP_INSERT_FLOW, coord->node, + 0 /* operate directly on coord -> node */); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? 
PTR_ERR(op) : -EIO); + } + data->user = 0; + data->iplug = item_plugin_by_id(CTAIL_ID); + data->arg = &cluster_shift; + + data->length = 0; + data->data = NULL; + + op->u.insert_flow.flags = + COPI_SWEEP | + COPI_DONT_SHIFT_LEFT | + COPI_DONT_SHIFT_RIGHT; + op->u.insert_flow.insert_point = coord; + op->u.insert_flow.flow = f; + op->u.insert_flow.data = data; + op->u.insert_flow.new_nodes = 0; + + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* Implementation of CRC_APPEND_ITEM mode of ctail conversion */ +static int insert_cryptcompress_flow_in_place(coord_t * coord, + lock_handle * lh, flow_t * f, + int cluster_shift) +{ + int ret; + coord_t pos; + lock_handle lock; + + assert("edward-484", + coord->between == AT_UNIT || coord->between == AFTER_ITEM); + assert("edward-485", item_id_by_coord(coord) == CTAIL_ID); + + coord_dup(&pos, coord); + pos.unit_pos = 0; + pos.between = AFTER_ITEM; + + init_lh(&lock); + copy_lh(&lock, lh); + + ret = insert_cryptcompress_flow(&pos, &lock, f, cluster_shift); + done_lh(&lock); + assert("edward-1347", znode_is_write_locked(lh->node)); + assert("edward-1228", !ret); + return ret; +} + +/* Implementation of CRC_OVERWRITE_ITEM mode of ctail conversion */ +static int overwrite_ctail(coord_t * coord, flow_t * f) +{ + unsigned count; + + assert("edward-269", f->user == 0); + assert("edward-270", f->data != NULL); + assert("edward-271", f->length > 0); + assert("edward-272", coord_is_existing_unit(coord)); + assert("edward-273", coord->unit_pos == 0); + assert("edward-274", znode_is_write_locked(coord->node)); + assert("edward-275", reiser4_schedulable()); + assert("edward-467", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1243", ctail_ok(coord)); + + count = nr_units_ctail(coord); + + if (count > f->length) + count = f->length; + memcpy(first_unit(coord), f->data, count); + move_flow_forward(f, 
count); + coord->unit_pos += count; + return 0; +} + +/* Implementation of CRC_CUT_ITEM mode of ctail conversion: + cut ctail (part or whole) starting from next unit position */ +static int cut_ctail(coord_t * coord) +{ + coord_t stop; + + assert("edward-435", coord->between == AT_UNIT && + coord->item_pos < coord_num_items(coord) && + coord->unit_pos <= coord_num_units(coord)); + + if (coord->unit_pos == coord_num_units(coord)) + /* nothing to cut */ + return 0; + coord_dup(&stop, coord); + stop.unit_pos = coord_last_unit_pos(coord); + + return cut_node_content(coord, &stop, NULL, NULL, NULL); +} + +int ctail_insert_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + int result; + assert("edward-1244", inode != NULL); + assert("edward-1245", clust->hint != NULL); + assert("edward-1246", clust->dstat == FAKE_DISK_CLUSTER); + assert("edward-1247", clust->reserved == 1); + + result = get_disk_cluster_locked(clust, inode, ZNODE_WRITE_LOCK); + if (cbk_errored(result)) + return result; + assert("edward-1249", result == CBK_COORD_NOTFOUND); + assert("edward-1250", znode_is_write_locked(clust->hint->lh.node)); + + assert("edward-1295", + clust->hint->ext_coord.lh->node == + clust->hint->ext_coord.coord.node); + + coord_set_between_clusters(&clust->hint->ext_coord.coord); + + result = insert_unprepped_ctail(clust, inode); + all_grabbed2free(); + + assert("edward-1251", !result); + assert("edward-1252", cryptcompress_inode_ok(inode)); + assert("edward-1253", znode_is_write_locked(clust->hint->lh.node)); + assert("edward-1254", + reiser4_clustered_blocks(reiser4_get_current_sb())); + assert("edward-1255", + znode_convertible(clust->hint->ext_coord.coord.node)); + + return result; +} + +/* plugin->u.item.f.scan */ +int scan_ctail(flush_scan * scan) +{ + int result = 0; + struct page *page; + struct inode *inode; + jnode *node = scan->node; + + assert("edward-227", scan->node != NULL); + assert("edward-228", jnode_is_cluster_page(scan->node)); + 
assert("edward-639", znode_is_write_locked(scan->parent_lock.node)); + + page = jnode_page(node); + inode = page->mapping->host; + + if (!reiser4_scanning_left(scan)) + return result; + if (!ZF_ISSET(scan->parent_lock.node, JNODE_DIRTY)) + znode_make_dirty(scan->parent_lock.node); + + if (!znode_convertible(scan->parent_lock.node)) { + if (JF_ISSET(scan->node, JNODE_DIRTY)) + znode_set_convertible(scan->parent_lock.node); + else { + warning("edward-681", + "cluster page is already processed"); + return -EAGAIN; + } + } + return result; +} + +/* If true, this function attaches children */ +static int should_attach_convert_idata(flush_pos_t * pos) +{ + int result; + assert("edward-431", pos != NULL); + assert("edward-432", pos->child == NULL); + assert("edward-619", znode_is_write_locked(pos->coord.node)); + assert("edward-470", + item_plugin_by_coord(&pos->coord) == + item_plugin_by_id(CTAIL_ID)); + + /* check for leftmost child */ + utmost_child_ctail(&pos->coord, LEFT_SIDE, &pos->child); + + if (!pos->child) + return 0; + spin_lock_jnode(pos->child); + result = (JF_ISSET(pos->child, JNODE_DIRTY) && + pos->child->atom == ZJNODE(pos->coord.node)->atom); + spin_unlock_jnode(pos->child); + if (!result && pos->child) { + /* existing child isn't to attach, clear up this one */ + jput(pos->child); + pos->child = NULL; + } + return result; +} + +/** + * Collect all needed information about the object here, + * as in-memory inode can be evicted from memory before + * disk update completion. 
+ */ +static int init_convert_data_ctail(struct convert_item_info * idata, + struct inode *inode) +{ + assert("edward-813", idata != NULL); + assert("edward-814", inode != NULL); + + idata->cluster_shift = inode_cluster_shift(inode); + idata->d_cur = DC_FIRST_ITEM; + idata->d_next = DC_INVALID_STATE; + + return 0; +} + +static int alloc_item_convert_data(struct convert_info * sq) +{ + assert("edward-816", sq != NULL); + assert("edward-817", sq->itm == NULL); + + sq->itm = kmalloc(sizeof(*sq->itm), reiser4_ctx_gfp_mask_get()); + if (sq->itm == NULL) + return RETERR(-ENOMEM); + init_lh(&sq->right_lock); + sq->right_locked = 0; + return 0; +} + +static void free_item_convert_data(struct convert_info * sq) +{ + assert("edward-818", sq != NULL); + assert("edward-819", sq->itm != NULL); + assert("edward-820", sq->iplug != NULL); + + done_lh(&sq->right_lock); + sq->right_locked = 0; + kfree(sq->itm); + sq->itm = NULL; + return; +} + +static struct convert_info *alloc_convert_data(void) +{ + struct convert_info *info; + + info = kmalloc(sizeof(*info), reiser4_ctx_gfp_mask_get()); + if (info != NULL) { + memset(info, 0, sizeof(*info)); + cluster_init_write(&info->clust, NULL); + } + return info; +} + +static void reset_convert_data(struct convert_info *info) +{ + info->clust.tc.hole = 0; +} + +void free_convert_data(flush_pos_t * pos) +{ + struct convert_info *sq; + + assert("edward-823", pos != NULL); + assert("edward-824", pos->sq != NULL); + + sq = pos->sq; + if (sq->itm) + free_item_convert_data(sq); + put_cluster_handle(&sq->clust); + kfree(pos->sq); + pos->sq = NULL; + return; +} + +static int init_item_convert_data(flush_pos_t * pos, struct inode *inode) +{ + struct convert_info *sq; + + assert("edward-825", pos != NULL); + assert("edward-826", pos->sq != NULL); + assert("edward-827", item_convert_data(pos) != NULL); + assert("edward-828", inode != NULL); + + sq = pos->sq; + memset(sq->itm, 0, sizeof(*sq->itm)); + + /* iplug->init_convert_data() */ + return 
init_convert_data_ctail(sq->itm, inode); +} + +/* create and attach disk cluster info used by 'convert' phase of the flush + squalloc() */ +static int attach_convert_idata(flush_pos_t * pos, struct inode *inode) +{ + int ret = 0; + struct convert_item_info *info; + struct cluster_handle *clust; + file_plugin *fplug = inode_file_plugin(inode); + + assert("edward-248", pos != NULL); + assert("edward-249", pos->child != NULL); + assert("edward-251", inode != NULL); + assert("edward-682", cryptcompress_inode_ok(inode)); + assert("edward-252", + fplug == file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + assert("edward-473", + item_plugin_by_coord(&pos->coord) == + item_plugin_by_id(CTAIL_ID)); + + if (!pos->sq) { + pos->sq = alloc_convert_data(); + if (!pos->sq) + return RETERR(-ENOMEM); + } + else + reset_convert_data(pos->sq); + + clust = &pos->sq->clust; + + ret = set_cluster_by_page(clust, + jnode_page(pos->child), + MAX_CLUSTER_NRPAGES); + if (ret) + goto err; + + assert("edward-829", pos->sq != NULL); + assert("edward-250", item_convert_data(pos) == NULL); + + pos->sq->iplug = item_plugin_by_id(CTAIL_ID); + + ret = alloc_item_convert_data(pos->sq); + if (ret) + goto err; + ret = init_item_convert_data(pos, inode); + if (ret) + goto err; + info = item_convert_data(pos); + + ret = checkout_logical_cluster(clust, pos->child, inode); + if (ret) + goto err; + + reiser4_deflate_cluster(clust, inode); + inc_item_convert_count(pos); + + /* prepare flow for insertion */ + fplug->flow_by_inode(inode, + (const char __user *)tfm_stream_data(&clust->tc, + OUTPUT_STREAM), + 0 /* kernel space */ , + clust->tc.len, + clust_to_off(clust->index, inode), + WRITE_OP, &info->flow); + if (clust->tc.hole) + info->flow.length = 0; + + jput(pos->child); + return 0; + err: + jput(pos->child); + free_convert_data(pos); + return ret; +} + +/* clear up disk cluster info */ +static void detach_convert_idata(struct convert_info * sq) +{ + struct convert_item_info *info; + + 
assert("edward-253", sq != NULL); + assert("edward-840", sq->itm != NULL); + + info = sq->itm; + assert("edward-1212", info->flow.length == 0); + + free_item_convert_data(sq); + return; +} + +/* plugin->u.item.f.utmost_child */ + +/* This function sets leftmost child for a first cluster item, + if the child exists, and NULL in other cases. + NOTE-EDWARD: Do not call this for RIGHT_SIDE */ + +int utmost_child_ctail(const coord_t * coord, sideof side, jnode ** child) +{ + reiser4_key key; + + item_key_by_coord(coord, &key); + + assert("edward-257", coord != NULL); + assert("edward-258", child != NULL); + assert("edward-259", side == LEFT_SIDE); + assert("edward-260", + item_plugin_by_coord(coord) == item_plugin_by_id(CTAIL_ID)); + + if (!is_disk_cluster_key(&key, coord)) + *child = NULL; + else + *child = jlookup(current_tree, + get_key_objectid(item_key_by_coord + (coord, &key)), + off_to_pg(get_key_offset(&key))); + return 0; +} + +/* + * Set status (d_next) of the first item at the right neighbor + * + * If the current position is the last item in the node, then + * look at its first item at the right neighbor (skip empty nodes). + * Note, that right neighbors may be not dirty because of races. + * If so, make it dirty and set convertible flag. 
+ */ +static int pre_convert_ctail(flush_pos_t * pos) +{ + int ret = 0; + int stop = 0; + znode *slider; + lock_handle slider_lh; + lock_handle right_lh; + + assert("edward-1232", !node_is_empty(pos->coord.node)); + assert("edward-1014", + pos->coord.item_pos < coord_num_items(&pos->coord)); + assert("edward-1015", convert_data_attached(pos)); + assert("edward-1611", + item_convert_data(pos)->d_cur != DC_INVALID_STATE); + assert("edward-1017", + item_convert_data(pos)->d_next == DC_INVALID_STATE); + + /* + * In the following two cases we don't need + * to look at right neighbor + */ + if (item_convert_data(pos)->d_cur == DC_AFTER_CLUSTER) { + /* + * cluster is over, so the first item of the right + * neighbor doesn't belong to this cluster + */ + return 0; + } + if (pos->coord.item_pos < coord_num_items(&pos->coord) - 1) { + /* + * current position is not the last item in the node, + * so the first item of the right neighbor doesn't + * belong to this cluster + */ + return 0; + } + /* + * Look at right neighbor. + * Note that concurrent truncate is not a problem + * since we have locked the beginning of the cluster. 
+ */ + slider = pos->coord.node; + init_lh(&slider_lh); + init_lh(&right_lh); + + while (!stop) { + coord_t coord; + + ret = reiser4_get_right_neighbor(&right_lh, + slider, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (ret) + break; + slider = right_lh.node; + ret = zload(slider); + if (ret) + break; + coord_init_before_first_item(&coord, slider); + + if (node_is_empty(slider)) { + warning("edward-1641", "Found empty right neighbor"); + znode_make_dirty(slider); + znode_set_convertible(slider); + /* + * skip this node, + * go rightward + */ + stop = 0; + } else if (same_disk_cluster(&pos->coord, &coord)) { + + item_convert_data(pos)->d_next = DC_CHAINED_ITEM; + + if (!ZF_ISSET(slider, JNODE_DIRTY)) { + /* + warning("edward-1024", + "next slum item mergeable, " + "but znode %p isn't dirty\n", + lh.node); + */ + znode_make_dirty(slider); + } + if (!znode_convertible(slider)) { + /* + warning("edward-1272", + "next slum item mergeable, " + "but znode %p isn't convertible\n", + lh.node); + */ + znode_set_convertible(slider); + } + stop = 1; + convert_data(pos)->right_locked = 1; + } else { + item_convert_data(pos)->d_next = DC_AFTER_CLUSTER; + stop = 1; + convert_data(pos)->right_locked = 1; + } + zrelse(slider); + done_lh(&slider_lh); + move_lh(&slider_lh, &right_lh); + } + if (convert_data(pos)->right_locked) + /* + * Store locked right neighbor in + * the conversion info. 
Otherwise, + * we won't be able to access it, + * if the current node gets deleted + * during conversion + */ + move_lh(&convert_data(pos)->right_lock, &slider_lh); + done_lh(&slider_lh); + done_lh(&right_lh); + + if (ret == -E_NO_NEIGHBOR) { + item_convert_data(pos)->d_next = DC_AFTER_CLUSTER; + ret = 0; + } + assert("edward-1610", + ergo(ret != 0, + item_convert_data(pos)->d_next == DC_INVALID_STATE)); + return ret; +} + +/* + * do some post-conversion actions; + * detach conversion data if there is nothing to convert anymore + */ +static void post_convert_ctail(flush_pos_t * pos, + ctail_convert_mode_t mode, int old_nr_items) +{ + switch (mode) { + case CTAIL_CUT_ITEM: + assert("edward-1214", item_convert_data(pos)->flow.length == 0); + assert("edward-1215", + coord_num_items(&pos->coord) == old_nr_items || + coord_num_items(&pos->coord) == old_nr_items - 1); + + if (item_convert_data(pos)->d_next == DC_CHAINED_ITEM) + /* + * the next item belongs to this cluster, + * and should be also killed + */ + break; + if (coord_num_items(&pos->coord) != old_nr_items) { + /* + * the latest item in the + * cluster has been killed, + */ + detach_convert_idata(pos->sq); + if (!node_is_empty(pos->coord.node)) + /* + * make sure the next item will be scanned + */ + coord_init_before_item(&pos->coord); + break; + } + case CTAIL_APPEND_ITEM: + /* + * in the append mode the whole flow has been inserted + * (see COP_INSERT_FLOW primitive) + */ + assert("edward-434", item_convert_data(pos)->flow.length == 0); + detach_convert_idata(pos->sq); + break; + case CTAIL_OVERWRITE_ITEM: + if (coord_is_unprepped_ctail(&pos->coord)) { + /* + * the first (unprepped) ctail has been overwritten; + * convert it to the prepped one + */ + assert("edward-1259", + cluster_shift_ok(item_convert_data(pos)-> + cluster_shift)); + put_unaligned((d8)item_convert_data(pos)->cluster_shift, + &ctail_formatted_at(&pos->coord)-> + cluster_shift); + } + break; + default: + impossible("edward-1609", "Bad ctail 
conversion mode"); + } +} + +static int assign_conversion_mode(flush_pos_t * pos, ctail_convert_mode_t *mode) +{ + int ret = 0; + + *mode = CTAIL_INVAL_CONVERT_MODE; + + if (!convert_data_attached(pos)) { + if (should_attach_convert_idata(pos)) { + struct inode *inode; + gfp_t old_mask = get_current_context()->gfp_mask; + + assert("edward-264", pos->child != NULL); + assert("edward-265", jnode_page(pos->child) != NULL); + assert("edward-266", + jnode_page(pos->child)->mapping != NULL); + + inode = jnode_page(pos->child)->mapping->host; + + assert("edward-267", inode != NULL); + /* + * attach new convert item info + */ + get_current_context()->gfp_mask |= __GFP_NOFAIL; + ret = attach_convert_idata(pos, inode); + get_current_context()->gfp_mask = old_mask; + pos->child = NULL; + if (ret == -E_REPEAT) { + /* + * jnode became clean, or there is no dirty + * pages (nothing to update in disk cluster) + */ + warning("edward-1021", + "convert_ctail: nothing to attach"); + ret = 0; + goto dont_convert; + } + if (ret) + goto dont_convert; + + if (pos->sq->clust.tc.hole) { + assert("edward-1634", + item_convert_data(pos)->flow.length == 0); + /* + * new content is filled with zeros - + * we punch a hole using cut (not kill) + * primitive, so attached pages won't + * be truncated + */ + *mode = CTAIL_CUT_ITEM; + } + else + /* + * this is the first ctail in the cluster, + * so it (may be only its head) should be + * overwritten + */ + *mode = CTAIL_OVERWRITE_ITEM; + } else + /* + * non-convertible item + */ + goto dont_convert; + } else { + /* + * use old convert info + */ + struct convert_item_info *idata; + idata = item_convert_data(pos); + + switch (idata->d_cur) { + case DC_FIRST_ITEM: + case DC_CHAINED_ITEM: + if (idata->flow.length) + *mode = CTAIL_OVERWRITE_ITEM; + else + *mode = CTAIL_CUT_ITEM; + break; + case DC_AFTER_CLUSTER: + if (idata->flow.length) + *mode = CTAIL_APPEND_ITEM; + else { + /* + * nothing to update anymore + */ + detach_convert_idata(pos->sq); + goto 
dont_convert; + } + break; + default: + impossible("edward-1018", + "wrong current item state"); + ret = RETERR(-EIO); + goto dont_convert; + } + } + /* + * ok, ctail will be converted + */ + assert("edward-433", convert_data_attached(pos)); + assert("edward-1022", + pos->coord.item_pos < coord_num_items(&pos->coord)); + return 0; + dont_convert: + return ret; +} + +/* + * perform an operation on the ctail item in + * accordance with assigned conversion @mode + */ +static int do_convert_ctail(flush_pos_t * pos, ctail_convert_mode_t mode) +{ + int result = 0; + struct convert_item_info * info; + + assert("edward-468", pos != NULL); + assert("edward-469", pos->sq != NULL); + assert("edward-845", item_convert_data(pos) != NULL); + + info = item_convert_data(pos); + assert("edward-679", info->flow.data != NULL); + + switch (mode) { + case CTAIL_APPEND_ITEM: + assert("edward-1229", info->flow.length != 0); + assert("edward-1256", + cluster_shift_ok(cluster_shift_by_coord(&pos->coord))); + /* + * insert flow without balancing + * (see comments to convert_node()) + */ + result = insert_cryptcompress_flow_in_place(&pos->coord, + &pos->lock, + &info->flow, + info->cluster_shift); + break; + case CTAIL_OVERWRITE_ITEM: + assert("edward-1230", info->flow.length != 0); + overwrite_ctail(&pos->coord, &info->flow); + if (info->flow.length != 0) + break; + else + /* + * fall through: + * cut the rest of item (if any) + */ + ; + case CTAIL_CUT_ITEM: + assert("edward-1231", info->flow.length == 0); + result = cut_ctail(&pos->coord); + break; + default: + result = RETERR(-EIO); + impossible("edward-244", "bad ctail conversion mode"); + } + return result; +} + +/* + * plugin->u.item.f.convert + * + * Convert ctail items at flush time + */ +int convert_ctail(flush_pos_t * pos) +{ + int ret; + int old_nr_items; + ctail_convert_mode_t mode; + + assert("edward-1020", pos != NULL); + assert("edward-1213", coord_num_items(&pos->coord) != 0); + assert("edward-1257", 
item_id_by_coord(&pos->coord) == CTAIL_ID); + assert("edward-1258", ctail_ok(&pos->coord)); + assert("edward-261", pos->coord.node != NULL); + + old_nr_items = coord_num_items(&pos->coord); + /* + * detach old conversion data and + * attach a new one, if needed + */ + ret = assign_conversion_mode(pos, &mode); + if (ret || mode == CTAIL_INVAL_CONVERT_MODE) { + assert("edward-1633", !convert_data_attached(pos)); + return ret; + } + /* + * find out the status of the right neighbor + */ + ret = pre_convert_ctail(pos); + if (ret) { + detach_convert_idata(pos->sq); + return ret; + } + ret = do_convert_ctail(pos, mode); + if (ret) { + detach_convert_idata(pos->sq); + return ret; + } + /* + * detach old conversion data if needed + */ + post_convert_ctail(pos, mode, old_nr_items); + return 0; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/ctail.h b/fs/reiser4/plugin/item/ctail.h new file mode 100644 index 000000000000..d18e04632ffe --- /dev/null +++ b/fs/reiser4/plugin/item/ctail.h @@ -0,0 +1,102 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Ctail items are fragments (or bodies) of special type to provide + optimal storage of encrypted and(or) compressed files. 
*/ + + +#if !defined( __FS_REISER4_CTAIL_H__ ) +#define __FS_REISER4_CTAIL_H__ + +/* Disk format of ctail item */ +typedef struct ctail_item_format { + /* packed shift; + if its value is different from UCTAIL_SHIFT (see below), then + size of disk cluster is calculated as (1 << cluster_shift) */ + d8 cluster_shift; + /* ctail body */ + d8 body[0]; +} __attribute__ ((packed)) ctail_item_format; + +/* "Unprepped" disk cluster is represented by a single ctail item + with the following "magic" attributes: */ +/* "magic" cluster_shift */ +#define UCTAIL_SHIFT 0xff +/* How many units unprepped ctail item has */ +#define UCTAIL_NR_UNITS 1 + +/* The following is a set of various item states in a disk cluster. + Disk cluster is a set of items whose keys belong to the interval + [dc_key , dc_key + disk_cluster_size - 1] */ +typedef enum { + DC_INVALID_STATE = 0, + DC_FIRST_ITEM = 1, + DC_CHAINED_ITEM = 2, + DC_AFTER_CLUSTER = 3 +} dc_item_stat; + +/* ctail-specific extension. + In particular this describes parameters of disk cluster an item belongs to */ +struct ctail_coord_extension { + int shift; /* this contains cluster_shift extracted from + ctail_item_format (above), or UCTAIL_SHIFT + (the last one is the "magic" of unprepped disk clusters)*/ + int dsize; /* size of a prepped disk cluster */ + int ncount; /* count of nodes occupied by a disk cluster */ +}; + +struct cut_list; + +/* plugin->item.b.* */ +int can_contain_key_ctail(const coord_t *, const reiser4_key *, + const reiser4_item_data *); +int mergeable_ctail(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_ctail(const coord_t * coord); +int estimate_ctail(const coord_t * coord, const reiser4_item_data * data); +void print_ctail(const char *prefix, coord_t * coord); +lookup_result lookup_ctail(const reiser4_key *, lookup_bias, coord_t *); + +int paste_ctail(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG); +int init_ctail(coord_t *, coord_t *, reiser4_item_data *); 
+int can_shift_ctail(unsigned free_space, coord_t * coord, + znode * target, shift_direction pend, unsigned *size, + unsigned want); +void copy_units_ctail(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int cut_units_ctail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_ctail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int ctail_ok(const coord_t * coord); +int check_ctail(const coord_t * coord, const char **error); + +/* plugin->u.item.s.* */ +int read_ctail(struct file *, flow_t *, hint_t *); +int readpage_ctail(void *, struct page *); +int readpages_ctail(struct file *, struct address_space *, struct list_head *); +reiser4_key *append_key_ctail(const coord_t *, reiser4_key *); +int create_hook_ctail(const coord_t * coord, void *arg); +int kill_hook_ctail(const coord_t *, pos_in_node_t, pos_in_node_t, + carry_kill_data *); +int shift_hook_ctail(const coord_t *, unsigned, unsigned, znode *); + +/* plugin->u.item.f */ +int utmost_child_ctail(const coord_t *, sideof, jnode **); +int scan_ctail(flush_scan *); +int convert_ctail(flush_pos_t *); +size_t inode_scaled_cluster_size(struct inode *); + +#endif /* __FS_REISER4_CTAIL_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/extent.c b/fs/reiser4/plugin/item/extent.c new file mode 100644 index 000000000000..e35a4d5b7868 --- /dev/null +++ b/fs/reiser4/plugin/item/extent.c @@ -0,0 +1,197 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../key.h" +#include "../../super.h" +#include "../../carry.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../../flush.h" +#include "../object.h" + +/* prepare structure reiser4_item_data. It is used to put one extent unit into tree */ +/* Audited by: green(2002.06.13) */ +reiser4_item_data *init_new_extent(reiser4_item_data * data, void *ext_unit, + int nr_extents) +{ + data->data = ext_unit; + /* data->data is kernel space */ + data->user = 0; + data->length = sizeof(reiser4_extent) * nr_extents; + data->arg = NULL; + data->iplug = item_plugin_by_id(EXTENT_POINTER_ID); + return data; +} + +/* how many bytes are addressed by @nr first extents of the extent item */ +reiser4_block_nr reiser4_extent_size(const coord_t * coord, pos_in_node_t nr) +{ + pos_in_node_t i; + reiser4_block_nr blocks; + reiser4_extent *ext; + + ext = item_body_by_coord(coord); + assert("vs-263", nr <= nr_units_extent(coord)); + + blocks = 0; + for (i = 0; i < nr; i++, ext++) { + blocks += extent_get_width(ext); + } + + return blocks * current_blocksize; +} + +extent_state state_of_extent(reiser4_extent * ext) +{ + switch ((int)extent_get_start(ext)) { + case 0: + return HOLE_EXTENT; + case 1: + return UNALLOCATED_EXTENT; + default: + break; + } + return ALLOCATED_EXTENT; +} + +int extent_is_unallocated(const coord_t * item) +{ + assert("jmacd-5133", item_is_extent(item)); + + return state_of_extent(extent_by_coord(item)) == UNALLOCATED_EXTENT; +} + +/* set extent's start and width */ +void reiser4_set_extent(reiser4_extent 
* ext, reiser4_block_nr start, + reiser4_block_nr width) +{ + extent_set_start(ext, start); + extent_set_width(ext, width); +} + +/** + * reiser4_replace_extent - replace extent and paste 1 or 2 after it + * @un_extent: coordinate of extent to be overwritten + * @lh: need better comment + * @key: need better comment + * @exts_to_add: data prepared for insertion into tree + * @replace: need better comment + * @flags: need better comment + * @return_insert_position: need better comment + * + * Overwrites one extent, pastes 1 or 2 more ones after overwritten one. If + * @return_inserted_position is 1 - @un_extent and @lh are returned set to + * first of newly inserted units, if it is 0 - @un_extent and @lh are returned + * set to extent which was overwritten. + */ +int reiser4_replace_extent(struct replace_handle *h, + int return_inserted_position) +{ + int result; + znode *orig_znode; + /*ON_DEBUG(reiser4_extent orig_ext);*/ /* this is for debugging */ + + assert("vs-990", coord_is_existing_unit(h->coord)); + assert("vs-1375", znode_is_write_locked(h->coord->node)); + assert("vs-1426", extent_get_width(&h->overwrite) != 0); + assert("vs-1427", extent_get_width(&h->new_extents[0]) != 0); + assert("vs-1427", ergo(h->nr_new_extents == 2, + extent_get_width(&h->new_extents[1]) != 0)); + + /* compose structure for paste */ + init_new_extent(&h->item, &h->new_extents[0], h->nr_new_extents); + + coord_dup(&h->coord_after, h->coord); + init_lh(&h->lh_after); + copy_lh(&h->lh_after, h->lh); + reiser4_tap_init(&h->watch, &h->coord_after, &h->lh_after, ZNODE_WRITE_LOCK); + reiser4_tap_monitor(&h->watch); + + ON_DEBUG(h->orig_ext = *extent_by_coord(h->coord)); + orig_znode = h->coord->node; + +#if REISER4_DEBUG + /* make sure that key is set properly */ + unit_key_by_coord(h->coord, &h->tmp); + set_key_offset(&h->tmp, + get_key_offset(&h->tmp) + + extent_get_width(&h->overwrite) * current_blocksize); + assert("vs-1080", keyeq(&h->tmp, &h->paste_key)); +#endif + + /* set insert 
point after unit to be replaced */ + h->coord->between = AFTER_UNIT; + + result = insert_into_item(h->coord, return_inserted_position ? h->lh : NULL, + &h->paste_key, &h->item, h->flags); + if (!result) { + /* now we have to replace the unit after which new units were + inserted. Its position is tracked by @watch */ + reiser4_extent *ext; + znode *node; + + node = h->coord_after.node; + if (node != orig_znode) { + coord_clear_iplug(&h->coord_after); + result = zload(node); + } + + if (likely(!result)) { + ext = extent_by_coord(&h->coord_after); + + assert("vs-987", znode_is_loaded(node)); + assert("vs-988", !memcmp(ext, &h->orig_ext, sizeof(*ext))); + + /* overwrite extent unit */ + memcpy(ext, &h->overwrite, sizeof(reiser4_extent)); + znode_make_dirty(node); + + if (node != orig_znode) + zrelse(node); + + if (return_inserted_position == 0) { + /* coord and lh are to be set to overwritten + extent */ + assert("vs-1662", + WITH_DATA(node, !memcmp(&h->overwrite, + extent_by_coord( + &h->coord_after), + sizeof(reiser4_extent)))); + + *h->coord = h->coord_after; + done_lh(h->lh); + copy_lh(h->lh, &h->lh_after); + } else { + /* h->coord and h->lh are to be set to first of + inserted units */ + assert("vs-1663", + WITH_DATA(h->coord->node, + !memcmp(&h->new_extents[0], + extent_by_coord(h->coord), + sizeof(reiser4_extent)))); + assert("vs-1664", h->lh->node == h->coord->node); + } + } + } + reiser4_tap_done(&h->watch); + + return result; +} + +lock_handle *znode_lh(znode *node) +{ + assert("vs-1371", znode_is_write_locked(node)); + assert("vs-1372", znode_is_wlocked_once(node)); + return list_entry(node->lock.owners.next, lock_handle, owners_link); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent.h b/fs/reiser4/plugin/item/extent.h new file mode 100644 index 000000000000..1ea2e7bdc524 --- /dev/null +++ 
b/fs/reiser4/plugin/item/extent.h @@ -0,0 +1,231 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#ifndef __REISER4_EXTENT_H__ +#define __REISER4_EXTENT_H__ + +/* on disk extent */ +typedef struct { + reiser4_dblock_nr start; + reiser4_dblock_nr width; +} reiser4_extent; + +struct extent_stat { + int unallocated_units; + int unallocated_blocks; + int allocated_units; + int allocated_blocks; + int hole_units; + int hole_blocks; +}; + +/* extents in an extent item can be either holes, or unallocated or allocated + extents */ +typedef enum { + HOLE_EXTENT, + UNALLOCATED_EXTENT, + ALLOCATED_EXTENT +} extent_state; + +#define HOLE_EXTENT_START 0 +#define UNALLOCATED_EXTENT_START 1 +#define UNALLOCATED_EXTENT_START2 2 + +struct extent_coord_extension { + reiser4_block_nr pos_in_unit; + reiser4_block_nr width; /* width of current unit */ + pos_in_node_t nr_units; /* number of units */ + int ext_offset; /* offset from the beginning of zdata() */ + unsigned long expected_page; +#if REISER4_DEBUG + reiser4_extent extent; +#endif +}; + +/* macros to set/get fields of on-disk extent */ +static inline reiser4_block_nr extent_get_start(const reiser4_extent * ext) +{ + return le64_to_cpu(ext->start); +} + +static inline reiser4_block_nr extent_get_width(const reiser4_extent * ext) +{ + return le64_to_cpu(ext->width); +} + +extern __u64 reiser4_current_block_count(void); + +static inline void +extent_set_start(reiser4_extent * ext, reiser4_block_nr start) +{ + cassert(sizeof(ext->start) == 8); + assert("nikita-2510", + ergo(start > 1, start < reiser4_current_block_count())); + put_unaligned(cpu_to_le64(start), &ext->start); +} + +static inline void +extent_set_width(reiser4_extent * ext, reiser4_block_nr width) +{ + cassert(sizeof(ext->width) == 8); + assert("", width > 0); + put_unaligned(cpu_to_le64(width), &ext->width); + assert("nikita-2511", + ergo(extent_get_start(ext) > 1, + extent_get_start(ext) + width <= + 
reiser4_current_block_count())); +} + +#define extent_item(coord) \ +({ \ + assert("nikita-3143", item_is_extent(coord)); \ + ((reiser4_extent *)item_body_by_coord (coord)); \ +}) + +#define extent_by_coord(coord) \ +({ \ + assert("nikita-3144", item_is_extent(coord)); \ + (extent_item (coord) + (coord)->unit_pos); \ +}) + +#define width_by_coord(coord) \ +({ \ + assert("nikita-3145", item_is_extent(coord)); \ + extent_get_width (extent_by_coord(coord)); \ +}) + +struct carry_cut_data; +struct carry_kill_data; + +/* plugin->u.item.b.* */ +reiser4_key *max_key_inside_extent(const coord_t *, reiser4_key *); +int can_contain_key_extent(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_extent(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_extent(const coord_t *); +lookup_result lookup_extent(const reiser4_key *, lookup_bias, coord_t *); +void init_coord_extent(coord_t *); +int init_extent(coord_t *, reiser4_item_data *); +int paste_extent(coord_t *, reiser4_item_data *, carry_plugin_info *); +int can_shift_extent(unsigned free_space, + coord_t * source, znode * target, shift_direction, + unsigned *size, unsigned want); +void copy_units_extent(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int kill_hook_extent(const coord_t *, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *); +int create_hook_extent(const coord_t * coord, void *arg); +int cut_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +reiser4_key *unit_key_extent(const coord_t *, reiser4_key *); +reiser4_key *max_unit_key_extent(const coord_t *, reiser4_key *); +void 
print_extent(const char *, coord_t *); +int utmost_child_extent(const coord_t * coord, sideof side, jnode ** child); +int utmost_child_real_block_extent(const coord_t * coord, sideof side, + reiser4_block_nr * block); +void item_stat_extent(const coord_t * coord, void *vp); +int reiser4_check_extent(const coord_t * coord, const char **error); + +/* plugin->u.item.s.file.* */ +ssize_t reiser4_write_extent(struct file *, struct inode * inode, + const char __user *, size_t, loff_t *); +int reiser4_read_extent(struct file *, flow_t *, hint_t *); +int reiser4_readpage_extent(void *, struct page *); +int reiser4_do_readpage_extent(reiser4_extent*, reiser4_block_nr, struct page*); +reiser4_key *append_key_extent(const coord_t *, reiser4_key *); +void init_coord_extension_extent(uf_coord_t *, loff_t offset); +int get_block_address_extent(const coord_t *, sector_t block, + sector_t * result); + +/* these are used in flush.c + FIXME-VS: should they be somewhere in item_plugin? */ +int allocate_extent_item_in_place(coord_t *, lock_handle *, flush_pos_t * pos); +int allocate_and_copy_extent(znode * left, coord_t * right, flush_pos_t * pos, + reiser4_key * stop_key); + +int extent_is_unallocated(const coord_t * item); /* True if this extent is unallocated (i.e., not a hole, not allocated). */ +__u64 extent_unit_index(const coord_t * item); /* Block offset of this unit. */ +__u64 extent_unit_width(const coord_t * item); /* Number of blocks in this unit. */ + +/* plugin->u.item.f. 
*/ +int reiser4_scan_extent(flush_scan * scan); +extern int key_by_offset_extent(struct inode *, loff_t, reiser4_key *); + +reiser4_item_data *init_new_extent(reiser4_item_data * data, void *ext_unit, + int nr_extents); +reiser4_block_nr reiser4_extent_size(const coord_t * coord, pos_in_node_t nr); +extent_state state_of_extent(reiser4_extent * ext); +void reiser4_set_extent(reiser4_extent *, reiser4_block_nr start, + reiser4_block_nr width); +int reiser4_update_extent(struct inode *, jnode *, loff_t pos, + int *plugged_hole); + +#include "../../coord.h" +#include "../../lock.h" +#include "../../tap.h" + +struct replace_handle { + /* these are to be set before calling reiser4_replace_extent */ + coord_t *coord; + lock_handle *lh; + reiser4_key key; + reiser4_key *pkey; + reiser4_extent overwrite; + reiser4_extent new_extents[2]; + int nr_new_extents; + unsigned flags; + + /* these are used by reiser4_replace_extent */ + reiser4_item_data item; + coord_t coord_after; + lock_handle lh_after; + tap_t watch; + reiser4_key paste_key; +#if REISER4_DEBUG + reiser4_extent orig_ext; + reiser4_key tmp; +#endif +}; + +/* this structure is kmalloced before calling make_extent to avoid excessive + stack consumption on plug_hole->reiser4_replace_extent */ +struct make_extent_handle { + uf_coord_t *uf_coord; + reiser4_block_nr blocknr; + int created; + struct inode *inode; + union { + struct { + } append; + struct replace_handle replace; + } u; +}; + +int reiser4_replace_extent(struct replace_handle *, + int return_inserted_position); +lock_handle *znode_lh(znode *); + +/* the reiser4 repacker support */ +struct repacker_cursor; +extern int process_extent_backward_for_repacking(tap_t *, + struct repacker_cursor *); +extern int mark_extent_for_repacking(tap_t *, int); + +#define coord_by_uf_coord(uf_coord) (&((uf_coord)->coord)) +#define ext_coord_by_uf_coord(uf_coord) (&((uf_coord)->extension.extent)) + +/* __REISER4_EXTENT_H__ */ +#endif +/* + Local variables: + 
c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/extent_file_ops.c b/fs/reiser4/plugin/item/extent_file_ops.c new file mode 100644 index 000000000000..ef82745a68c9 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_file_ops.c @@ -0,0 +1,1434 @@ +/* COPYRIGHT 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../object.h" + +#include + +static inline reiser4_extent *ext_by_offset(const znode *node, int offset) +{ + reiser4_extent *ext; + + ext = (reiser4_extent *) (zdata(node) + offset); + return ext; +} + +/** + * check_uf_coord - verify coord extension + * @uf_coord: + * @key: + * + * Makes sure that all fields of @uf_coord are set properly. If @key is + * specified - check whether @uf_coord is set correspondingly. + */ +static void check_uf_coord(const uf_coord_t *uf_coord, const reiser4_key *key) +{ +#if REISER4_DEBUG + const coord_t *coord; + const struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + + coord = &uf_coord->coord; + ext_coord = &uf_coord->extension.extent; + ext = ext_by_offset(coord->node, uf_coord->extension.extent.ext_offset); + + assert("", + WITH_DATA(coord->node, + (uf_coord->valid == 1 && + coord_is_iplug_set(coord) && + item_is_extent(coord) && + ext_coord->nr_units == nr_units_extent(coord) && + ext == extent_by_coord(coord) && + ext_coord->width == extent_get_width(ext) && + coord->unit_pos < ext_coord->nr_units && + ext_coord->pos_in_unit < ext_coord->width && + memcmp(ext, &ext_coord->extent, + sizeof(reiser4_extent)) == 0))); + if (key) { + reiser4_key coord_key; + + unit_key_by_coord(&uf_coord->coord, &coord_key); + set_key_offset(&coord_key, + get_key_offset(&coord_key) + + (uf_coord->extension.extent. 
+ pos_in_unit << PAGE_SHIFT)); + assert("", keyeq(key, &coord_key)); + } +#endif +} + +static inline reiser4_extent *ext_by_ext_coord(const uf_coord_t *uf_coord) +{ + return ext_by_offset(uf_coord->coord.node, + uf_coord->extension.extent.ext_offset); +} + +#if REISER4_DEBUG + +/** + * offset_is_in_unit + * + * + * + */ +/* return 1 if offset @off is inside of extent unit pointed to by @coord. Set + pos_in_unit inside of unit correspondingly */ +static int offset_is_in_unit(const coord_t *coord, loff_t off) +{ + reiser4_key unit_key; + __u64 unit_off; + reiser4_extent *ext; + + ext = extent_by_coord(coord); + + unit_key_extent(coord, &unit_key); + unit_off = get_key_offset(&unit_key); + if (off < unit_off) + return 0; + if (off >= (unit_off + (current_blocksize * extent_get_width(ext)))) + return 0; + return 1; +} + +static int +coord_matches_key_extent(const coord_t * coord, const reiser4_key * key) +{ + reiser4_key item_key; + + assert("vs-771", coord_is_existing_unit(coord)); + assert("vs-1258", keylt(key, append_key_extent(coord, &item_key))); + assert("vs-1259", keyge(key, item_key_by_coord(coord, &item_key))); + + return offset_is_in_unit(coord, get_key_offset(key)); +} + +#endif + +/** + * can_append - + * @key: + * @coord: + * + * Returns 1 if @key is equal to an append key of item @coord is set to + */ +static int can_append(const reiser4_key *key, const coord_t *coord) +{ + reiser4_key append_key; + + return keyeq(key, append_key_extent(coord, &append_key)); +} + +/** + * append_hole + * @coord: + * @lh: + * @key: + * + */ +static int append_hole(coord_t *coord, lock_handle *lh, + const reiser4_key *key) +{ + reiser4_key append_key; + reiser4_block_nr hole_width; + reiser4_extent *ext, new_ext; + reiser4_item_data idata; + + /* last item of file may have to be appended with hole */ + assert("vs-708", znode_get_level(coord->node) == TWIG_LEVEL); + assert("vs-714", item_id_by_coord(coord) == EXTENT_POINTER_ID); + + /* key of first byte which is not 
addressed by this extent */ + append_key_extent(coord, &append_key); + + assert("", keyle(&append_key, key)); + + /* + * extent item has to be appended with hole. Calculate length of that + * hole + */ + hole_width = ((get_key_offset(key) - get_key_offset(&append_key) + + current_blocksize - 1) >> current_blocksize_bits); + assert("vs-954", hole_width > 0); + + /* set coord after last unit */ + coord_init_after_item_end(coord); + + /* get last extent in the item */ + ext = extent_by_coord(coord); + if (state_of_extent(ext) == HOLE_EXTENT) { + /* + * last extent of a file is hole extent. Widen that extent by + * @hole_width blocks. Note that we do not worry about + * overflowing - extent width is 64 bits + */ + reiser4_set_extent(ext, HOLE_EXTENT_START, + extent_get_width(ext) + hole_width); + znode_make_dirty(coord->node); + return 0; + } + + /* append last item of the file with hole extent unit */ + assert("vs-713", (state_of_extent(ext) == ALLOCATED_EXTENT || + state_of_extent(ext) == UNALLOCATED_EXTENT)); + + reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width); + init_new_extent(&idata, &new_ext, 1); + return insert_into_item(coord, lh, &append_key, &idata, 0); +} + +/** + * check_jnodes + * @twig: longterm locked twig node + * @key: + * + */ +static void check_jnodes(znode *twig, const reiser4_key *key, int count) +{ +#if REISER4_DEBUG + coord_t c; + reiser4_key node_key, jnode_key; + + jnode_key = *key; + + assert("", twig != NULL); + assert("", znode_get_level(twig) == TWIG_LEVEL); + assert("", znode_is_write_locked(twig)); + + zload(twig); + /* get the smallest key in twig node */ + coord_init_first_unit(&c, twig); + unit_key_by_coord(&c, &node_key); + assert("", keyle(&node_key, &jnode_key)); + + coord_init_last_unit(&c, twig); + unit_key_by_coord(&c, &node_key); + if (item_plugin_by_coord(&c)->s.file.append_key) + item_plugin_by_coord(&c)->s.file.append_key(&c, &node_key); + set_key_offset(&jnode_key, + get_key_offset(&jnode_key) + (loff_t)count * 
PAGE_SIZE - 1); + assert("", keylt(&jnode_key, &node_key)); + zrelse(twig); +#endif +} + +/** + * append_last_extent - append last file item + * @uf_coord: coord to start insertion from + * @jnodes: array of jnodes + * @count: number of jnodes in the array + * + * There is already at least one extent item of file @inode in the tree. Append + * the last of them with unallocated extent unit of width @count. Assign + * fake block numbers to jnodes corresponding to the inserted extent. + */ +static int append_last_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count) +{ + int result; + reiser4_extent new_ext; + reiser4_item_data idata; + coord_t *coord; + struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + reiser4_block_nr block; + jnode *node; + int i; + + coord = &uf_coord->coord; + ext_coord = &uf_coord->extension.extent; + ext = ext_by_ext_coord(uf_coord); + + /* check correctness of position in the item */ + assert("vs-228", coord->unit_pos == coord_last_unit_pos(coord)); + assert("vs-1311", coord->between == AFTER_UNIT); + assert("vs-1302", ext_coord->pos_in_unit == ext_coord->width - 1); + + if (!can_append(key, coord)) { + /* hole extent has to be inserted */ + result = append_hole(coord, uf_coord->lh, key); + uf_coord->valid = 0; + return result; + } + + if (count == 0) + return 0; + + assert("", get_key_offset(key) == (loff_t)index_jnode(jnodes[0]) * PAGE_SIZE); + + inode_add_blocks(mapping_jnode(jnodes[0])->host, count); + + switch (state_of_extent(ext)) { + case UNALLOCATED_EXTENT: + /* + * last extent unit of the file is unallocated one. 
Increase + * its width by @count + */ + reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, + extent_get_width(ext) + count); + znode_make_dirty(coord->node); + + /* update coord extension */ + ext_coord->width += count; + ON_DEBUG(extent_set_width + (&uf_coord->extension.extent.extent, + ext_coord->width)); + break; + + case HOLE_EXTENT: + case ALLOCATED_EXTENT: + /* + * last extent unit of the file is either hole or allocated + * one. Append one unallocated extent of width @count + */ + reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count); + init_new_extent(&idata, &new_ext, 1); + result = insert_into_item(coord, uf_coord->lh, key, &idata, 0); + uf_coord->valid = 0; + if (result) + return result; + break; + + default: + return RETERR(-EIO); + } + + /* + * make sure that we hold long term locked twig node containing all + * jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, key, count); + + /* + * assign fake block numbers to all jnodes. FIXME: make sure whether + * twig node containing inserted extent item is locked + */ + block = fake_blocknr_unformatted(count); + for (i = 0; i < count; i ++, block ++) { + node = jnodes[i]; + spin_lock_jnode(node); + JF_SET(node, JNODE_CREATED); + jnode_set_block(node, &block); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + return count; +} + +/** + * insert_first_hole - inser hole extent into tree + * @coord: + * @lh: + * @key: + * + * + */ +static int insert_first_hole(coord_t *coord, lock_handle *lh, + const reiser4_key *key) +{ + reiser4_extent new_ext; + reiser4_item_data idata; + reiser4_key item_key; + reiser4_block_nr hole_width; + + /* @coord must be set for inserting of new item */ + assert("vs-711", coord_is_between_items(coord)); + + item_key = *key; + set_key_offset(&item_key, 0ull); + + hole_width = ((get_key_offset(key) + current_blocksize - 1) >> + current_blocksize_bits); + 
assert("vs-710", hole_width > 0); + + /* compose body of hole extent and insert item into tree */ + reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width); + init_new_extent(&idata, &new_ext, 1); + return insert_extent_by_coord(coord, &idata, &item_key, lh); +} + + +/** + * insert_first_extent - insert first file item + * @inode: inode of file + * @uf_coord: coord to start insertion from + * @jnodes: array of jnodes + * @count: number of jnodes in the array + * @inode: + * + * There are no items of file @inode in the tree yet. Insert unallocated extent + * of width @count into tree or hole extent if writing not to the + * beginning. Assign fake block numbers to jnodes corresponding to the inserted + * unallocated extent. Returns number of jnodes or error code. + */ +static int insert_first_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count, + struct inode *inode) +{ + int result; + int i; + reiser4_extent new_ext; + reiser4_item_data idata; + reiser4_block_nr block; + struct unix_file_info *uf_info; + jnode *node; + + /* first extent insertion starts at leaf level */ + assert("vs-719", znode_get_level(uf_coord->coord.node) == LEAF_LEVEL); + assert("vs-711", coord_is_between_items(&uf_coord->coord)); + + if (get_key_offset(key) != 0) { + result = insert_first_hole(&uf_coord->coord, uf_coord->lh, key); + uf_coord->valid = 0; + uf_info = unix_file_inode_data(inode); + + /* + * first item insertion is only possible when writing to empty + * file or performing tail conversion + */ + assert("", (uf_info->container == UF_CONTAINER_EMPTY || + (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) && + reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)))); + /* if file was empty - update its state */ + if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY) + uf_info->container = UF_CONTAINER_EXTENTS; + return result; + } + + if (count == 0) + return 0; + + inode_add_blocks(mapping_jnode(jnodes[0])->host, count); + + /* + * prepare 
for tree modification: compose body of item and item data + * structure needed for insertion + */ + reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count); + init_new_extent(&idata, &new_ext, 1); + + /* insert extent item into the tree */ + result = insert_extent_by_coord(&uf_coord->coord, &idata, key, + uf_coord->lh); + if (result) + return result; + + /* + * make sure that we hold long term locked twig node containing all + * jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, key, count); + /* + * assign fake block numbers to all jnodes, capture and mark them dirty + */ + block = fake_blocknr_unformatted(count); + for (i = 0; i < count; i ++, block ++) { + node = jnodes[i]; + spin_lock_jnode(node); + JF_SET(node, JNODE_CREATED); + jnode_set_block(node, &block); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + + /* + * invalidate coordinate, research must be performed to continue + * because write will continue on twig level + */ + uf_coord->valid = 0; + return count; +} + +/** + * plug_hole - replace hole extent with unallocated and holes + * @uf_coord: + * @key: + * @node: + * @h: structure containing coordinate, lock handle, key, etc + * + * Creates an unallocated extent of width 1 within a hole. In worst case two + * additional extents can be created. 
+ */ +static int plug_hole(uf_coord_t *uf_coord, const reiser4_key *key, int *how) +{ + struct replace_handle rh; + reiser4_extent *ext; + reiser4_block_nr width, pos_in_unit; + coord_t *coord; + struct extent_coord_extension *ext_coord; + int return_inserted_position; + + check_uf_coord(uf_coord, key); + + rh.coord = coord_by_uf_coord(uf_coord); + rh.lh = uf_coord->lh; + rh.flags = 0; + + coord = coord_by_uf_coord(uf_coord); + ext_coord = ext_coord_by_uf_coord(uf_coord); + ext = ext_by_ext_coord(uf_coord); + + width = ext_coord->width; + pos_in_unit = ext_coord->pos_in_unit; + + *how = 0; + if (width == 1) { + reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, 1); + znode_make_dirty(coord->node); + /* update uf_coord */ + ON_DEBUG(ext_coord->extent = *ext); + *how = 1; + return 0; + } else if (pos_in_unit == 0) { + /* we deal with first element of extent */ + if (coord->unit_pos) { + /* there is an extent to the left */ + if (state_of_extent(ext - 1) == UNALLOCATED_EXTENT) { + /* + * left neighboring unit is an unallocated + * extent. 
Increase its width and decrease + * width of hole + */ + extent_set_width(ext - 1, + extent_get_width(ext - 1) + 1); + extent_set_width(ext, width - 1); + znode_make_dirty(coord->node); + + /* update coord extension */ + coord->unit_pos--; + ext_coord->width = extent_get_width(ext - 1); + ext_coord->pos_in_unit = ext_coord->width - 1; + ext_coord->ext_offset -= sizeof(reiser4_extent); + ON_DEBUG(ext_coord->extent = + *extent_by_coord(coord)); + *how = 2; + return 0; + } + } + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, UNALLOCATED_EXTENT_START, 1); + /* extent to be inserted */ + reiser4_set_extent(&rh.new_extents[0], HOLE_EXTENT_START, + width - 1); + rh.nr_new_extents = 1; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to unit which was replaced */ + return_inserted_position = 0; + *how = 3; + } else if (pos_in_unit == width - 1) { + /* we deal with last element of extent */ + if (coord->unit_pos < nr_units_extent(coord) - 1) { + /* there is an extent unit to the right */ + if (state_of_extent(ext + 1) == UNALLOCATED_EXTENT) { + /* + * right neighboring unit is an unallocated + * extent. 
Increase its width and decrease + * width of hole + */ + extent_set_width(ext + 1, + extent_get_width(ext + 1) + 1); + extent_set_width(ext, width - 1); + znode_make_dirty(coord->node); + + /* update coord extension */ + coord->unit_pos++; + ext_coord->width = extent_get_width(ext + 1); + ext_coord->pos_in_unit = 0; + ext_coord->ext_offset += sizeof(reiser4_extent); + ON_DEBUG(ext_coord->extent = + *extent_by_coord(coord)); + *how = 4; + return 0; + } + } + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, width - 1); + /* extent to be inserted */ + reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START, + 1); + rh.nr_new_extents = 1; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to unit which was inserted */ + return_inserted_position = 1; + *how = 5; + } else { + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, + pos_in_unit); + /* extents to be inserted */ + reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START, + 1); + reiser4_set_extent(&rh.new_extents[1], HOLE_EXTENT_START, + width - pos_in_unit - 1); + rh.nr_new_extents = 2; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to first of units which were inserted */ + return_inserted_position = 1; + *how = 6; + } + unit_key_by_coord(coord, &rh.paste_key); + set_key_offset(&rh.paste_key, get_key_offset(&rh.paste_key) + + extent_get_width(&rh.overwrite) * current_blocksize); + + uf_coord->valid = 0; + return reiser4_replace_extent(&rh, return_inserted_position); +} + +/** + * overwrite_one_block - + * @uf_coord: + * @key: + * @node: + * + * If @node corresponds to hole extent - create unallocated extent for it and + * assign fake block number. 
If @node corresponds to allocated extent - assign + * block number of jnode + */ +static int overwrite_one_block(uf_coord_t *uf_coord, const reiser4_key *key, + jnode *node, int *hole_plugged) +{ + int result; + struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + reiser4_block_nr block; + int how; + + assert("vs-1312", uf_coord->coord.between == AT_UNIT); + + result = 0; + ext_coord = ext_coord_by_uf_coord(uf_coord); + check_uf_coord(uf_coord, NULL); + ext = ext_by_ext_coord(uf_coord); + assert("", state_of_extent(ext) != UNALLOCATED_EXTENT); + + switch (state_of_extent(ext)) { + case ALLOCATED_EXTENT: + block = extent_get_start(ext) + ext_coord->pos_in_unit; + break; + + case HOLE_EXTENT: + inode_add_blocks(mapping_jnode(node)->host, 1); + result = plug_hole(uf_coord, key, &how); + if (result) + return result; + block = fake_blocknr_unformatted(1); + if (hole_plugged) + *hole_plugged = 1; + JF_SET(node, JNODE_CREATED); + break; + + default: + return RETERR(-EIO); + } + + jnode_set_block(node, &block); + return 0; +} + +/** + * move_coord - move coordinate forward + * @uf_coord: + * + * Move coordinate one data block pointer forward. Return 1 if coord is set to + * the last one already or is invalid. + */ +static int move_coord(uf_coord_t *uf_coord) +{ + struct extent_coord_extension *ext_coord; + + if (uf_coord->valid == 0) + return 1; + ext_coord = &uf_coord->extension.extent; + ext_coord->pos_in_unit ++; + if (ext_coord->pos_in_unit < ext_coord->width) + /* coordinate moved within the unit */ + return 0; + + /* end of unit is reached. 
Try to move to next unit */ + ext_coord->pos_in_unit = 0; + uf_coord->coord.unit_pos ++; + if (uf_coord->coord.unit_pos < ext_coord->nr_units) { + /* coordinate moved to next unit */ + ext_coord->ext_offset += sizeof(reiser4_extent); + ext_coord->width = + extent_get_width(ext_by_offset + (uf_coord->coord.node, + ext_coord->ext_offset)); + ON_DEBUG(ext_coord->extent = + *ext_by_offset(uf_coord->coord.node, + ext_coord->ext_offset)); + return 0; + } + /* end of item is reached */ + uf_coord->valid = 0; + return 1; +} + +/** + * overwrite_extent - + * @inode: + * + * Returns number of handled jnodes. + */ +static int overwrite_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count, int *plugged_hole) +{ + int result; + reiser4_key k; + int i; + jnode *node; + + k = *key; + for (i = 0; i < count; i ++) { + node = jnodes[i]; + if (*jnode_get_block(node) == 0) { + result = overwrite_one_block(uf_coord, &k, node, plugged_hole); + if (result) + return result; + } + /* + * make sure that we hold long term locked twig node containing + * all jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, &k, 1); + /* + * assign fake block numbers to all jnodes, capture and mark + * them dirty + */ + spin_lock_jnode(node); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + + if (uf_coord->valid == 0) + return i + 1; + + check_uf_coord(uf_coord, &k); + + if (move_coord(uf_coord)) { + /* + * failed to move to the next node pointer. Either end + * of file or end of twig node is reached. In the later + * case we might go to the right neighbor. 
+ */ + uf_coord->valid = 0; + return i + 1; + } + set_key_offset(&k, get_key_offset(&k) + PAGE_SIZE); + } + + return count; +} + +/** + * reiser4_update_extent + * @file: + * @jnodes: + * @count: + * @off: + * + */ +int reiser4_update_extent(struct inode *inode, jnode *node, loff_t pos, + int *plugged_hole) +{ + int result; + znode *loaded; + uf_coord_t uf_coord; + coord_t *coord; + lock_handle lh; + reiser4_key key; + + assert("", reiser4_lock_counters()->d_refs == 0); + + key_by_inode_and_offset_common(inode, pos, &key); + + init_uf_coord(&uf_coord, &lh); + coord = &uf_coord.coord; + result = find_file_item_nohint(coord, &lh, &key, + ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) { + assert("", reiser4_lock_counters()->d_refs == 0); + return result; + } + + result = zload(coord->node); + BUG_ON(result != 0); + loaded = coord->node; + + if (coord->between == AFTER_UNIT) { + /* + * append existing extent item with unallocated extent of width + * nr_jnodes + */ + init_coord_extension_extent(&uf_coord, + get_key_offset(&key)); + result = append_last_extent(&uf_coord, &key, + &node, 1); + } else if (coord->between == AT_UNIT) { + /* + * overwrite + * not optimal yet. Will be optimized if new write will show + * performance win. + */ + init_coord_extension_extent(&uf_coord, + get_key_offset(&key)); + result = overwrite_extent(&uf_coord, &key, + &node, 1, plugged_hole); + } else { + /* + * there are no items of this file in the tree yet. Create + * first item of the file inserting one unallocated extent of + * width nr_jnodes + */ + result = insert_first_extent(&uf_coord, &key, &node, 1, inode); + } + assert("", result == 1 || result < 0); + zrelse(loaded); + done_lh(&lh); + assert("", reiser4_lock_counters()->d_refs == 0); + return (result == 1) ? 
0 : result; +} + +/** + * update_extents + * @file: + * @jnodes: + * @count: + * @off: + * + */ +static int update_extents(struct file *file, struct inode *inode, + jnode **jnodes, int count, loff_t pos) +{ + struct hint hint; + reiser4_key key; + int result; + znode *loaded; + + result = load_file_hint(file, &hint); + BUG_ON(result != 0); + + if (count != 0) + /* + * count == 0 is special case: expanding truncate + */ + pos = (loff_t)index_jnode(jnodes[0]) << PAGE_SHIFT; + key_by_inode_and_offset_common(inode, pos, &key); + + assert("", reiser4_lock_counters()->d_refs == 0); + + do { + result = find_file_item(&hint, &key, ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) { + assert("", reiser4_lock_counters()->d_refs == 0); + return result; + } + + result = zload(hint.ext_coord.coord.node); + BUG_ON(result != 0); + loaded = hint.ext_coord.coord.node; + + if (hint.ext_coord.coord.between == AFTER_UNIT) { + /* + * append existing extent item with unallocated extent + * of width nr_jnodes + */ + if (hint.ext_coord.valid == 0) + /* NOTE: get statistics on this */ + init_coord_extension_extent(&hint.ext_coord, + get_key_offset(&key)); + result = append_last_extent(&hint.ext_coord, &key, + jnodes, count); + } else if (hint.ext_coord.coord.between == AT_UNIT) { + /* + * overwrite + * not optimal yet. Will be optimized if new write will + * show performance win. + */ + if (hint.ext_coord.valid == 0) + /* NOTE: get statistics on this */ + init_coord_extension_extent(&hint.ext_coord, + get_key_offset(&key)); + result = overwrite_extent(&hint.ext_coord, &key, + jnodes, count, NULL); + } else { + /* + * there are no items of this file in the tree + * yet. 
Create first item of the file inserting one + * unallocated extent of * width nr_jnodes + */ + result = insert_first_extent(&hint.ext_coord, &key, + jnodes, count, inode); + } + zrelse(loaded); + if (result < 0) { + done_lh(hint.ext_coord.lh); + break; + } + + jnodes += result; + count -= result; + set_key_offset(&key, get_key_offset(&key) + result * PAGE_SIZE); + + /* seal and unlock znode */ + if (hint.ext_coord.valid) + reiser4_set_hint(&hint, &key, ZNODE_WRITE_LOCK); + else + reiser4_unset_hint(&hint); + + } while (count > 0); + + save_file_hint(file, &hint); + assert("", reiser4_lock_counters()->d_refs == 0); + return result; +} + +/** + * write_extent_reserve_space - reserve space for extent write operation + * @inode: + * + * Estimates and reserves space which may be required for writing + * WRITE_GRANULARITY pages of file. + */ +static int write_extent_reserve_space(struct inode *inode) +{ + __u64 count; + reiser4_tree *tree; + + /* + * to write WRITE_GRANULARITY pages to a file by extents we have to + * reserve disk space for: + + * 1. find_file_item may have to insert empty node to the tree (empty + * leaf node between two extent items). This requires 1 block and + * number of blocks which are necessary to perform insertion of an + * internal item into twig level. + + * 2. for each of written pages there might be needed 1 block and + * number of blocks which might be necessary to perform insertion of or + * paste to an extent item. + + * 3. stat data update + */ + tree = reiser4_tree_by_inode(inode); + count = estimate_one_insert_item(tree) + + WRITE_GRANULARITY * (1 + estimate_one_insert_into_item(tree)) + + estimate_one_insert_item(tree); + grab_space_enable(); + return reiser4_grab_space(count, 0 /* flags */); +} + +/* + * filemap_copy_from_user no longer exists in generic code, because it + * is deadlocky (copying from user while holding the page lock is bad). + * As a temporary fix for reiser4, just define it here. 
+ */ +static inline size_t +filemap_copy_from_user(struct page *page, unsigned long offset, + const char __user *buf, unsigned bytes) +{ + char *kaddr; + int left; + + kaddr = kmap_atomic(page); + left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); + kunmap_atomic(kaddr); + + if (left != 0) { + /* Do it the slow way */ + kaddr = kmap(page); + left = __copy_from_user(kaddr + offset, buf, bytes); + kunmap(page); + } + return bytes - left; +} + +/** + * reiser4_write_extent - write method of extent item plugin + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * + */ +ssize_t reiser4_write_extent(struct file *file, struct inode * inode, + const char __user *buf, size_t count, loff_t *pos) +{ + int have_to_update_extent; + int nr_pages, nr_dirty; + struct page *page; + jnode *jnodes[WRITE_GRANULARITY + 1]; + unsigned long index; + unsigned long end; + int i; + int to_page, page_off; + size_t left, written; + int result = 0; + + if (write_extent_reserve_space(inode)) + return RETERR(-ENOSPC); + + if (count == 0) { + /* truncate case */ + update_extents(file, inode, jnodes, 0, *pos); + return 0; + } + + BUG_ON(get_current_context()->trans->atom != NULL); + + left = count; + index = *pos >> PAGE_SHIFT; + /* calculate number of pages which are to be written */ + end = ((*pos + count - 1) >> PAGE_SHIFT); + nr_pages = end - index + 1; + nr_dirty = 0; + assert("", nr_pages <= WRITE_GRANULARITY + 1); + + /* get pages and jnodes */ + for (i = 0; i < nr_pages; i ++) { + page = find_or_create_page(inode->i_mapping, index + i, + reiser4_ctx_gfp_mask_get()); + if (page == NULL) { + nr_pages = i; + result = RETERR(-ENOMEM); + goto out; + } + + jnodes[i] = jnode_of_page(page); + if (IS_ERR(jnodes[i])) { + unlock_page(page); + put_page(page); + nr_pages = i; + result = RETERR(-ENOMEM); + goto out; + } + /* prevent jnode and page from disconnecting */ + JF_SET(jnodes[i], 
JNODE_WRITE_PREPARED); + unlock_page(page); + } + + BUG_ON(get_current_context()->trans->atom != NULL); + + have_to_update_extent = 0; + + page_off = (*pos & (PAGE_SIZE - 1)); + for (i = 0; i < nr_pages; i ++) { + to_page = PAGE_SIZE - page_off; + if (to_page > left) + to_page = left; + page = jnode_page(jnodes[i]); + if (page_offset(page) < inode->i_size && + !PageUptodate(page) && to_page != PAGE_SIZE) { + /* + * the above is not optimal for partial write to last + * page of file when file size is not at boundary of + * page + */ + lock_page(page); + if (!PageUptodate(page)) { + result = readpage_unix_file(NULL, page); + BUG_ON(result != 0); + /* wait for read completion */ + lock_page(page); + BUG_ON(!PageUptodate(page)); + } else + result = 0; + unlock_page(page); + } + + BUG_ON(get_current_context()->trans->atom != NULL); + fault_in_pages_readable(buf, to_page); + BUG_ON(get_current_context()->trans->atom != NULL); + + lock_page(page); + if (!PageUptodate(page) && to_page != PAGE_SIZE) + zero_user_segments(page, 0, page_off, + page_off + to_page, + PAGE_SIZE); + + written = filemap_copy_from_user(page, page_off, buf, to_page); + if (unlikely(written != to_page)) { + unlock_page(page); + result = RETERR(-EFAULT); + break; + } + + flush_dcache_page(page); + set_page_dirty_notag(page); + unlock_page(page); + nr_dirty++; + + mark_page_accessed(page); + SetPageUptodate(page); + + if (jnodes[i]->blocknr == 0) + have_to_update_extent ++; + + page_off = 0; + buf += to_page; + left -= to_page; + BUG_ON(get_current_context()->trans->atom != NULL); + } + + if (have_to_update_extent) { + update_extents(file, inode, jnodes, nr_dirty, *pos); + } else { + for (i = 0; i < nr_dirty; i ++) { + int ret; + spin_lock_jnode(jnodes[i]); + ret = reiser4_try_capture(jnodes[i], + ZNODE_WRITE_LOCK, 0); + BUG_ON(ret != 0); + jnode_make_dirty_locked(jnodes[i]); + spin_unlock_jnode(jnodes[i]); + } + } +out: + for (i = 0; i < nr_pages; i ++) { + put_page(jnode_page(jnodes[i])); + 
JF_CLR(jnodes[i], JNODE_WRITE_PREPARED); + jput(jnodes[i]); + } + + /* the only errors handled so far is ENOMEM and + EFAULT on copy_from_user */ + + return (count - left) ? (count - left) : result; +} + +int reiser4_do_readpage_extent(reiser4_extent * ext, reiser4_block_nr pos, + struct page *page) +{ + jnode *j; + struct address_space *mapping; + unsigned long index; + oid_t oid; + reiser4_block_nr block; + + mapping = page->mapping; + oid = get_inode_oid(mapping->host); + index = page->index; + + switch (state_of_extent(ext)) { + case HOLE_EXTENT: + /* + * it is possible to have hole page with jnode, if page was + * eflushed previously. + */ + j = jfind(mapping, index); + if (j == NULL) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + spin_lock_jnode(j); + if (!jnode_page(j)) { + jnode_attach_page(j, page); + } else { + BUG_ON(jnode_page(j) != page); + assert("vs-1504", jnode_page(j) == page); + } + block = *jnode_get_io_block(j); + spin_unlock_jnode(j); + if (block == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + jput(j); + return 0; + } + break; + + case ALLOCATED_EXTENT: + j = jnode_of_page(page); + if (IS_ERR(j)) + return PTR_ERR(j); + if (*jnode_get_block(j) == 0) { + reiser4_block_nr blocknr; + + blocknr = extent_get_start(ext) + pos; + jnode_set_block(j, &blocknr); + } else + assert("vs-1403", + j->blocknr == extent_get_start(ext) + pos); + break; + + case UNALLOCATED_EXTENT: + j = jfind(mapping, index); + assert("nikita-2688", j); + assert("vs-1426", jnode_page(j) == NULL); + + spin_lock_jnode(j); + jnode_attach_page(j, page); + spin_unlock_jnode(j); + break; + + default: + warning("vs-957", "wrong extent\n"); + return RETERR(-EIO); + } + + BUG_ON(j == 0); + reiser4_page_io(page, j, READ, reiser4_ctx_gfp_mask_get()); + jput(j); + return 0; +} + +/* Implements plugin->u.item.s.file.read operation for extent items. 
*/ +int reiser4_read_extent(struct file *file, flow_t *flow, hint_t *hint) +{ + int result; + struct page *page; + unsigned long page_idx; + unsigned long page_off; /* offset within the page to start read from */ + unsigned long page_cnt; /* bytes which can be read from the page which + contains file_off */ + struct address_space *mapping; + loff_t file_off; /* offset in a file to start read from */ + uf_coord_t *uf_coord; + coord_t *coord; + struct extent_coord_extension *ext_coord; + char *kaddr; + + assert("vs-1353", current_blocksize == PAGE_SIZE); + assert("vs-572", flow->user == 1); + assert("vs-1351", flow->length > 0); + + uf_coord = &hint->ext_coord; + + check_uf_coord(uf_coord, NULL); + assert("vs-33", uf_coord->lh == &hint->lh); + + coord = &uf_coord->coord; + assert("vs-1119", znode_is_rlocked(coord->node)); + assert("vs-1120", znode_is_loaded(coord->node)); + assert("vs-1256", coord_matches_key_extent(coord, &flow->key)); + + mapping = file_inode(file)->i_mapping; + ext_coord = &uf_coord->extension.extent; + + file_off = get_key_offset(&flow->key); + page_off = (unsigned long)(file_off & (PAGE_SIZE - 1)); + page_cnt = PAGE_SIZE - page_off; + + page_idx = (unsigned long)(file_off >> PAGE_SHIFT); + + /* we start having twig node read locked. However, we do not want to + keep that lock all the time readahead works. So, set a seal and + release twig node. 
*/ + reiser4_set_hint(hint, &flow->key, ZNODE_READ_LOCK); + /* &hint->lh is done-ed */ + + do { + reiser4_txn_restart_current(); + page = read_mapping_page(mapping, page_idx, file); + if (IS_ERR(page)) + return PTR_ERR(page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + put_page(page); + warning("jmacd-97178", + "extent_read: page is not up to date"); + return RETERR(-EIO); + } + mark_page_accessed(page); + unlock_page(page); + + /* If users can be writing to this page using arbitrary virtual + addresses, take care about potential aliasing before reading + the page on the kernel side. + */ + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + + assert("nikita-3034", reiser4_schedulable()); + + /* number of bytes which are to be read from the page */ + if (page_cnt > flow->length) + page_cnt = flow->length; + + result = fault_in_pages_writeable(flow->data, page_cnt); + if (result) { + put_page(page); + return RETERR(-EFAULT); + } + + kaddr = kmap_atomic(page); + result = __copy_to_user_inatomic(flow->data, + kaddr + page_off, page_cnt); + kunmap_atomic(kaddr); + if (result != 0) { + kaddr = kmap(page); + result = __copy_to_user(flow->data, + kaddr + page_off, page_cnt); + kunmap(page); + if (unlikely(result)) + return RETERR(-EFAULT); + } + put_page(page); + + /* increase (flow->key) offset, + * update (flow->data) user area pointer + */ + move_flow_forward(flow, page_cnt); + + page_off = 0; + page_idx++; + + } while (flow->length); + return 0; +} + +/* + * plugin->s.file.readpage + * + * reiser4_read->unix_file_read->page_cache_readahead-> + * ->reiser4_readpage_dispatch->readpage_unix_file->readpage_extent + * or + * filemap_fault->reiser4_readpage_dispatch->readpage_unix_file-> + * ->readpage_extent + * + * At the beginning: coord->node is read locked, zloaded, page is + * locked, coord is set to existing unit inside of extent item (it + * is not necessary that coord matches to page->index) + */ +int 
reiser4_readpage_extent(void *vp, struct page *page) +{ + uf_coord_t *uf_coord = vp; + ON_DEBUG(coord_t * coord = &uf_coord->coord); + ON_DEBUG(reiser4_key key); + + assert("vs-1040", PageLocked(page)); + assert("vs-1050", !PageUptodate(page)); + assert("vs-1039", page->mapping && page->mapping->host); + + assert("vs-1044", znode_is_loaded(coord->node)); + assert("vs-758", item_is_extent(coord)); + assert("vs-1046", coord_is_existing_unit(coord)); + assert("vs-1045", znode_is_rlocked(coord->node)); + assert("vs-1047", + page->mapping->host->i_ino == + get_key_objectid(item_key_by_coord(coord, &key))); + check_uf_coord(uf_coord, NULL); + + return reiser4_do_readpage_extent(ext_by_ext_coord(uf_coord), + uf_coord->extension.extent.pos_in_unit, + page); +} + +int get_block_address_extent(const coord_t *coord, sector_t block, + sector_t *result) +{ + reiser4_extent *ext; + + if (!coord_is_existing_unit(coord)) + return RETERR(-EINVAL); + + ext = extent_by_coord(coord); + + if (state_of_extent(ext) != ALLOCATED_EXTENT) + /* FIXME: bad things may happen if it is unallocated extent */ + *result = 0; + else { + reiser4_key key; + + unit_key_by_coord(coord, &key); + assert("vs-1645", + block >= get_key_offset(&key) >> current_blocksize_bits); + assert("vs-1646", + block < + (get_key_offset(&key) >> current_blocksize_bits) + + extent_get_width(ext)); + *result = + extent_get_start(ext) + (block - + (get_key_offset(&key) >> + current_blocksize_bits)); + } + return 0; +} + +/* + plugin->u.item.s.file.append_key + key of first byte which is the next to last byte by addressed by this extent +*/ +reiser4_key *append_key_extent(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, + get_key_offset(key) + reiser4_extent_size(coord, + nr_units_extent + (coord))); + + assert("vs-610", get_key_offset(key) + && (get_key_offset(key) & (current_blocksize - 1)) == 0); + return key; +} + +/* plugin->u.item.s.file.init_coord_extension */ +void 
init_coord_extension_extent(uf_coord_t * uf_coord, loff_t lookuped) +{ + coord_t *coord; + struct extent_coord_extension *ext_coord; + reiser4_key key; + loff_t offset; + + assert("vs-1295", uf_coord->valid == 0); + + coord = &uf_coord->coord; + assert("vs-1288", coord_is_iplug_set(coord)); + assert("vs-1327", znode_is_loaded(coord->node)); + + if (coord->between != AFTER_UNIT && coord->between != AT_UNIT) + return; + + ext_coord = &uf_coord->extension.extent; + ext_coord->nr_units = nr_units_extent(coord); + ext_coord->ext_offset = + (char *)extent_by_coord(coord) - zdata(coord->node); + ext_coord->width = extent_get_width(extent_by_coord(coord)); + ON_DEBUG(ext_coord->extent = *extent_by_coord(coord)); + uf_coord->valid = 1; + + /* pos_in_unit is the only uninitialized field in extended coord */ + if (coord->between == AFTER_UNIT) { + assert("vs-1330", + coord->unit_pos == nr_units_extent(coord) - 1); + + ext_coord->pos_in_unit = ext_coord->width - 1; + } else { + /* AT_UNIT */ + unit_key_by_coord(coord, &key); + offset = get_key_offset(&key); + + assert("vs-1328", offset <= lookuped); + assert("vs-1329", + lookuped < + offset + ext_coord->width * current_blocksize); + ext_coord->pos_in_unit = + ((lookuped - offset) >> current_blocksize_bits); + } +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent_flush_ops.c b/fs/reiser4/plugin/item/extent_flush_ops.c new file mode 100644 index 000000000000..34bd946e7bc6 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_flush_ops.c @@ -0,0 +1,686 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../tree.h" +#include "../../jnode.h" +#include "../../super.h" +#include "../../flush.h" +#include "../../carry.h" +#include "../object.h" + +#include + +static reiser4_block_nr extent_unit_start(const 
coord_t * item); + +/* Return either first or last extent (depending on @side) of the item + @coord is set to. Set @pos_in_unit either to first or to last block + of extent. */ +static reiser4_extent *extent_utmost_ext(const coord_t * coord, sideof side, + reiser4_block_nr * pos_in_unit) +{ + reiser4_extent *ext; + + if (side == LEFT_SIDE) { + /* get first extent of item */ + ext = extent_item(coord); + *pos_in_unit = 0; + } else { + /* get last extent of item and last position within it */ + assert("vs-363", side == RIGHT_SIDE); + ext = extent_item(coord) + coord_last_unit_pos(coord); + *pos_in_unit = extent_get_width(ext) - 1; + } + + return ext; +} + +/* item_plugin->f.utmost_child */ +/* Return the child. Coord is set to extent item. Find jnode corresponding + either to first or to last unformatted node pointed by the item */ +int utmost_child_extent(const coord_t * coord, sideof side, jnode ** childp) +{ + reiser4_extent *ext; + reiser4_block_nr pos_in_unit; + + ext = extent_utmost_ext(coord, side, &pos_in_unit); + + switch (state_of_extent(ext)) { + case HOLE_EXTENT: + *childp = NULL; + return 0; + case ALLOCATED_EXTENT: + case UNALLOCATED_EXTENT: + break; + default: + /* this should never happen */ + assert("vs-1417", 0); + } + + { + reiser4_key key; + reiser4_tree *tree; + unsigned long index; + + if (side == LEFT_SIDE) { + /* get key of first byte addressed by the extent */ + item_key_by_coord(coord, &key); + } else { + /* get key of byte which next after last byte addressed by the extent */ + append_key_extent(coord, &key); + } + + assert("vs-544", + (get_key_offset(&key) >> PAGE_SHIFT) < ~0ul); + /* index of first or last (depending on @side) page addressed + by the extent */ + index = + (unsigned long)(get_key_offset(&key) >> PAGE_SHIFT); + if (side == RIGHT_SIDE) + index--; + + tree = coord->node->zjnode.tree; + *childp = jlookup(tree, get_key_objectid(&key), index); + } + + return 0; +} + +/* item_plugin->f.utmost_child_real_block */ +/* Return the 
child's block, if allocated. */ +int +utmost_child_real_block_extent(const coord_t * coord, sideof side, + reiser4_block_nr * block) +{ + reiser4_extent *ext; + + ext = extent_by_coord(coord); + + switch (state_of_extent(ext)) { + case ALLOCATED_EXTENT: + *block = extent_get_start(ext); + if (side == RIGHT_SIDE) + *block += extent_get_width(ext) - 1; + break; + case HOLE_EXTENT: + case UNALLOCATED_EXTENT: + *block = 0; + break; + default: + /* this should never happen */ + assert("vs-1418", 0); + } + + return 0; +} + +/* item_plugin->f.scan */ +/* Performs leftward scanning starting from an unformatted node and its parent coordinate. + This scan continues, advancing the parent coordinate, until either it encounters a + formatted child or it finishes scanning this node. + + If unallocated, the entire extent must be dirty and in the same atom. (Actually, I'm + not sure this is last property (same atom) is enforced, but it should be the case since + one atom must write the parent and the others must read the parent, thus fusing?). In + any case, the code below asserts this case for unallocated extents. Unallocated + extents are thus optimized because we can skip to the endpoint when scanning. + + It returns control to reiser4_scan_extent, handles these terminating conditions, + e.g., by loading the next twig. +*/ +int reiser4_scan_extent(flush_scan * scan) +{ + coord_t coord; + jnode *neighbor; + unsigned long scan_index, unit_index, unit_width, scan_max, scan_dist; + reiser4_block_nr unit_start; + __u64 oid; + reiser4_key key; + int ret = 0, allocated, incr; + reiser4_tree *tree; + + if (!JF_ISSET(scan->node, JNODE_DIRTY)) { + scan->stop = 1; + return 0; /* Race with truncate, this node is already + * truncated. 
*/ + } + + coord_dup(&coord, &scan->parent_coord); + + assert("jmacd-1404", !reiser4_scan_finished(scan)); + assert("jmacd-1405", jnode_get_level(scan->node) == LEAF_LEVEL); + assert("jmacd-1406", jnode_is_unformatted(scan->node)); + + /* The scan_index variable corresponds to the current page index of the + unformatted block scan position. */ + scan_index = index_jnode(scan->node); + + assert("jmacd-7889", item_is_extent(&coord)); + + repeat: + /* objectid of file */ + oid = get_key_objectid(item_key_by_coord(&coord, &key)); + + allocated = !extent_is_unallocated(&coord); + /* Get the values of this extent unit: */ + unit_index = extent_unit_index(&coord); + unit_width = extent_unit_width(&coord); + unit_start = extent_unit_start(&coord); + + assert("jmacd-7187", unit_width > 0); + assert("jmacd-7188", scan_index >= unit_index); + assert("jmacd-7189", scan_index <= unit_index + unit_width - 1); + + /* Depending on the scan direction, we set different maximum values for scan_index + (scan_max) and the number of nodes that would be passed if the scan goes the + entire way (scan_dist). Incr is an integer reflecting the incremental + direction of scan_index. */ + if (reiser4_scanning_left(scan)) { + scan_max = unit_index; + scan_dist = scan_index - unit_index; + incr = -1; + } else { + scan_max = unit_index + unit_width - 1; + scan_dist = scan_max - unit_index; + incr = +1; + } + + tree = coord.node->zjnode.tree; + + /* If the extent is allocated we have to check each of its blocks. If the extent + is unallocated we can skip to the scan_max. 
*/ + if (allocated) { + do { + neighbor = jlookup(tree, oid, scan_index); + if (neighbor == NULL) + goto stop_same_parent; + + if (scan->node != neighbor + && !reiser4_scan_goto(scan, neighbor)) { + /* @neighbor was jput() by reiser4_scan_goto */ + goto stop_same_parent; + } + + ret = scan_set_current(scan, neighbor, 1, &coord); + if (ret != 0) { + goto exit; + } + + /* reference to @neighbor is stored in @scan, no need + to jput(). */ + scan_index += incr; + + } while (incr + scan_max != scan_index); + + } else { + /* Optimized case for unallocated extents, skip to the end. */ + neighbor = jlookup(tree, oid, scan_max /*index */ ); + if (neighbor == NULL) { + /* Race with truncate */ + scan->stop = 1; + ret = 0; + goto exit; + } + + assert("zam-1043", + reiser4_blocknr_is_fake(jnode_get_block(neighbor))); + + ret = scan_set_current(scan, neighbor, scan_dist, &coord); + if (ret != 0) { + goto exit; + } + } + + if (coord_sideof_unit(&coord, scan->direction) == 0 + && item_is_extent(&coord)) { + /* Continue as long as there are more extent units. */ + + scan_index = + extent_unit_index(&coord) + + (reiser4_scanning_left(scan) ? + extent_unit_width(&coord) - 1 : 0); + goto repeat; + } + + if (0) { + stop_same_parent: + + /* If we are scanning left and we stop in the middle of an allocated + extent, we know the preceder immediately.. */ + /* middle of extent is (scan_index - unit_index) != 0. */ + if (reiser4_scanning_left(scan) && + (scan_index - unit_index) != 0) { + /* FIXME(B): Someone should step-through and verify that this preceder + calculation is indeed correct. */ + /* @unit_start is starting block (number) of extent + unit. Flush stopped at the @scan_index block from + the beginning of the file, which is (scan_index - + unit_index) block within extent. 
+ */ + if (unit_start) { + /* skip preceder update when we are at hole */ + scan->preceder_blk = + unit_start + scan_index - unit_index; + check_preceder(scan->preceder_blk); + } + } + + /* In this case, we leave coord set to the parent of scan->node. */ + scan->stop = 1; + + } else { + /* In this case, we are still scanning, coord is set to the next item which is + either off-the-end of the node or not an extent. */ + assert("jmacd-8912", scan->stop == 0); + assert("jmacd-7812", + (coord_is_after_sideof_unit(&coord, scan->direction) + || !item_is_extent(&coord))); + } + + ret = 0; + exit: + return ret; +} + +/** + * When on flush time unallocated extent is to be replaced with allocated one + * it may happen that one unallocated extent will have to be replaced with set + * of allocated extents. In this case insert_into_item will be called which may + * have to add new nodes into tree. Space for that is taken from inviolable + * reserve (5%). + */ +static reiser4_block_nr reserve_replace(void) +{ + reiser4_block_nr grabbed, needed; + + grabbed = get_current_context()->grabbed_blocks; + needed = estimate_one_insert_into_item(current_tree); + check_me("vpf-340", !reiser4_grab_space_force(needed, BA_RESERVED)); + return grabbed; +} + +static void free_replace_reserved(reiser4_block_nr grabbed) +{ + reiser4_context *ctx; + + ctx = get_current_context(); + grabbed2free(ctx, get_super_private(ctx->super), + ctx->grabbed_blocks - grabbed); +} + +/* Block offset of first block addressed by unit */ +__u64 extent_unit_index(const coord_t * item) +{ + reiser4_key key; + + assert("vs-648", coord_is_existing_unit(item)); + unit_key_by_coord(item, &key); + return get_key_offset(&key) >> current_blocksize_bits; +} + +/* AUDIT shouldn't return value be of reiser4_block_nr type? + Josh's answer: who knows? Is a "number of blocks" the same type as "block offset"? 
*/ +__u64 extent_unit_width(const coord_t * item) +{ + assert("vs-649", coord_is_existing_unit(item)); + return width_by_coord(item); +} + +/* Starting block location of this unit */ +static reiser4_block_nr extent_unit_start(const coord_t * item) +{ + return extent_get_start(extent_by_coord(item)); +} + +/** + * split_allocated_extent - + * @coord: + * @pos_in_unit: + * + * replace allocated extent with two allocated extents + */ +int split_allocated_extent(coord_t *coord, reiser4_block_nr pos_in_unit) +{ + int result; + struct replace_handle *h; + reiser4_extent *ext; + reiser4_block_nr grabbed; + + ext = extent_by_coord(coord); + assert("vs-1410", state_of_extent(ext) == ALLOCATED_EXTENT); + assert("vs-1411", extent_get_width(ext) > pos_in_unit); + + h = kmalloc(sizeof(*h), reiser4_ctx_gfp_mask_get()); + if (h == NULL) + return RETERR(-ENOMEM); + h->coord = coord; + h->lh = znode_lh(coord->node); + h->pkey = &h->key; + unit_key_by_coord(coord, h->pkey); + set_key_offset(h->pkey, + (get_key_offset(h->pkey) + + pos_in_unit * current_blocksize)); + reiser4_set_extent(&h->overwrite, extent_get_start(ext), + pos_in_unit); + reiser4_set_extent(&h->new_extents[0], + extent_get_start(ext) + pos_in_unit, + extent_get_width(ext) - pos_in_unit); + h->nr_new_extents = 1; + h->flags = COPI_DONT_SHIFT_LEFT; + h->paste_key = h->key; + + /* reserve space for extent unit paste, @grabbed is reserved before */ + grabbed = reserve_replace(); + result = reiser4_replace_extent(h, 0 /* leave @coord set to overwritten + extent */); + /* restore reserved */ + free_replace_reserved(grabbed); + kfree(h); + return result; +} + +/* replace extent @ext by extent @replace. Try to merge @replace with previous extent of the item (if there is + one). 
Return 1 if it succeeded, 0 - otherwise */ +static int try_to_merge_with_left(coord_t *coord, reiser4_extent *ext, + reiser4_extent *replace) +{ + assert("vs-1415", extent_by_coord(coord) == ext); + + if (coord->unit_pos == 0 + || state_of_extent(ext - 1) != ALLOCATED_EXTENT) + /* @ext either does not exist or is not allocated extent */ + return 0; + if (extent_get_start(ext - 1) + extent_get_width(ext - 1) != + extent_get_start(replace)) + return 0; + + /* we can glue, widen previous unit */ + extent_set_width(ext - 1, + extent_get_width(ext - 1) + extent_get_width(replace)); + + if (extent_get_width(ext) != extent_get_width(replace)) { + /* make current extent narrower */ + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + extent_get_width(replace)); + extent_set_width(ext, + extent_get_width(ext) - + extent_get_width(replace)); + } else { + /* current extent completely glued with its left neighbor, remove it */ + coord_t from, to; + + coord_dup(&from, coord); + from.unit_pos = nr_units_extent(coord) - 1; + coord_dup(&to, &from); + + /* currently cut from extent can cut either from the beginning or from the end. 
Move place which got + freed after unit removal to end of item */ + memmove(ext, ext + 1, + (from.unit_pos - + coord->unit_pos) * sizeof(reiser4_extent)); + /* wipe part of item which is going to be cut, so that node_check will not be confused */ + cut_node_content(&from, &to, NULL, NULL, NULL); + } + znode_make_dirty(coord->node); + /* move coord back */ + coord->unit_pos--; + return 1; +} + +/** + * convert_extent - replace extent with 2 ones + * @coord: coordinate of extent to be replaced + * @replace: extent to overwrite the one @coord is set to + * + * Overwrites extent @coord is set to and paste one extent unit after + * overwritten one if @replace is shorter than initial extent + */ +int convert_extent(coord_t *coord, reiser4_extent *replace) +{ + int result; + struct replace_handle *h; + reiser4_extent *ext; + reiser4_block_nr start, width, new_width; + reiser4_block_nr grabbed; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + start = extent_get_start(ext); + width = extent_get_width(ext); + new_width = extent_get_width(replace); + + assert("vs-1458", (state == UNALLOCATED_EXTENT || + state == ALLOCATED_EXTENT)); + assert("vs-1459", width >= new_width); + + if (try_to_merge_with_left(coord, ext, replace)) { + /* merged @replace with left neighbor. Current unit is either + removed or narrowed */ + return 0; + } + + if (width == new_width) { + /* replace current extent with @replace */ + *ext = *replace; + znode_make_dirty(coord->node); + return 0; + } + + h = kmalloc(sizeof(*h), reiser4_ctx_gfp_mask_get()); + if (h == NULL) + return RETERR(-ENOMEM); + h->coord = coord; + h->lh = znode_lh(coord->node); + h->pkey = &h->key; + unit_key_by_coord(coord, h->pkey); + set_key_offset(h->pkey, + (get_key_offset(h->pkey) + new_width * current_blocksize)); + h->overwrite = *replace; + + /* replace @ext with @replace and padding extent */ + reiser4_set_extent(&h->new_extents[0], + (state == ALLOCATED_EXTENT) ? 
+ (start + new_width) : + UNALLOCATED_EXTENT_START, + width - new_width); + h->nr_new_extents = 1; + h->flags = COPI_DONT_SHIFT_LEFT; + h->paste_key = h->key; + + /* reserve space for extent unit paste, @grabbed is reserved before */ + grabbed = reserve_replace(); + result = reiser4_replace_extent(h, 0 /* leave @coord set to overwritten + extent */); + + /* restore reserved */ + free_replace_reserved(grabbed); + kfree(h); + return result; +} + +/** + * assign_real_blocknrs + * @flush_pos: + * @oid: objectid of file jnodes to assign block number to belongs to + * @index: first jnode on the range + * @count: number of jnodes to assign block numbers to + * @first: start of allocated block range + * + * Assigns block numbers to each of @count jnodes. Index of first jnode is + * @index. Jnodes get lookuped with jlookup. + */ +void assign_real_blocknrs(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr count, + reiser4_block_nr first) +{ + unsigned long i; + reiser4_tree *tree; + txn_atom *atom; + int nr; + + atom = atom_locked_by_fq(flush_pos->fq); + assert("vs-1468", atom); + BUG_ON(atom == NULL); + + nr = 0; + tree = current_tree; + for (i = 0; i < count; ++i, ++index) { + jnode *node; + + node = jlookup(tree, oid, index); + assert("", node != NULL); + BUG_ON(node == NULL); + + spin_lock_jnode(node); + assert("", !jnode_is_flushprepped(node)); + assert("vs-1475", node->atom == atom); + assert("vs-1476", atomic_read(&node->x_count) > 0); + + JF_CLR(node, JNODE_FLUSH_RESERVED); + jnode_set_block(node, &first); + unformatted_make_reloc(node, flush_pos->fq); + ON_DEBUG(count_jnode(node->atom, node, NODE_LIST(node), + FQ_LIST, 0)); + spin_unlock_jnode(node); + first++; + + atomic_dec(&node->x_count); + nr ++; + } + + spin_unlock_atom(atom); + return; +} + +/** + * allocated_extent_slum_size + * @flush_pos: + * @oid: + * @index: + * @count: + * + * + */ +int allocated_extent_slum_size(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, unsigned 
long count) +{ + unsigned long i; + reiser4_tree *tree; + txn_atom *atom; + int nr; + + atom = atom_locked_by_fq(reiser4_pos_fq(flush_pos)); + assert("vs-1468", atom); + + nr = 0; + tree = current_tree; + for (i = 0; i < count; ++i, ++index) { + jnode *node; + + node = jlookup(tree, oid, index); + if (!node) + break; + + if (jnode_check_flushprepped(node)) { + atomic_dec(&node->x_count); + break; + } + + if (node->atom != atom) { + /* + * this is possible on overwrite: extent_write may + * capture several unformatted nodes without capturing + * any formatted nodes. + */ + atomic_dec(&node->x_count); + break; + } + + assert("vs-1476", atomic_read(&node->x_count) > 1); + atomic_dec(&node->x_count); + nr ++; + } + + spin_unlock_atom(atom); + return nr; +} + +/* if @key is glueable to the item @coord is set to */ +static int must_insert(const coord_t *coord, const reiser4_key *key) +{ + reiser4_key last; + + if (item_id_by_coord(coord) == EXTENT_POINTER_ID + && keyeq(append_key_extent(coord, &last), key)) + return 0; + return 1; +} + +/** + * copy extent @copy to the end of @node. 
+ * It may have to either insert new item after the last one, + * or append last item, or modify last unit of last item to have + * greater width + */ +int put_unit_to_end(znode *node, + const reiser4_key *key, reiser4_extent *copy_ext) +{ + int result; + coord_t coord; + cop_insert_flag flags; + reiser4_extent *last_ext; + reiser4_item_data data; + + /* set coord after last unit in an item */ + coord_init_last_unit(&coord, node); + coord.between = AFTER_UNIT; + + flags = + COPI_DONT_SHIFT_LEFT | COPI_DONT_SHIFT_RIGHT | COPI_DONT_ALLOCATE; + if (must_insert(&coord, key)) { + result = + insert_by_coord(&coord, init_new_extent(&data, copy_ext, 1), + key, NULL /*lh */ , flags); + + } else { + /* try to glue with last unit */ + last_ext = extent_by_coord(&coord); + if (state_of_extent(last_ext) && + extent_get_start(last_ext) + extent_get_width(last_ext) == + extent_get_start(copy_ext)) { + /* widen last unit of node */ + extent_set_width(last_ext, + extent_get_width(last_ext) + + extent_get_width(copy_ext)); + znode_make_dirty(node); + return 0; + } + + /* FIXME: put an assertion here that we can not merge last unit in @node and new unit */ + result = + insert_into_item(&coord, NULL /*lh */ , key, + init_new_extent(&data, copy_ext, 1), + flags); + } + + assert("vs-438", result == 0 || result == -E_NODE_FULL); + return result; +} + +int key_by_offset_extent(struct inode *inode, loff_t off, reiser4_key * key) +{ + return key_by_inode_and_offset_common(inode, off, key); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent_item_ops.c b/fs/reiser4/plugin/item/extent_item_ops.c new file mode 100644 index 000000000000..f04f4af5d3f8 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_item_ops.c @@ -0,0 +1,887 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" 
+#include "../../inode.h" +#include "../../tree_walk.h" /* check_sibling_list() */ +#include "../../page_cache.h" +#include "../../carry.h" + +/* item_plugin->b.max_key_inside */ +reiser4_key *max_key_inside_extent(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(reiser4_max_key())); + return key; +} + +/* item_plugin->b.can_contain_key + this checks whether @key of @data is matching to position set by @coord */ +int +can_contain_key_extent(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data * data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key) || + get_key_ordering(key) != get_key_ordering(&item_key)) + return 0; + + return 1; +} + +/* item_plugin->b.mergeable + first item is of extent type */ +/* Audited by: green(2002.06.13) */ +int mergeable_extent(const coord_t * p1, const coord_t * p2) +{ + reiser4_key key1, key2; + + assert("vs-299", item_id_by_coord(p1) == EXTENT_POINTER_ID); + /* FIXME-VS: Which is it? 
Assert or return 0 */ + if (item_id_by_coord(p2) != EXTENT_POINTER_ID) { + return 0; + } + + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) || + get_key_ordering(&key1) != get_key_ordering(&key2) || + get_key_type(&key1) != get_key_type(&key2)) + return 0; + if (get_key_offset(&key1) + + reiser4_extent_size(p1, nr_units_extent(p1)) != + get_key_offset(&key2)) + return 0; + return 1; +} + +/* item_plugin->b.nr_units */ +pos_in_node_t nr_units_extent(const coord_t * coord) +{ + /* length of extent item has to be multiple of extent size */ + assert("vs-1424", + (item_length_by_coord(coord) % sizeof(reiser4_extent)) == 0); + return item_length_by_coord(coord) / sizeof(reiser4_extent); +} + +/* item_plugin->b.lookup */ +lookup_result +lookup_extent(const reiser4_key * key, lookup_bias bias UNUSED_ARG, + coord_t * coord) +{ /* znode and item_pos are + set to an extent item to + look through */ + reiser4_key item_key; + reiser4_block_nr lookuped, offset; + unsigned i, nr_units; + reiser4_extent *ext; + unsigned blocksize; + unsigned char blocksize_bits; + + item_key_by_coord(coord, &item_key); + offset = get_key_offset(&item_key); + + /* key we are looking for must be greater than key of item @coord */ + assert("vs-414", keygt(key, &item_key)); + + assert("umka-99945", + !keygt(key, max_key_inside_extent(coord, &item_key))); + + ext = extent_item(coord); + assert("vs-1350", (char *)ext == (zdata(coord->node) + coord->offset)); + + blocksize = current_blocksize; + blocksize_bits = current_blocksize_bits; + + /* offset we are looking for */ + lookuped = get_key_offset(key); + + nr_units = nr_units_extent(coord); + /* go through all extents until the one which address given offset */ + for (i = 0; i < nr_units; i++, ext++) { + offset += (extent_get_width(ext) << blocksize_bits); + if (offset > lookuped) { + /* desired byte is somewhere in this 
extent */ + coord->unit_pos = i; + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + } + } + + /* set coord after last unit */ + coord->unit_pos = nr_units - 1; + coord->between = AFTER_UNIT; + return CBK_COORD_FOUND; +} + +/* item_plugin->b.paste + item @coord is set to has been appended with @data->length of free + space. data->data contains data to be pasted into the item in position + @coord->in_item.unit_pos. It must fit into that free space. + @coord must be set between units. +*/ +int +paste_extent(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG) +{ + unsigned old_nr_units; + reiser4_extent *ext; + int item_length; + + ext = extent_item(coord); + item_length = item_length_by_coord(coord); + old_nr_units = (item_length - data->length) / sizeof(reiser4_extent); + + /* this is also used to copy extent into newly created item, so + old_nr_units could be 0 */ + assert("vs-260", item_length >= data->length); + + /* make sure that coord is set properly */ + assert("vs-35", + ((!coord_is_existing_unit(coord)) + || (!old_nr_units && !coord->unit_pos))); + + /* first unit to be moved */ + switch (coord->between) { + case AFTER_UNIT: + coord->unit_pos++; + case BEFORE_UNIT: + coord->between = AT_UNIT; + break; + case AT_UNIT: + assert("vs-331", !old_nr_units && !coord->unit_pos); + break; + default: + impossible("vs-330", "coord is set improperly"); + } + + /* prepare space for new units */ + memmove(ext + coord->unit_pos + data->length / sizeof(reiser4_extent), + ext + coord->unit_pos, + (old_nr_units - coord->unit_pos) * sizeof(reiser4_extent)); + + /* copy new data from kernel space */ + assert("vs-556", data->user == 0); + memcpy(ext + coord->unit_pos, data->data, (unsigned)data->length); + + /* after paste @coord is set to first of pasted units */ + assert("vs-332", coord_is_existing_unit(coord)); + assert("vs-333", + !memcmp(data->data, extent_by_coord(coord), + (unsigned)data->length)); + return 0; +} + +/* 
item_plugin->b.can_shift */ +int +can_shift_extent(unsigned free_space, coord_t * source, + znode * target UNUSED_ARG, shift_direction pend UNUSED_ARG, + unsigned *size, unsigned want) +{ + *size = item_length_by_coord(source); + if (*size > free_space) + /* never split a unit of extent item */ + *size = free_space - free_space % sizeof(reiser4_extent); + + /* we can shift *size bytes, calculate how many do we want to shift */ + if (*size > want * sizeof(reiser4_extent)) + *size = want * sizeof(reiser4_extent); + + if (*size % sizeof(reiser4_extent) != 0) + impossible("vs-119", "Wrong extent size: %i %zd", *size, + sizeof(reiser4_extent)); + return *size / sizeof(reiser4_extent); + +} + +/* item_plugin->b.copy_units */ +void +copy_units_extent(coord_t * target, coord_t * source, + unsigned from, unsigned count, + shift_direction where_is_free_space, unsigned free_space) +{ + char *from_ext, *to_ext; + + assert("vs-217", free_space == count * sizeof(reiser4_extent)); + + from_ext = item_body_by_coord(source); + to_ext = item_body_by_coord(target); + + if (where_is_free_space == SHIFT_LEFT) { + assert("vs-215", from == 0); + + /* At this moment, item length was already updated in the item + header by shifting code, hence nr_units_extent() will + return "new" number of units---one we obtain after copying + units. 
+ */ + to_ext += + (nr_units_extent(target) - count) * sizeof(reiser4_extent); + } else { + reiser4_key key; + coord_t coord; + + assert("vs-216", + from + count == coord_last_unit_pos(source) + 1); + + from_ext += item_length_by_coord(source) - free_space; + + /* new units are inserted before first unit in an item, + therefore, we have to update item key */ + coord = *source; + coord.unit_pos = from; + unit_key_extent(&coord, &key); + + node_plugin_by_node(target->node)->update_item_key(target, &key, + NULL /*info */); + } + + memcpy(to_ext, from_ext, free_space); +} + +/* item_plugin->b.create_hook + @arg is znode of leaf node for which we need to update right delimiting key */ +int create_hook_extent(const coord_t * coord, void *arg) +{ + coord_t *child_coord; + znode *node; + reiser4_key key; + reiser4_tree *tree; + + if (!arg) + return 0; + + child_coord = arg; + tree = znode_get_tree(coord->node); + + assert("nikita-3246", znode_get_level(child_coord->node) == LEAF_LEVEL); + + write_lock_tree(tree); + write_lock_dk(tree); + /* find a node on the left level for which right delimiting key has to + be updated */ + if (coord_wrt(child_coord) == COORD_ON_THE_LEFT) { + assert("vs-411", znode_is_left_connected(child_coord->node)); + node = child_coord->node->left; + } else { + assert("vs-412", coord_wrt(child_coord) == COORD_ON_THE_RIGHT); + node = child_coord->node; + assert("nikita-3314", node != NULL); + } + + if (node != NULL) { + znode_set_rd_key(node, item_key_by_coord(coord, &key)); + + assert("nikita-3282", check_sibling_list(node)); + /* break sibling links */ + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && node->right) { + ON_DEBUG(node->right->left_version = + atomic_inc_return(&delim_key_version); + node->right_version = + atomic_inc_return(&delim_key_version);); + + node->right->left = NULL; + node->right = NULL; + } + } + write_unlock_dk(tree); + write_unlock_tree(tree); + return 0; +} + +#define ITEM_TAIL_KILLED 0 +#define ITEM_HEAD_KILLED 1 +#define 
ITEM_KILLED 2 + +/* item_plugin->b.kill_hook + this is called when @count units starting from @from-th one are going to be removed + */ +int +kill_hook_extent(const coord_t * coord, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *kdata) +{ + reiser4_extent *ext; + reiser4_block_nr start, length; + const reiser4_key *pfrom_key, *pto_key; + struct inode *inode; + reiser4_tree *tree; + pgoff_t from_off, to_off, offset, skip; + int retval; + + /* these are located in memory kmalloc-ed by kill_node_content */ + reiser4_key *min_item_key, *max_item_key, *from_key, *to_key, *key; + coord_t *dup, *next; + + assert("zam-811", znode_is_write_locked(coord->node)); + assert("nikita-3315", kdata != NULL); + assert("vs-34", kdata->buf != NULL); + + /* map structures to kdata->buf */ + min_item_key = (reiser4_key *) (kdata->buf); + max_item_key = min_item_key + 1; + from_key = max_item_key + 1; + to_key = from_key + 1; + key = to_key + 1; + dup = (coord_t *) (key + 1); + next = dup + 1; + + item_key_by_coord(coord, min_item_key); + max_item_key_by_coord(coord, max_item_key); + + if (kdata->params.from_key) { + pfrom_key = kdata->params.from_key; + pto_key = kdata->params.to_key; + } else { + assert("vs-1549", from == coord->unit_pos); + unit_key_by_coord(coord, from_key); + pfrom_key = from_key; + + coord_dup(dup, coord); + dup->unit_pos = from + count - 1; + max_unit_key_by_coord(dup, to_key); + pto_key = to_key; + } + + if (!keylt(pto_key, max_item_key)) { + if (!keygt(pfrom_key, min_item_key)) { + znode *left, *right; + + /* item is to be removed completely */ + assert("nikita-3316", kdata->left != NULL + && kdata->right != NULL); + + left = kdata->left->node; + right = kdata->right->node; + + tree = current_tree; + /* we have to do two things: + * + * 1. link left and right formatted neighbors of + * extent being removed, and + * + * 2. update their delimiting keys. + * + * atomicity of these operations is protected by + * taking dk-lock and tree-lock. 
+ */ + /* if neighbors of item being removed are znodes - + * link them */ + write_lock_tree(tree); + write_lock_dk(tree); + link_left_and_right(left, right); + if (left) { + /* update right delimiting key of left + * neighbor of extent item */ + /*coord_t next; + reiser4_key key; */ + + coord_dup(next, coord); + + if (coord_next_item(next)) + *key = *znode_get_rd_key(coord->node); + else + item_key_by_coord(next, key); + znode_set_rd_key(left, key); + } + write_unlock_dk(tree); + write_unlock_tree(tree); + + from_off = + get_key_offset(min_item_key) >> PAGE_SHIFT; + to_off = + (get_key_offset(max_item_key) + + 1) >> PAGE_SHIFT; + retval = ITEM_KILLED; + } else { + /* tail of item is to be removed */ + from_off = + (get_key_offset(pfrom_key) + PAGE_SIZE - + 1) >> PAGE_SHIFT; + to_off = + (get_key_offset(max_item_key) + + 1) >> PAGE_SHIFT; + retval = ITEM_TAIL_KILLED; + } + } else { + /* head of item is to be removed */ + assert("vs-1571", keyeq(pfrom_key, min_item_key)); + assert("vs-1572", + (get_key_offset(pfrom_key) & (PAGE_SIZE - 1)) == + 0); + assert("vs-1573", + ((get_key_offset(pto_key) + 1) & (PAGE_SIZE - + 1)) == 0); + + if (kdata->left->node) { + /* update right delimiting key of left neighbor of extent item */ + /*reiser4_key key; */ + + *key = *pto_key; + set_key_offset(key, get_key_offset(pto_key) + 1); + + write_lock_dk(current_tree); + znode_set_rd_key(kdata->left->node, key); + write_unlock_dk(current_tree); + } + + from_off = get_key_offset(pfrom_key) >> PAGE_SHIFT; + to_off = (get_key_offset(pto_key) + 1) >> PAGE_SHIFT; + retval = ITEM_HEAD_KILLED; + } + + inode = kdata->inode; + assert("vs-1545", inode != NULL); + if (inode != NULL) + /* take care of pages and jnodes corresponding to part of item being killed */ + reiser4_invalidate_pages(inode->i_mapping, from_off, + to_off - from_off, + kdata->params.truncate); + + ext = extent_item(coord) + from; + offset = + (get_key_offset(min_item_key) + + reiser4_extent_size(coord, from)) >> PAGE_SHIFT; + 
+ assert("vs-1551", from_off >= offset); + assert("vs-1552", from_off - offset <= extent_get_width(ext)); + skip = from_off - offset; + offset = from_off; + + while (offset < to_off) { + length = extent_get_width(ext) - skip; + if (state_of_extent(ext) == HOLE_EXTENT) { + skip = 0; + offset += length; + ext++; + continue; + } + + if (offset + length > to_off) { + length = to_off - offset; + } + + inode_sub_blocks(inode, length); + + if (state_of_extent(ext) == UNALLOCATED_EXTENT) { + /* some jnodes corresponding to this unallocated extent */ + fake_allocated2free(length, 0 /* unformatted */ ); + + skip = 0; + offset += length; + ext++; + continue; + } + + assert("vs-1218", state_of_extent(ext) == ALLOCATED_EXTENT); + + if (length != 0) { + start = extent_get_start(ext) + skip; + + /* BA_DEFER bit parameter is turned on because blocks which get freed are not safe to be freed + immediately */ + reiser4_dealloc_blocks(&start, &length, + 0 /* not used */ , + BA_DEFER + /* unformatted with defer */ ); + } + skip = 0; + offset += length; + ext++; + } + return retval; +} + +/* item_plugin->b.kill_units */ +int +kill_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *kdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + reiser4_extent *ext; + reiser4_key item_key; + pos_in_node_t count; + reiser4_key from_key, to_key; + const reiser4_key *pfrom_key, *pto_key; + loff_t off; + int result; + + assert("vs-1541", + ((kdata->params.from_key == NULL && kdata->params.to_key == NULL) + || (kdata->params.from_key != NULL + && kdata->params.to_key != NULL))); + + if (kdata->params.from_key) { + pfrom_key = kdata->params.from_key; + pto_key = kdata->params.to_key; + } else { + coord_t dup; + + /* calculate key range of kill */ + assert("vs-1549", from == coord->unit_pos); + unit_key_by_coord(coord, &from_key); + pfrom_key = &from_key; + + coord_dup(&dup, coord); + dup.unit_pos = to; + max_unit_key_by_coord(&dup, &to_key); + 
pto_key = &to_key; + } + + item_key_by_coord(coord, &item_key); + +#if REISER4_DEBUG + { + reiser4_key max_item_key; + + max_item_key_by_coord(coord, &max_item_key); + + if (new_first) { + /* head of item is to be cut */ + assert("vs-1542", keyeq(pfrom_key, &item_key)); + assert("vs-1538", keylt(pto_key, &max_item_key)); + } else { + /* tail of item is to be cut */ + assert("vs-1540", keygt(pfrom_key, &item_key)); + assert("vs-1543", !keylt(pto_key, &max_item_key)); + } + } +#endif + + if (smallest_removed) + *smallest_removed = *pfrom_key; + + if (new_first) { + /* item head is cut. Item key will change. This new key is calculated here */ + assert("vs-1556", + (get_key_offset(pto_key) & (PAGE_SIZE - 1)) == + (PAGE_SIZE - 1)); + *new_first = *pto_key; + set_key_offset(new_first, get_key_offset(new_first) + 1); + } + + count = to - from + 1; + result = kill_hook_extent(coord, from, count, kdata); + if (result == ITEM_TAIL_KILLED) { + assert("vs-1553", + get_key_offset(pfrom_key) >= + get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + off = + get_key_offset(pfrom_key) - + (get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + if (off) { + /* unit @from is to be cut partially. 
Its width decreases */ + ext = extent_item(coord) + from; + extent_set_width(ext, + (off + PAGE_SIZE - + 1) >> PAGE_SHIFT); + count--; + } + } else { + __u64 max_to_offset; + __u64 rest; + + assert("vs-1575", result == ITEM_HEAD_KILLED); + assert("", from == 0); + assert("", + ((get_key_offset(pto_key) + 1) & (PAGE_SIZE - + 1)) == 0); + assert("", + get_key_offset(pto_key) + 1 > + get_key_offset(&item_key) + + reiser4_extent_size(coord, to)); + max_to_offset = + get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1; + assert("", get_key_offset(pto_key) <= max_to_offset); + + rest = + (max_to_offset - + get_key_offset(pto_key)) >> PAGE_SHIFT; + if (rest) { + /* unit @to is to be cut partially */ + ext = extent_item(coord) + to; + + assert("", extent_get_width(ext) > rest); + + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + (extent_get_width(ext) - + rest)); + + extent_set_width(ext, rest); + count--; + } + } + return count * sizeof(reiser4_extent); +} + +/* item_plugin->b.cut_units + this is too similar to kill_units_extent */ +int +cut_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *cdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + reiser4_extent *ext; + reiser4_key item_key; + pos_in_node_t count; + reiser4_key from_key, to_key; + const reiser4_key *pfrom_key, *pto_key; + loff_t off; + + assert("vs-1541", + ((cdata->params.from_key == NULL && cdata->params.to_key == NULL) + || (cdata->params.from_key != NULL + && cdata->params.to_key != NULL))); + + if (cdata->params.from_key) { + pfrom_key = cdata->params.from_key; + pto_key = cdata->params.to_key; + } else { + coord_t dup; + + /* calculate key range of kill */ + coord_dup(&dup, coord); + dup.unit_pos = from; + unit_key_by_coord(&dup, &from_key); + + dup.unit_pos = to; + max_unit_key_by_coord(&dup, &to_key); + + pfrom_key = &from_key; + pto_key = &to_key; + } + + assert("vs-1555", + 
(get_key_offset(pfrom_key) & (PAGE_SIZE - 1)) == 0); + assert("vs-1556", + (get_key_offset(pto_key) & (PAGE_SIZE - 1)) == + (PAGE_SIZE - 1)); + + item_key_by_coord(coord, &item_key); + +#if REISER4_DEBUG + { + reiser4_key max_item_key; + + assert("vs-1584", + get_key_locality(pfrom_key) == + get_key_locality(&item_key)); + assert("vs-1585", + get_key_type(pfrom_key) == get_key_type(&item_key)); + assert("vs-1586", + get_key_objectid(pfrom_key) == + get_key_objectid(&item_key)); + assert("vs-1587", + get_key_ordering(pfrom_key) == + get_key_ordering(&item_key)); + + max_item_key_by_coord(coord, &max_item_key); + + if (new_first != NULL) { + /* head of item is to be cut */ + assert("vs-1542", keyeq(pfrom_key, &item_key)); + assert("vs-1538", keylt(pto_key, &max_item_key)); + } else { + /* tail of item is to be cut */ + assert("vs-1540", keygt(pfrom_key, &item_key)); + assert("vs-1543", keyeq(pto_key, &max_item_key)); + } + } +#endif + + if (smallest_removed) + *smallest_removed = *pfrom_key; + + if (new_first) { + /* item head is cut. Item key will change. This new key is calculated here */ + *new_first = *pto_key; + set_key_offset(new_first, get_key_offset(new_first) + 1); + } + + count = to - from + 1; + + assert("vs-1553", + get_key_offset(pfrom_key) >= + get_key_offset(&item_key) + reiser4_extent_size(coord, from)); + off = + get_key_offset(pfrom_key) - (get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + if (off) { + /* tail of unit @from is to be cut partially. Its width decreases */ + assert("vs-1582", new_first == NULL); + ext = extent_item(coord) + from; + extent_set_width(ext, off >> PAGE_SHIFT); + count--; + } + + assert("vs-1554", + get_key_offset(pto_key) <= + get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1); + off = + (get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1) - + get_key_offset(pto_key); + if (off) { + /* @to_key is smaller than max key of unit @to. Unit @to will not be removed. 
It gets start increased + and width decreased. */ + assert("vs-1583", (off & (PAGE_SIZE - 1)) == 0); + ext = extent_item(coord) + to; + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + (extent_get_width(ext) - + (off >> PAGE_SHIFT))); + + extent_set_width(ext, (off >> PAGE_SHIFT)); + count--; + } + return count * sizeof(reiser4_extent); +} + +/* item_plugin->b.unit_key */ +reiser4_key *unit_key_extent(const coord_t * coord, reiser4_key * key) +{ + assert("vs-300", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, + (get_key_offset(key) + + reiser4_extent_size(coord, coord->unit_pos))); + + return key; +} + +/* item_plugin->b.max_unit_key */ +reiser4_key *max_unit_key_extent(const coord_t * coord, reiser4_key * key) +{ + assert("vs-300", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, + (get_key_offset(key) + + reiser4_extent_size(coord, coord->unit_pos + 1) - 1)); + return key; +} + +/* item_plugin->b.estimate + item_plugin->b.item_data_by_flow */ + +#if REISER4_DEBUG + +/* item_plugin->b.check + used for debugging, every item should have here the most complete + possible check of the consistency of the item that the inventor can + construct +*/ +int reiser4_check_extent(const coord_t * coord /* coord of item to check */, + const char **error /* where to store error message */) +{ + reiser4_extent *ext, *first; + unsigned i, j; + reiser4_block_nr start, width, blk_cnt; + unsigned num_units; + reiser4_tree *tree; + oid_t oid; + reiser4_key key; + coord_t scan; + + assert("vs-933", REISER4_DEBUG); + + if (znode_get_level(coord->node) != TWIG_LEVEL) { + *error = "Extent on the wrong level"; + return -1; + } + if (item_length_by_coord(coord) % sizeof(reiser4_extent) != 0) { + *error = "Wrong item size"; + return -1; + } + ext = first = extent_item(coord); + blk_cnt = reiser4_block_count(reiser4_get_current_sb()); + num_units = 
coord_num_units(coord); + tree = znode_get_tree(coord->node); + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + coord_dup(&scan, coord); + + for (i = 0; i < num_units; ++i, ++ext) { + __u64 index; + + scan.unit_pos = i; + index = extent_unit_index(&scan); + +#if 0 + /* check that all jnodes are present for the unallocated + * extent */ + if (state_of_extent(ext) == UNALLOCATED_EXTENT) { + for (j = 0; j < extent_get_width(ext); j++) { + jnode *node; + + node = jlookup(tree, oid, index + j); + if (node == NULL) { + print_coord("scan", &scan, 0); + *error = "Jnode missing"; + return -1; + } + jput(node); + } + } +#endif + + start = extent_get_start(ext); + if (start < 2) + continue; + /* extent is allocated one */ + width = extent_get_width(ext); + if (start >= blk_cnt) { + *error = "Start too large"; + return -1; + } + if (start + width > blk_cnt) { + *error = "End too large"; + return -1; + } + /* make sure that this extent does not overlap with other + allocated extents extents */ + for (j = 0; j < i; j++) { + if (state_of_extent(first + j) != ALLOCATED_EXTENT) + continue; + if (! + ((extent_get_start(ext) >= + extent_get_start(first + j) + + extent_get_width(first + j)) + || (extent_get_start(ext) + + extent_get_width(ext) <= + extent_get_start(first + j)))) { + *error = "Extent overlaps with others"; + return -1; + } + } + + } + + return 0; +} + +#endif /* REISER4_DEBUG */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/item/internal.c b/fs/reiser4/plugin/item/internal.c new file mode 100644 index 000000000000..24cebb67eacf --- /dev/null +++ b/fs/reiser4/plugin/item/internal.c @@ -0,0 +1,404 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Implementation of internal-item plugin methods. 
*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../key.h" +#include "../../coord.h" +#include "internal.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../tree_walk.h" +#include "../../tree_mod.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../block_alloc.h" + +/* see internal.h for explanation */ + +/* plugin->u.item.b.mergeable */ +int mergeable_internal(const coord_t * p1 UNUSED_ARG /* first item */ , + const coord_t * p2 UNUSED_ARG /* second item */ ) +{ + /* internal items are not mergeable */ + return 0; +} + +/* ->lookup() method for internal items */ +lookup_result lookup_internal(const reiser4_key * key /* key to look up */ , + lookup_bias bias UNUSED_ARG /* lookup bias */ , + coord_t * coord /* coord of item */ ) +{ + reiser4_key ukey; + + switch (keycmp(unit_key_by_coord(coord, &ukey), key)) { + default: + impossible("", "keycmp()?!"); + case LESS_THAN: + /* FIXME-VS: AFTER_ITEM used to be here. 
But with new coord + item plugin can not be taken using coord set this way */ + assert("vs-681", coord->unit_pos == 0); + coord->between = AFTER_UNIT; + case EQUAL_TO: + return CBK_COORD_FOUND; + case GREATER_THAN: + return CBK_COORD_NOTFOUND; + } +} + +/* return body of internal item at @coord */ +static internal_item_layout *internal_at(const coord_t * coord /* coord of + * item */ ) +{ + assert("nikita-607", coord != NULL); + assert("nikita-1650", + item_plugin_by_coord(coord) == + item_plugin_by_id(NODE_POINTER_ID)); + return (internal_item_layout *) item_body_by_coord(coord); +} + +void reiser4_update_internal(const coord_t * coord, + const reiser4_block_nr * blocknr) +{ + internal_item_layout *item = internal_at(coord); + assert("nikita-2959", reiser4_blocknr_is_sane(blocknr)); + + put_unaligned(cpu_to_le64(*blocknr), &item->pointer); +} + +/* return child block number stored in the internal item at @coord */ +static reiser4_block_nr pointer_at(const coord_t * coord /* coord of item */ ) +{ + assert("nikita-608", coord != NULL); + return le64_to_cpu(get_unaligned(&internal_at(coord)->pointer)); +} + +/* get znode pointed to by internal @item */ +static znode *znode_at(const coord_t * item /* coord of item */ , + znode * parent /* parent node */ ) +{ + return child_znode(item, parent, 1, 0); +} + +/* store pointer from internal item into "block". 
Implementation of + ->down_link() method */ +void down_link_internal(const coord_t * coord /* coord of item */ , + const reiser4_key * key UNUSED_ARG /* key to get + * pointer for */ , + reiser4_block_nr * block /* resulting block number */ ) +{ + ON_DEBUG(reiser4_key item_key); + + assert("nikita-609", coord != NULL); + assert("nikita-611", block != NULL); + assert("nikita-612", (key == NULL) || + /* twig horrors */ + (znode_get_level(coord->node) == TWIG_LEVEL) + || keyle(item_key_by_coord(coord, &item_key), key)); + + *block = pointer_at(coord); + assert("nikita-2960", reiser4_blocknr_is_sane(block)); +} + +/* Get the child's block number, or 0 if the block is unallocated. */ +int +utmost_child_real_block_internal(const coord_t * coord, sideof side UNUSED_ARG, + reiser4_block_nr * block) +{ + assert("jmacd-2059", coord != NULL); + + *block = pointer_at(coord); + assert("nikita-2961", reiser4_blocknr_is_sane(block)); + + if (reiser4_blocknr_is_fake(block)) { + *block = 0; + } + + return 0; +} + +/* Return the child. 
*/ +int +utmost_child_internal(const coord_t * coord, sideof side UNUSED_ARG, + jnode ** childp) +{ + reiser4_block_nr block = pointer_at(coord); + znode *child; + + assert("jmacd-2059", childp != NULL); + assert("nikita-2962", reiser4_blocknr_is_sane(&block)); + + child = zlook(znode_get_tree(coord->node), &block); + + if (IS_ERR(child)) { + return PTR_ERR(child); + } + + *childp = ZJNODE(child); + + return 0; +} + +#if REISER4_DEBUG + +static void check_link(znode * left, znode * right) +{ + znode *scan; + + for (scan = left; scan != right; scan = scan->right) { + if (ZF_ISSET(scan, JNODE_RIP)) + break; + if (znode_is_right_connected(scan) && scan->right != NULL) { + if (ZF_ISSET(scan->right, JNODE_RIP)) + break; + assert("nikita-3285", + znode_is_left_connected(scan->right)); + assert("nikita-3265", + ergo(scan != left, + ZF_ISSET(scan, JNODE_HEARD_BANSHEE))); + assert("nikita-3284", scan->right->left == scan); + } else + break; + } +} + +int check__internal(const coord_t * coord, const char **error) +{ + reiser4_block_nr blk; + znode *child; + coord_t cpy; + + blk = pointer_at(coord); + if (!reiser4_blocknr_is_sane(&blk)) { + *error = "Invalid pointer"; + return -1; + } + coord_dup(&cpy, coord); + child = znode_at(&cpy, cpy.node); + if (child != NULL) { + znode *left_child; + znode *right_child; + + left_child = right_child = NULL; + + assert("nikita-3256", znode_invariant(child)); + if (coord_prev_item(&cpy) == 0 && item_is_internal(&cpy)) { + left_child = znode_at(&cpy, cpy.node); + if (left_child != NULL) { + read_lock_tree(znode_get_tree(child)); + check_link(left_child, child); + read_unlock_tree(znode_get_tree(child)); + zput(left_child); + } + } + coord_dup(&cpy, coord); + if (coord_next_item(&cpy) == 0 && item_is_internal(&cpy)) { + right_child = znode_at(&cpy, cpy.node); + if (right_child != NULL) { + read_lock_tree(znode_get_tree(child)); + check_link(child, right_child); + read_unlock_tree(znode_get_tree(child)); + zput(right_child); + } + } + 
zput(child); + } + return 0; +} + +#endif /* REISER4_DEBUG */ + +/* return true only if this item really points to "block" */ +/* Audited by: green(2002.06.14) */ +int has_pointer_to_internal(const coord_t * coord /* coord of item */ , + const reiser4_block_nr * block /* block number to + * check */ ) +{ + assert("nikita-613", coord != NULL); + assert("nikita-614", block != NULL); + + return pointer_at(coord) == *block; +} + +/* hook called by ->create_item() method of node plugin after new internal + item was just created. + + This is point where pointer to new node is inserted into tree. Initialize + parent pointer in child znode, insert child into sibling list and slum. + +*/ +int create_hook_internal(const coord_t * item /* coord of item */ , + void *arg /* child's left neighbor, if any */ ) +{ + znode *child; + __u64 child_ptr; + + assert("nikita-1252", item != NULL); + assert("nikita-1253", item->node != NULL); + assert("nikita-1181", znode_get_level(item->node) > LEAF_LEVEL); + assert("nikita-1450", item->unit_pos == 0); + + /* + * preparing to item insertion build_child_ptr_data sets pointer to + * data to be inserted to jnode's blocknr which is in cpu byte + * order. Node's create_item simply copied those data. As result we + * have child pointer in cpu's byte order. Convert content of internal + * item to little endian byte order. 
+ */ + child_ptr = get_unaligned((__u64 *)item_body_by_coord(item)); + reiser4_update_internal(item, &child_ptr); + + child = znode_at(item, item->node); + if (child != NULL && !IS_ERR(child)) { + znode *left; + int result = 0; + reiser4_tree *tree; + + left = arg; + tree = znode_get_tree(item->node); + write_lock_tree(tree); + write_lock_dk(tree); + assert("nikita-1400", (child->in_parent.node == NULL) + || (znode_above_root(child->in_parent.node))); + ++item->node->c_count; + coord_to_parent_coord(item, &child->in_parent); + sibling_list_insert_nolock(child, left); + + assert("nikita-3297", ZF_ISSET(child, JNODE_ORPHAN)); + ZF_CLR(child, JNODE_ORPHAN); + + if ((left != NULL) && !keyeq(znode_get_rd_key(left), + znode_get_rd_key(child))) { + znode_set_rd_key(child, znode_get_rd_key(left)); + } + write_unlock_dk(tree); + write_unlock_tree(tree); + zput(child); + return result; + } else { + if (child == NULL) + child = ERR_PTR(-EIO); + return PTR_ERR(child); + } +} + +/* hook called by ->cut_and_kill() method of node plugin just before internal + item is removed. + + This is point where empty node is removed from the tree. Clear parent + pointer in child, and mark node for pending deletion. + + Node will be actually deleted later and in several installations: + + . when last lock on this node will be released, node will be removed from + the sibling list and its lock will be invalidated + + . when last reference to this node will be dropped, bitmap will be updated + and node will be actually removed from the memory. 
+ +*/ +int kill_hook_internal(const coord_t * item /* coord of item */ , + pos_in_node_t from UNUSED_ARG /* start unit */ , + pos_in_node_t count UNUSED_ARG /* stop unit */ , + struct carry_kill_data *p UNUSED_ARG) +{ + znode *child; + int result = 0; + + assert("nikita-1222", item != NULL); + assert("nikita-1224", from == 0); + assert("nikita-1225", count == 1); + + child = znode_at(item, item->node); + if (child == NULL) + return 0; + if (IS_ERR(child)) + return PTR_ERR(child); + result = zload(child); + if (result) { + zput(child); + return result; + } + if (node_is_empty(child)) { + reiser4_tree *tree; + + assert("nikita-1397", znode_is_write_locked(child)); + assert("nikita-1398", child->c_count == 0); + assert("nikita-2546", ZF_ISSET(child, JNODE_HEARD_BANSHEE)); + + tree = znode_get_tree(item->node); + write_lock_tree(tree); + init_parent_coord(&child->in_parent, NULL); + --item->node->c_count; + write_unlock_tree(tree); + } else { + warning("nikita-1223", + "Cowardly refuse to remove link to non-empty node"); + result = RETERR(-EIO); + } + zrelse(child); + zput(child); + return result; +} + +/* hook called by ->shift() node plugin method when iternal item was just + moved from one node to another. 
+ + Update parent pointer in child and c_counts in old and new parent + +*/ +int shift_hook_internal(const coord_t * item /* coord of item */ , + unsigned from UNUSED_ARG /* start unit */ , + unsigned count UNUSED_ARG /* stop unit */ , + znode * old_node /* old parent */ ) +{ + znode *child; + znode *new_node; + reiser4_tree *tree; + + assert("nikita-1276", item != NULL); + assert("nikita-1277", from == 0); + assert("nikita-1278", count == 1); + assert("nikita-1451", item->unit_pos == 0); + + new_node = item->node; + assert("nikita-2132", new_node != old_node); + tree = znode_get_tree(item->node); + child = child_znode(item, old_node, 1, 0); + if (child == NULL) + return 0; + if (!IS_ERR(child)) { + write_lock_tree(tree); + ++new_node->c_count; + assert("nikita-1395", znode_parent(child) == old_node); + assert("nikita-1396", old_node->c_count > 0); + coord_to_parent_coord(item, &child->in_parent); + assert("nikita-1781", znode_parent(child) == new_node); + assert("nikita-1782", + check_tree_pointer(item, child) == NS_FOUND); + --old_node->c_count; + write_unlock_tree(tree); + zput(child); + return 0; + } else + return PTR_ERR(child); +} + +/* plugin->u.item.b.max_key_inside - not defined */ + +/* plugin->u.item.b.nr_units - item.c:single_unit */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/internal.h b/fs/reiser4/plugin/item/internal.h new file mode 100644 index 000000000000..27aa27d7fb08 --- /dev/null +++ b/fs/reiser4/plugin/item/internal.h @@ -0,0 +1,57 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* Internal item contains down-link to the child of the internal/twig + node in a tree. It is internal items that are actually used during + tree traversal. 
*/ + +#if !defined( __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ ) +#define __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ + +#include "../../forward.h" +#include "../../dformat.h" + +/* on-disk layout of internal item */ +typedef struct internal_item_layout { + /* 0 */ reiser4_dblock_nr pointer; + /* 4 */ +} internal_item_layout; + +struct cut_list; + +int mergeable_internal(const coord_t * p1, const coord_t * p2); +lookup_result lookup_internal(const reiser4_key * key, lookup_bias bias, + coord_t * coord); +/* store pointer from internal item into "block". Implementation of + ->down_link() method */ +extern void down_link_internal(const coord_t * coord, const reiser4_key * key, + reiser4_block_nr * block); +extern int has_pointer_to_internal(const coord_t * coord, + const reiser4_block_nr * block); +extern int create_hook_internal(const coord_t * item, void *arg); +extern int kill_hook_internal(const coord_t * item, pos_in_node_t from, + pos_in_node_t count, struct carry_kill_data *); +extern int shift_hook_internal(const coord_t * item, unsigned from, + unsigned count, znode * old_node); +extern void reiser4_print_internal(const char *prefix, coord_t * coord); + +extern int utmost_child_internal(const coord_t * coord, sideof side, + jnode ** child); +int utmost_child_real_block_internal(const coord_t * coord, sideof side, + reiser4_block_nr * block); + +extern void reiser4_update_internal(const coord_t * coord, + const reiser4_block_nr * blocknr); +/* FIXME: reiserfs has check_internal */ +extern int check__internal(const coord_t * coord, const char **error); + +/* __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/item.c b/fs/reiser4/plugin/item/item.c new file mode 100644 index 000000000000..e226f045f03d --- /dev/null +++ b/fs/reiser4/plugin/item/item.c @@ -0,0 +1,719 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* definition of item plugins. */ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "sde.h" +#include "internal.h" +#include "item.h" +#include "static_stat.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../tree.h" +#include "../../context.h" +#include "ctail.h" + +/* return pointer to item body */ +void item_body_by_coord_hard(coord_t * coord /* coord to query */ ) +{ + assert("nikita-324", coord != NULL); + assert("nikita-325", coord->node != NULL); + assert("nikita-326", znode_is_loaded(coord->node)); + assert("nikita-3200", coord->offset == INVALID_OFFSET); + + coord->offset = + node_plugin_by_node(coord->node)->item_by_coord(coord) - + zdata(coord->node); + ON_DEBUG(coord->body_v = coord->node->times_locked); +} + +void *item_body_by_coord_easy(const coord_t * coord /* coord to query */ ) +{ + return zdata(coord->node) + coord->offset; +} + +#if REISER4_DEBUG + +int item_body_is_valid(const coord_t * coord) +{ + return + coord->offset == + node_plugin_by_node(coord->node)->item_by_coord(coord) - + zdata(coord->node); +} + +#endif + +/* return length of item at @coord */ +pos_in_node_t item_length_by_coord(const coord_t * coord /* coord to query */ ) +{ + int len; + + assert("nikita-327", coord != NULL); + assert("nikita-328", coord->node != NULL); + assert("nikita-329", znode_is_loaded(coord->node)); + + len = node_plugin_by_node(coord->node)->length_by_coord(coord); + return len; +} + +void obtain_item_plugin(const coord_t * coord) +{ 
+ assert("nikita-330", coord != NULL); + assert("nikita-331", coord->node != NULL); + assert("nikita-332", znode_is_loaded(coord->node)); + + coord_set_iplug((coord_t *) coord, + node_plugin_by_node(coord->node)-> + plugin_by_coord(coord)); + assert("nikita-2479", + coord_iplug(coord) == + node_plugin_by_node(coord->node)->plugin_by_coord(coord)); +} + +/* return id of item */ +/* Audited by: green(2002.06.15) */ +item_id item_id_by_coord(const coord_t * coord /* coord to query */ ) +{ + assert("vs-539", coord != NULL); + assert("vs-538", coord->node != NULL); + assert("vs-537", znode_is_loaded(coord->node)); + assert("vs-536", item_plugin_by_coord(coord) != NULL); + assert("vs-540", + item_id_by_plugin(item_plugin_by_coord(coord)) < LAST_ITEM_ID); + + return item_id_by_plugin(item_plugin_by_coord(coord)); +} + +/* return key of item at @coord */ +/* Audited by: green(2002.06.15) */ +reiser4_key *item_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-338", coord != NULL); + assert("nikita-339", coord->node != NULL); + assert("nikita-340", znode_is_loaded(coord->node)); + + return node_plugin_by_node(coord->node)->key_at(coord, key); +} + +/* this returns max key in the item */ +reiser4_key *max_item_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + coord_t last; + + assert("nikita-338", coord != NULL); + assert("nikita-339", coord->node != NULL); + assert("nikita-340", znode_is_loaded(coord->node)); + + /* make coord pointing to last item's unit */ + coord_dup(&last, coord); + last.unit_pos = coord_num_units(&last) - 1; + assert("vs-1560", coord_is_existing_unit(&last)); + + max_unit_key_by_coord(&last, key); + return key; +} + +/* return key of unit at @coord */ +reiser4_key *unit_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-772", coord != NULL); + assert("nikita-774", coord->node != 
NULL); + assert("nikita-775", znode_is_loaded(coord->node)); + + if (item_plugin_by_coord(coord)->b.unit_key != NULL) + return item_plugin_by_coord(coord)->b.unit_key(coord, key); + else + return item_key_by_coord(coord, key); +} + +/* return the biggest key contained the unit @coord */ +reiser4_key *max_unit_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-772", coord != NULL); + assert("nikita-774", coord->node != NULL); + assert("nikita-775", znode_is_loaded(coord->node)); + + if (item_plugin_by_coord(coord)->b.max_unit_key != NULL) + return item_plugin_by_coord(coord)->b.max_unit_key(coord, key); + else + return unit_key_by_coord(coord, key); +} + +/* ->max_key_inside() method for items consisting of exactly one key (like + stat-data) */ +static reiser4_key *max_key_inside_single_key(const coord_t * + coord /* coord of item */ , + reiser4_key * + result /* resulting key */ ) +{ + assert("nikita-604", coord != NULL); + + /* coord -> key is starting key of this item and it has to be already + filled in */ + return unit_key_by_coord(coord, result); +} + +/* ->nr_units() method for items consisting of exactly one unit always */ +pos_in_node_t +nr_units_single_unit(const coord_t * coord UNUSED_ARG /* coord of item */ ) +{ + return 1; +} + +static int +paste_no_paste(coord_t * coord UNUSED_ARG, + reiser4_item_data * data UNUSED_ARG, + carry_plugin_info * info UNUSED_ARG) +{ + return 0; +} + +/* default ->fast_paste() method */ +static int +agree_to_fast_op(const coord_t * coord UNUSED_ARG /* coord of item */ ) +{ + return 1; +} + +int item_can_contain_key(const coord_t * item /* coord of item */ , + const reiser4_key * key /* key to check */ , + const reiser4_item_data * data /* parameters of item + * being created */ ) +{ + item_plugin *iplug; + reiser4_key min_key_in_item; + reiser4_key max_key_in_item; + + assert("nikita-1658", item != NULL); + assert("nikita-1659", key != NULL); + + iplug = 
item_plugin_by_coord(item); + if (iplug->b.can_contain_key != NULL) + return iplug->b.can_contain_key(item, key, data); + else { + assert("nikita-1681", iplug->b.max_key_inside != NULL); + item_key_by_coord(item, &min_key_in_item); + iplug->b.max_key_inside(item, &max_key_in_item); + + /* can contain key if + min_key_in_item <= key && + key <= max_key_in_item + */ + return keyle(&min_key_in_item, key) + && keyle(key, &max_key_in_item); + } +} + +/* mergeable method for non mergeable items */ +static int +not_mergeable(const coord_t * i1 UNUSED_ARG, const coord_t * i2 UNUSED_ARG) +{ + return 0; +} + +/* return 0 if @item1 and @item2 are not mergeable, !0 - otherwise */ +int are_items_mergeable(const coord_t * i1 /* coord of first item */ , + const coord_t * i2 /* coord of second item */ ) +{ + item_plugin *iplug; + reiser4_key k1; + reiser4_key k2; + + assert("nikita-1336", i1 != NULL); + assert("nikita-1337", i2 != NULL); + + iplug = item_plugin_by_coord(i1); + assert("nikita-1338", iplug != NULL); + + /* NOTE-NIKITA are_items_mergeable() is also called by assertions in + shifting code when nodes are in "suspended" state. 
*/ + assert("nikita-1663", + keyle(item_key_by_coord(i1, &k1), item_key_by_coord(i2, &k2))); + + if (iplug->b.mergeable != NULL) { + return iplug->b.mergeable(i1, i2); + } else if (iplug->b.max_key_inside != NULL) { + iplug->b.max_key_inside(i1, &k1); + item_key_by_coord(i2, &k2); + + /* mergeable if ->max_key_inside() >= key of i2; */ + return keyge(iplug->b.max_key_inside(i1, &k1), + item_key_by_coord(i2, &k2)); + } else { + item_key_by_coord(i1, &k1); + item_key_by_coord(i2, &k2); + + return + (get_key_locality(&k1) == get_key_locality(&k2)) && + (get_key_objectid(&k1) == get_key_objectid(&k2)) + && (iplug == item_plugin_by_coord(i2)); + } +} + +int item_is_extent(const coord_t * item) +{ + assert("vs-482", coord_is_existing_item(item)); + return item_id_by_coord(item) == EXTENT_POINTER_ID; +} + +int item_is_tail(const coord_t * item) +{ + assert("vs-482", coord_is_existing_item(item)); + return item_id_by_coord(item) == FORMATTING_ID; +} + +#if REISER4_DEBUG + +int item_is_statdata(const coord_t * item) +{ + assert("vs-516", coord_is_existing_item(item)); + return plugin_of_group(item_plugin_by_coord(item), STAT_DATA_ITEM_TYPE); +} + +int item_is_ctail(const coord_t * item) +{ + assert("edward-xx", coord_is_existing_item(item)); + return item_id_by_coord(item) == CTAIL_ID; +} + +#endif /* REISER4_DEBUG */ + +static int change_item(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change constituent item (sd, or dir_item) */ + return RETERR(-EINVAL); +} + +static reiser4_plugin_ops item_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_item +}; + +item_plugin item_plugins[LAST_ITEM_ID] = { + [STATIC_STAT_DATA_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = STATIC_STAT_DATA_ID, + .groups = (1 << STAT_DATA_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "sd", + .desc = "stat-data", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = 
max_key_inside_single_key, + .can_contain_key = NULL, + .mergeable = not_mergeable, + .nr_units = nr_units_single_unit, + .lookup = NULL, + .init = NULL, + .paste = paste_no_paste, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .sd = { + .init_inode = init_inode_static_sd, + .save_len = save_len_static_sd, + .save = save_static_sd + } + } + }, + [SIMPLE_DIR_ENTRY_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = SIMPLE_DIR_ENTRY_ID, + .groups = (1 << DIR_ENTRY_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "de", + .desc = "directory entry", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_single_key, + .can_contain_key = NULL, + .mergeable = NULL, + .nr_units = nr_units_single_unit, + .lookup = NULL, + .init = NULL, + .paste = NULL, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .dir = { + .extract_key = extract_key_de, + .update_key = update_key_de, + .extract_name = extract_name_de, + .extract_file_type = extract_file_type_de, + .add_entry = add_entry_de, + .rem_entry = rem_entry_de, + .max_name_len = max_name_len_de + } + } + }, + [COMPOUND_DIR_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = COMPOUND_DIR_ID, + 
.groups = (1 << DIR_ENTRY_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "cde", + .desc = "compressed directory entry", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_cde, + .can_contain_key = can_contain_key_cde, + .mergeable = mergeable_cde, + .nr_units = nr_units_cde, + .lookup = lookup_cde, + .init = init_cde, + .paste = paste_cde, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_cde, + .copy_units = copy_units_cde, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = cut_units_cde, + .kill_units = kill_units_cde, + .unit_key = unit_key_cde, + .max_unit_key = unit_key_cde, + .estimate = estimate_cde, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = reiser4_check_cde +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .dir = { + .extract_key = extract_key_cde, + .update_key = update_key_cde, + .extract_name = extract_name_cde, + .extract_file_type = extract_file_type_de, + .add_entry = add_entry_cde, + .rem_entry = rem_entry_cde, + .max_name_len = max_name_len_cde + } + } + }, + [NODE_POINTER_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = NODE_POINTER_ID, + .groups = (1 << INTERNAL_ITEM_TYPE), + .pops = NULL, + .label = "internal", + .desc = "internal item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = NULL, + .can_contain_key = NULL, + .mergeable = mergeable_internal, + .nr_units = nr_units_single_unit, + .lookup = lookup_internal, + .init = NULL, + .paste = NULL, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = create_hook_internal, + .kill_hook = kill_hook_internal, + .shift_hook = shift_hook_internal, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = check__internal +#endif + }, + .f = { + .utmost_child = 
utmost_child_internal, + .utmost_child_real_block = + utmost_child_real_block_internal, + .update = reiser4_update_internal, + .scan = NULL, + .convert = NULL + }, + .s = { + .internal = { + .down_link = down_link_internal, + .has_pointer_to = has_pointer_to_internal + } + } + }, + [EXTENT_POINTER_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = EXTENT_POINTER_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "extent", + .desc = "extent item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_extent, + .can_contain_key = can_contain_key_extent, + .mergeable = mergeable_extent, + .nr_units = nr_units_extent, + .lookup = lookup_extent, + .init = NULL, + .paste = paste_extent, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_extent, + .create_hook = create_hook_extent, + .copy_units = copy_units_extent, + .kill_hook = kill_hook_extent, + .shift_hook = NULL, + .cut_units = cut_units_extent, + .kill_units = kill_units_extent, + .unit_key = unit_key_extent, + .max_unit_key = max_unit_key_extent, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = reiser4_check_extent +#endif + }, + .f = { + .utmost_child = utmost_child_extent, + .utmost_child_real_block = + utmost_child_real_block_extent, + .update = NULL, + .scan = reiser4_scan_extent, + .convert = NULL, + .key_by_offset = key_by_offset_extent + }, + .s = { + .file = { + .write = reiser4_write_extent, + .read = reiser4_read_extent, + .readpage = reiser4_readpage_extent, + .get_block = get_block_address_extent, + .append_key = append_key_extent, + .init_coord_extension = + init_coord_extension_extent + } + } + }, + [FORMATTING_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = FORMATTING_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "body", + .desc = "body (or tail?) 
item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_tail, + .can_contain_key = can_contain_key_tail, + .mergeable = mergeable_tail, + .nr_units = nr_units_tail, + .lookup = lookup_tail, + .init = NULL, + .paste = paste_tail, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_tail, + .create_hook = NULL, + .copy_units = copy_units_tail, + .kill_hook = kill_hook_tail, + .shift_hook = NULL, + .cut_units = cut_units_tail, + .kill_units = kill_units_tail, + .unit_key = unit_key_tail, + .max_unit_key = unit_key_tail, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .file = { + .write = reiser4_write_tail, + .read = reiser4_read_tail, + .readpage = readpage_tail, + .get_block = get_block_address_tail, + .append_key = append_key_tail, + .init_coord_extension = + init_coord_extension_tail + } + } + }, + [CTAIL_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = CTAIL_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "ctail", + .desc = "cryptcompress tail item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_tail, + .can_contain_key = can_contain_key_ctail, + .mergeable = mergeable_ctail, + .nr_units = nr_units_ctail, + .lookup = NULL, + .init = init_ctail, + .paste = paste_ctail, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_ctail, + .create_hook = create_hook_ctail, + .copy_units = copy_units_ctail, + .kill_hook = kill_hook_ctail, + .shift_hook = shift_hook_ctail, + .cut_units = cut_units_ctail, + .kill_units = kill_units_ctail, + .unit_key = unit_key_tail, + .max_unit_key = unit_key_tail, + .estimate = estimate_ctail, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = check_ctail +#endif + }, + .f = { + .utmost_child = utmost_child_ctail, + /* FIXME-EDWARD: write 
this */ + .utmost_child_real_block = NULL, + .update = NULL, + .scan = scan_ctail, + .convert = convert_ctail + }, + .s = { + .file = { + .write = NULL, + .read = read_ctail, + .readpage = readpage_ctail, + .get_block = get_block_address_tail, + .append_key = append_key_ctail, + .init_coord_extension = + init_coord_extension_tail + } + } + }, + [BLACK_BOX_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = BLACK_BOX_ID, + .groups = (1 << OTHER_ITEM_TYPE), + .pops = NULL, + .label = "blackbox", + .desc = "black box item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = NULL, + .can_contain_key = NULL, + .mergeable = not_mergeable, + .nr_units = nr_units_single_unit, + /* to need for ->lookup method */ + .lookup = NULL, + .init = NULL, + .paste = NULL, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + } + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/item.h b/fs/reiser4/plugin/item/item.h new file mode 100644 index 000000000000..5998701f5745 --- /dev/null +++ b/fs/reiser4/plugin/item/item.h @@ -0,0 +1,398 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* first read balance.c comments before reading this */ + +/* An item_plugin implements all of the operations required for + balancing that are item specific. */ + +/* an item plugin also implements other operations that are specific to that + item. These go into the item specific operations portion of the item + handler, and all of the item specific portions of the item handler are put + into a union. 
*/ + +#if !defined( __REISER4_ITEM_H__ ) +#define __REISER4_ITEM_H__ + +#include "../../forward.h" +#include "../plugin_header.h" +#include "../../dformat.h" +#include "../../seal.h" +#include "../../plugin/file/file.h" + +#include /* for struct file, struct inode */ +#include /* for struct page */ +#include /* for struct dentry */ + +typedef enum { + STAT_DATA_ITEM_TYPE, + DIR_ENTRY_ITEM_TYPE, + INTERNAL_ITEM_TYPE, + UNIX_FILE_METADATA_ITEM_TYPE, + OTHER_ITEM_TYPE +} item_type_id; + +/* this is the part of each item plugin that all items are expected to + support or at least explicitly fail to support by setting the + pointer to null. */ +struct balance_ops { + /* operations called by balancing + + It is interesting to consider that some of these item + operations could be given sources or targets that are not + really items in nodes. This could be ok/useful. + + */ + /* maximal key that can _possibly_ be occupied by this item + + When inserting, and node ->lookup() method (called by + coord_by_key()) reaches an item after binary search, + the ->max_key_inside() item plugin method is used to determine + whether new item should pasted into existing item + (new_key<=max_key_inside()) or new item has to be created + (new_key>max_key_inside()). + + For items that occupy exactly one key (like stat-data) + this method should return this key. For items that can + grow indefinitely (extent, directory item) this should + return reiser4_max_key(). + + For example extent with the key + + (LOCALITY,4,OBJID,STARTING-OFFSET), and length BLK blocks, + + ->max_key_inside is (LOCALITY,4,OBJID,0xffffffffffffffff), and + */ + reiser4_key *(*max_key_inside) (const coord_t *, reiser4_key *); + + /* true if item @coord can merge data at @key. */ + int (*can_contain_key) (const coord_t *, const reiser4_key *, + const reiser4_item_data *); + /* mergeable() - check items for mergeability + + Optional method. Returns true if two items can be merged. 
+ + */ + int (*mergeable) (const coord_t *, const coord_t *); + + /* number of atomic things in an item. + NOTE FOR CONTRIBUTORS: use a generic method + nr_units_single_unit() for solid (atomic) items, as + tree operations use it as a criterion of solidness + (see is_solid_item macro) */ + pos_in_node_t(*nr_units) (const coord_t *); + + /* search within item for a unit within the item, and return a + pointer to it. This can be used to calculate how many + bytes to shrink an item if you use pointer arithmetic and + compare to the start of the item body if the item's data + are continuous in the node, if the item's data are not + continuous in the node, all sorts of other things are maybe + going to break as well. */ + lookup_result(*lookup) (const reiser4_key *, lookup_bias, coord_t *); + /* method called by ode_plugin->create_item() to initialise new + item */ + int (*init) (coord_t * target, coord_t * from, + reiser4_item_data * data); + /* method called (e.g., by reiser4_resize_item()) to place new data + into item when it grows */ + int (*paste) (coord_t *, reiser4_item_data *, carry_plugin_info *); + /* return true if paste into @coord is allowed to skip + carry. That is, if such paste would require any changes + at the parent level + */ + int (*fast_paste) (const coord_t *); + /* how many but not more than @want units of @source can be + shifted into @target node. If pend == append - we try to + append last item of @target by first units of @source. If + pend == prepend - we try to "prepend" first item in @target + by last units of @source. @target node has @free_space + bytes of free space. Total size of those units are returned + via @size. + + @target is not NULL if shifting to the mergeable item and + NULL is new item will be created during shifting. 
+ */ + int (*can_shift) (unsigned free_space, coord_t *, + znode *, shift_direction, unsigned *size, + unsigned want); + + /* starting off @from-th unit of item @source append or + prepend @count units to @target. @target has been already + expanded by @free_space bytes. That must be exactly what is + needed for those items in @target. If @where_is_free_space + == SHIFT_LEFT - free space is at the end of @target item, + othersize - it is in the beginning of it. */ + void (*copy_units) (coord_t *, coord_t *, + unsigned from, unsigned count, + shift_direction where_is_free_space, + unsigned free_space); + + int (*create_hook) (const coord_t *, void *); + /* do whatever is necessary to do when @count units starting + from @from-th one are removed from the tree */ + /* FIXME-VS: this is used to be here for, in particular, + extents and items of internal type to free blocks they point + to at the same time with removing items from a + tree. Problems start, however, when dealloc_block fails due + to some reason. Item gets removed, but blocks it pointed to + are not freed. It is not clear how to fix this for items of + internal type because a need to remove internal item may + appear in the middle of balancing, and there is no way to + undo changes made. OTOH, if space allocator involves + balancing to perform dealloc_block - this will probably + break balancing due to deadlock issues + */ + int (*kill_hook) (const coord_t *, pos_in_node_t from, + pos_in_node_t count, struct carry_kill_data *); + int (*shift_hook) (const coord_t *, unsigned from, unsigned count, + znode * _node); + + /* unit @*from contains @from_key. unit @*to contains @to_key. Cut all keys between @from_key and @to_key + including boundaries. When units are cut from item beginning - move space which gets freed to head of + item. When units are cut from item end - move freed space to item end. When units are cut from the middle of + item - move freed space to item head. 
Return amount of space which got freed. Save smallest removed key in + @smallest_removed if it is not 0. Save new first item key in @new_first_key if it is not 0 + */ + int (*cut_units) (coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, + reiser4_key * smallest_removed, + reiser4_key * new_first_key); + + /* like cut_units, except that these units are removed from the + tree, not only from a node */ + int (*kill_units) (coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, + reiser4_key * smallest_removed, + reiser4_key * new_first); + + /* if @key_of_coord == 1 - returned key of coord, otherwise - + key of unit is returned. If @coord is not set to certain + unit - ERR_PTR(-ENOENT) is returned */ + reiser4_key *(*unit_key) (const coord_t *, reiser4_key *); + reiser4_key *(*max_unit_key) (const coord_t *, reiser4_key *); + /* estimate how much space is needed for paste @data into item at + @coord. if @coord==0 - estimate insertion, otherwise - estimate + pasting + */ + int (*estimate) (const coord_t *, const reiser4_item_data *); + + /* converts flow @f to item data. @coord == 0 on insert */ + int (*item_data_by_flow) (const coord_t *, const flow_t *, + reiser4_item_data *); + + /*void (*show) (struct seq_file *, coord_t *); */ + +#if REISER4_DEBUG + /* used for debugging, every item should have here the most + complete possible check of the consistency of the item that + the inventor can construct */ + int (*check) (const coord_t *, const char **error); +#endif + +}; + +struct flush_ops { + /* return the right or left child of @coord, only if it is in memory */ + int (*utmost_child) (const coord_t *, sideof side, jnode ** child); + + /* return whether the right or left child of @coord has a non-fake + block number. 
*/ + int (*utmost_child_real_block) (const coord_t *, sideof side, + reiser4_block_nr *); + /* relocate child at @coord to the @block */ + void (*update) (const coord_t *, const reiser4_block_nr *); + /* count unformatted nodes per item for leave relocation policy, etc.. */ + int (*scan) (flush_scan * scan); + /* convert item by flush */ + int (*convert) (flush_pos_t * pos); + /* backward mapping from jnode offset to a key. */ + int (*key_by_offset) (struct inode *, loff_t, reiser4_key *); +}; + +/* operations specific to the directory item */ +struct dir_entry_iops { + /* extract stat-data key from directory entry at @coord and place it + into @key. */ + int (*extract_key) (const coord_t *, reiser4_key * key); + /* update object key in item. */ + int (*update_key) (const coord_t *, const reiser4_key *, lock_handle *); + /* extract name from directory entry at @coord and return it */ + char *(*extract_name) (const coord_t *, char *buf); + /* extract file type (DT_* stuff) from directory entry at @coord and + return it */ + unsigned (*extract_file_type) (const coord_t *); + int (*add_entry) (struct inode * dir, + coord_t *, lock_handle *, + const struct dentry * name, + reiser4_dir_entry_desc * entry); + int (*rem_entry) (struct inode * dir, const struct qstr * name, + coord_t *, lock_handle *, + reiser4_dir_entry_desc * entry); + int (*max_name_len) (const struct inode * dir); +}; + +/* operations specific to items regular (unix) file metadata are built of */ +struct file_iops{ + ssize_t (*write) (struct file *, struct inode *, + const char __user *, size_t, loff_t *pos); + int (*read) (struct file *, flow_t *, hint_t *); + int (*readpage) (void *, struct page *); + int (*get_block) (const coord_t *, sector_t, sector_t *); + /* + * key of first byte which is not addressed by the item @coord is set + * to. 
+ * For example, for extent item with the key + * + * (LOCALITY,4,OBJID,STARTING-OFFSET), and length BLK blocks, + * + * ->append_key is + * + * (LOCALITY,4,OBJID,STARTING-OFFSET + BLK * block_size) + */ + reiser4_key *(*append_key) (const coord_t *, reiser4_key *); + + void (*init_coord_extension) (uf_coord_t *, loff_t); +}; + +/* operations specific to items of stat data type */ +struct sd_iops { + int (*init_inode) (struct inode * inode, char *sd, int len); + int (*save_len) (struct inode * inode); + int (*save) (struct inode * inode, char **area); +}; + +/* operations specific to internal item */ +struct internal_iops{ + /* all tree traversal want to know from internal item is where + to go next. */ + void (*down_link) (const coord_t * coord, + const reiser4_key * key, reiser4_block_nr * block); + /* check that given internal item contains given pointer. */ + int (*has_pointer_to) (const coord_t * coord, + const reiser4_block_nr * block); +}; + +struct item_plugin { + /* generic fields */ + plugin_header h; + /* methods common for all item types */ + struct balance_ops b; /* balance operations */ + struct flush_ops f; /* flush operates with items via this methods */ + + /* methods specific to particular type of item */ + union { + struct dir_entry_iops dir; + struct file_iops file; + struct sd_iops sd; + struct internal_iops internal; + } s; +}; + +#define is_solid_item(iplug) ((iplug)->b.nr_units == nr_units_single_unit) + +static inline item_id item_id_by_plugin(item_plugin * plugin) +{ + return plugin->h.id; +} + +static inline char get_iplugid(item_plugin * iplug) +{ + assert("nikita-2838", iplug != NULL); + assert("nikita-2839", iplug->h.id < 0xff); + return (char)item_id_by_plugin(iplug); +} + +extern unsigned long znode_times_locked(const znode * z); + +static inline void coord_set_iplug(coord_t * coord, item_plugin * iplug) +{ + assert("nikita-2837", coord != NULL); + assert("nikita-2838", iplug != NULL); + coord->iplugid = get_iplugid(iplug); + 
ON_DEBUG(coord->plug_v = znode_times_locked(coord->node)); +} + +static inline item_plugin *coord_iplug(const coord_t * coord) +{ + assert("nikita-2833", coord != NULL); + assert("nikita-2834", coord->iplugid != INVALID_PLUGID); + assert("nikita-3549", coord->plug_v == znode_times_locked(coord->node)); + return (item_plugin *) plugin_by_id(REISER4_ITEM_PLUGIN_TYPE, + coord->iplugid); +} + +extern int item_can_contain_key(const coord_t * item, const reiser4_key * key, + const reiser4_item_data *); +extern int are_items_mergeable(const coord_t * i1, const coord_t * i2); +extern int item_is_extent(const coord_t *); +extern int item_is_tail(const coord_t *); +extern int item_is_statdata(const coord_t * item); +extern int item_is_ctail(const coord_t *); + +extern pos_in_node_t item_length_by_coord(const coord_t * coord); +extern pos_in_node_t nr_units_single_unit(const coord_t * coord); +extern item_id item_id_by_coord(const coord_t * coord /* coord to query */ ); +extern reiser4_key *item_key_by_coord(const coord_t * coord, reiser4_key * key); +extern reiser4_key *max_item_key_by_coord(const coord_t *, reiser4_key *); +extern reiser4_key *unit_key_by_coord(const coord_t * coord, reiser4_key * key); +extern reiser4_key *max_unit_key_by_coord(const coord_t * coord, + reiser4_key * key); +extern void obtain_item_plugin(const coord_t * coord); + +#if defined(REISER4_DEBUG) +extern int znode_is_loaded(const znode * node); +#endif + +/* return plugin of item at @coord */ +static inline item_plugin *item_plugin_by_coord(const coord_t * + coord /* coord to query */ ) +{ + assert("nikita-330", coord != NULL); + assert("nikita-331", coord->node != NULL); + assert("nikita-332", znode_is_loaded(coord->node)); + + if (unlikely(!coord_is_iplug_set(coord))) + obtain_item_plugin(coord); + return coord_iplug(coord); +} + +/* this returns true if item is of internal type */ +static inline int item_is_internal(const coord_t * item) +{ + assert("vs-483", coord_is_existing_item(item)); + 
return plugin_of_group(item_plugin_by_coord(item), INTERNAL_ITEM_TYPE); +} + +extern void item_body_by_coord_hard(coord_t * coord); +extern void *item_body_by_coord_easy(const coord_t * coord); +#if REISER4_DEBUG +extern int item_body_is_valid(const coord_t * coord); +#endif + +/* return pointer to item body */ +static inline void *item_body_by_coord(const coord_t * + coord /* coord to query */ ) +{ + assert("nikita-324", coord != NULL); + assert("nikita-325", coord->node != NULL); + assert("nikita-326", znode_is_loaded(coord->node)); + + if (coord->offset == INVALID_OFFSET) + item_body_by_coord_hard((coord_t *) coord); + assert("nikita-3201", item_body_is_valid(coord)); + assert("nikita-3550", coord->body_v == znode_times_locked(coord->node)); + return item_body_by_coord_easy(coord); +} + +/* __REISER4_ITEM_H__ */ +#endif +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/item/sde.c b/fs/reiser4/plugin/item/sde.c new file mode 100644 index 000000000000..c15abe3f0ce6 --- /dev/null +++ b/fs/reiser4/plugin/item/sde.c @@ -0,0 +1,186 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry implementation */ +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../coord.h" +#include "sde.h" +#include "item.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" + +#include /* for struct inode */ +#include /* for struct dentry */ + +/* ->extract_key() method of simple directory item plugin. 
*/ +int extract_key_de(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + directory_entry_format *dent; + + assert("nikita-1458", coord != NULL); + assert("nikita-1459", key != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + assert("nikita-1158", item_length_by_coord(coord) >= (int)sizeof *dent); + return extract_key_from_id(&dent->id, key); +} + +int +update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh UNUSED_ARG) +{ + directory_entry_format *dent; + obj_key_id obj_id; + int result; + + assert("nikita-2342", coord != NULL); + assert("nikita-2343", key != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + result = build_obj_key_id(key, &obj_id); + if (result == 0) { + dent->id = obj_id; + znode_make_dirty(coord->node); + } + return 0; +} + +char *extract_dent_name(const coord_t * coord, directory_entry_format * dent, + char *buf) +{ + reiser4_key key; + + unit_key_by_coord(coord, &key); + if (get_key_type(&key) != KEY_FILE_NAME_MINOR) + reiser4_print_address("oops", znode_get_block(coord->node)); + if (!is_longname_key(&key)) { + if (is_dot_key(&key)) + return (char *)"."; + else + return extract_name_from_key(&key, buf); + } else + return (char *)dent->name; +} + +/* ->extract_name() method of simple directory item plugin. */ +char *extract_name_de(const coord_t * coord /* coord of item */ , char *buf) +{ + directory_entry_format *dent; + + assert("nikita-1460", coord != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + return extract_dent_name(coord, dent, buf); +} + +/* ->extract_file_type() method of simple directory item plugin. */ +unsigned extract_file_type_de(const coord_t * coord UNUSED_ARG /* coord of + * item */ ) +{ + assert("nikita-1764", coord != NULL); + /* we don't store file type in the directory entry yet. 
+ + But see comments at kassign.h:obj_key_id + */ + return DT_UNKNOWN; +} + +int add_entry_de(struct inode *dir /* directory of item */ , + coord_t * coord /* coord of item */ , + lock_handle * lh /* insertion lock handle */ , + const struct dentry *de /* name to add */ , + reiser4_dir_entry_desc * entry /* parameters of new directory + * entry */ ) +{ + reiser4_item_data data; + directory_entry_format *dent; + int result; + const char *name; + int len; + int longname; + + name = de->d_name.name; + len = de->d_name.len; + assert("nikita-1163", strlen(name) == len); + + longname = is_longname(name, len); + + data.length = sizeof *dent; + if (longname) + data.length += len + 1; + data.data = NULL; + data.user = 0; + data.iplug = item_plugin_by_id(SIMPLE_DIR_ENTRY_ID); + + inode_add_bytes(dir, data.length); + + result = insert_by_coord(coord, &data, &entry->key, lh, 0 /*flags */ ); + if (result != 0) + return result; + + dent = (directory_entry_format *) item_body_by_coord(coord); + build_inode_key_id(entry->obj, &dent->id); + if (longname) { + memcpy(dent->name, name, len); + put_unaligned(0, &dent->name[len]); + } + return 0; +} + +int rem_entry_de(struct inode *dir /* directory of item */ , + const struct qstr *name UNUSED_ARG, + coord_t * coord /* coord of item */ , + lock_handle * lh UNUSED_ARG /* lock handle for + * removal */ , + reiser4_dir_entry_desc * entry UNUSED_ARG /* parameters of + * directory entry + * being removed */ ) +{ + coord_t shadow; + int result; + int length; + + length = item_length_by_coord(coord); + if (inode_get_bytes(dir) < length) { + warning("nikita-2627", "Dir is broke: %llu: %llu", + (unsigned long long)get_inode_oid(dir), + inode_get_bytes(dir)); + + return RETERR(-EIO); + } + + /* cut_node() is supposed to take pointers to _different_ + coords, because it will modify them without respect to + possible aliasing. To work around this, create temporary copy + of @coord. 
+ */ + coord_dup(&shadow, coord); + result = + kill_node_content(coord, &shadow, NULL, NULL, NULL, NULL, NULL, 0); + if (result == 0) { + inode_sub_bytes(dir, length); + } + return result; +} + +int max_name_len_de(const struct inode *dir) +{ + return reiser4_tree_by_inode(dir)->nplug->max_item_size() - + sizeof(directory_entry_format) - 2; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/sde.h b/fs/reiser4/plugin/item/sde.h new file mode 100644 index 000000000000..f26762a1c287 --- /dev/null +++ b/fs/reiser4/plugin/item/sde.h @@ -0,0 +1,66 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry. */ + +#if !defined( __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ ) +#define __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +#include +#include /* for struct dentry */ + +typedef struct directory_entry_format { + /* key of object stat-data. It's not necessary to store whole + key here, because it's always key of stat-data, so minor + packing locality and offset can be omitted here. But this + relies on particular key allocation scheme for stat-data, so, + for extensibility sake, whole key can be stored here. + + We store key as array of bytes, because we don't want 8-byte + alignment of dir entries. + */ + obj_key_id id; + /* file name. Null terminated string. 
*/ + d8 name[0]; +} directory_entry_format; + +void print_de(const char *prefix, coord_t * coord); +int extract_key_de(const coord_t * coord, reiser4_key * key); +int update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_de(const coord_t * coord, char *buf); +unsigned extract_file_type_de(const coord_t * coord); +int add_entry_de(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int rem_entry_de(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_de(const struct inode *dir); + +int de_rem_and_shrink(struct inode *dir, coord_t * coord, int length); + +char *extract_dent_name(const coord_t * coord, + directory_entry_format * dent, char *buf); + +#if REISER4_LARGE_KEY +#define DE_NAME_BUF_LEN (24) +#else +#define DE_NAME_BUF_LEN (16) +#endif + +/* __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/static_stat.c b/fs/reiser4/plugin/item/static_stat.c new file mode 100644 index 000000000000..d75d3530ac57 --- /dev/null +++ b/fs/reiser4/plugin/item/static_stat.c @@ -0,0 +1,1114 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* stat data manipulation. */ + +#include "../../forward.h" +#include "../../super.h" +#include "../../vfs_ops.h" +#include "../../inode.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../object.h" +#include "../plugin.h" +#include "../plugin_header.h" +#include "static_stat.h" +#include "item.h" + +#include +#include + +/* see static_stat.h for explanation */ + +/* helper function used while we are dumping/loading inode/plugin state + to/from the stat-data. 
*/ + +static void move_on(int *length /* space remaining in stat-data */ , + char **area /* current coord in stat data */ , + int size_of /* how many bytes to move forward */ ) +{ + assert("nikita-615", length != NULL); + assert("nikita-616", area != NULL); + + *length -= size_of; + *area += size_of; + + assert("nikita-617", *length >= 0); +} + +/* helper function used while loading inode/plugin state from stat-data. + Complain if there is less space in stat-data than was expected. + Can only happen on disk corruption. */ +static int not_enough_space(struct inode *inode /* object being processed */ , + const char *where /* error message */ ) +{ + assert("nikita-618", inode != NULL); + + warning("nikita-619", "Not enough space in %llu while loading %s", + (unsigned long long)get_inode_oid(inode), where); + + return RETERR(-EINVAL); +} + +/* helper function used while loading inode/plugin state from + stat-data. Call it if invalid plugin id was found. */ +static int unknown_plugin(reiser4_plugin_id id /* invalid id */ , + struct inode *inode /* object being processed */ ) +{ + warning("nikita-620", "Unknown plugin %i in %llu", + id, (unsigned long long)get_inode_oid(inode)); + + return RETERR(-EINVAL); +} + +/* this is installed as ->init_inode() method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). + Copies data from on-disk stat-data format into inode. + Handles stat-data extensions. 
*/
/* was sd_load */
/*
 * Unpack an on-disk static stat-data item into @inode.
 *
 * The item starts with a 16-bit extension bitmask (reiser4_stat_data_base).
 * Bits 0..14 of each 16-bit chunk select stat-data extensions (see
 * sd_ext_bits); bit 15 of a chunk means "another 16-bit mask chunk
 * follows the data". For every extension up to
 * LAST_IMPORTANT_SD_EXTENSION either ->present() (bit set) or ->absent()
 * (bit clear) is invoked, so basic extensions can initialise the inode
 * even when they are missing from disk.
 *
 * Returns 0 on success, -EINVAL on corrupted stat-data, or whatever a
 * plugin's ->present()/->absent() method returned.
 */
int init_inode_static_sd(struct inode *inode /* object being processed */ ,
			 char *sd /* stat-data body */ ,
			 int len /* length of stat-data */ )
{
	int result;
	int bit;		/* absolute bit index in the extension mask */
	int chunk;		/* how many extra 16-bit mask chunks were read */
	__u16 mask;		/* current 16-bit window of the mask */
	__u64 bigmask;		/* accumulated full mask, cached in the inode */
	reiser4_stat_data_base *sd_base;
	reiser4_inode *state;

	assert("nikita-625", inode != NULL);
	assert("nikita-626", sd != NULL);

	result = 0;
	sd_base = (reiser4_stat_data_base *) sd;
	state = reiser4_inode_data(inode);
	mask = le16_to_cpu(get_unaligned(&sd_base->extmask));
	bigmask = mask;
	reiser4_inode_set_flag(inode, REISER4_SDLEN_KNOWN);

	move_on(&len, &sd, sizeof *sd_base);
	for (bit = 0, chunk = 0;
	     mask != 0 || bit <= LAST_IMPORTANT_SD_EXTENSION;
	     ++bit, mask >>= 1) {
		if (((bit + 1) % 16) != 0) {
			/* handle extension */
			sd_ext_plugin *sdplug;

			if (bit >= LAST_SD_EXTENSION) {
				warning("vpf-1904",
					"No such extension %i in inode %llu",
					bit,
					(unsigned long long)
					get_inode_oid(inode));

				result = RETERR(-EINVAL);
				break;
			}

			sdplug = sd_ext_plugin_by_id(bit);
			if (sdplug == NULL) {
				warning("nikita-627",
					"No such extension %i in inode %llu",
					bit,
					(unsigned long long)
					get_inode_oid(inode));

				result = RETERR(-EINVAL);
				break;
			}
			if (mask & 1) {
				assert("nikita-628", sdplug->present);
				/* alignment is not supported in node layout
				   plugin yet.
				   result = align( inode, &len, &sd,
				   sdplug -> alignment );
				   if( result != 0 )
				   return result; */
				result = sdplug->present(inode, &sd, &len);
			} else if (sdplug->absent != NULL)
				result = sdplug->absent(inode);
			if (result)
				break;
			/* else, we are looking at the last bit in 16-bit
			   portion of bitmask */
		} else if (mask & 1) {
			/* next portion of bitmask */
			if (len < (int)sizeof(d16)) {
				warning("nikita-629",
					"No space for bitmap in inode %llu",
					(unsigned long long)
					get_inode_oid(inode));

				result = RETERR(-EINVAL);
				break;
			}
			mask = le16_to_cpu(get_unaligned((d16 *)sd));
			bigmask <<= 16;
			bigmask |= mask;
			move_on(&len, &sd, sizeof(d16));
			++chunk;
			if (chunk == 3) {
				/* a 64-bit extmask can hold at most three
				   continuation chunks; a fourth chunk marker
				   is corruption */
				if (!(mask & 0x8000)) {
					/* clear last bit */
					mask &= ~0x8000;
					continue;
				}
				/* too much */
				warning("nikita-630",
					"Too many extensions in %llu",
					(unsigned long long)
					get_inode_oid(inode));

				result = RETERR(-EINVAL);
				break;
			}
		} else
			/* bitmask exhausted */
			break;
	}
	state->extmask = bigmask;
	/* common initialisations */
	/* NOTE(review): sizeof() promotes the left side of this comparison
	   to an unsigned type, so a negative @len would compare as a huge
	   positive value — confirm @len cannot go negative here. */
	if (len - (bit / 16 * sizeof(d16)) > 0) {
		/* alignment in save_len_static_sd() is taken into account
		   -edward */
		warning("nikita-631", "unused space in inode %llu",
			(unsigned long long)get_inode_oid(inode));
	}

	return result;
}

/* estimates size of stat-data required to store inode.
   Installed as ->save_len() method of
   item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c).
*/
/* was sd_len */
/* Sum the base size, each present extension's ->save_len(), and one d16
   per extra 16-bit mask chunk. Must agree byte-for-byte with what
   save_static_sd() emits. */
int save_len_static_sd(struct inode *inode /* object being processed */ )
{
	unsigned int result;
	__u64 mask;
	int bit;

	assert("nikita-632", inode != NULL);

	result = sizeof(reiser4_stat_data_base);
	mask = reiser4_inode_data(inode)->extmask;
	for (bit = 0; mask != 0; ++bit, mask >>= 1) {
		if (mask & 1) {
			sd_ext_plugin *sdplug;

			sdplug = sd_ext_plugin_by_id(bit);
			assert("nikita-633", sdplug != NULL);
			/*
			   no aligment support
			   result +=
			   reiser4_round_up(result, sdplug -> alignment) -
			   result;
			 */
			result += sdplug->save_len(inode);
		}
	}
	/* one 16-bit continuation chunk per 16 bits of mask used */
	result += bit / 16 * sizeof(d16);
	return result;
}

/* saves inode into stat-data.
   Installed as ->save() method of
   item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). */
/* was sd_save */
/* Serialize @inode into the stat-data buffer at *area, writing the
   extension bitmask chunk-by-chunk and calling each present extension's
   ->save() in bit order. *area is advanced past the written bytes. */
int save_static_sd(struct inode *inode /* object being processed */ ,
		   char **area /* where to save stat-data */ )
{
	int result;
	__u64 emask;
	int bit;
	unsigned int len;
	reiser4_stat_data_base *sd_base;

	assert("nikita-634", inode != NULL);
	assert("nikita-635", area != NULL);

	result = 0;
	emask = reiser4_inode_data(inode)->extmask;
	sd_base = (reiser4_stat_data_base *) * area;
	put_unaligned(cpu_to_le16((__u16)(emask & 0xffff)), &sd_base->extmask);
	/*cputod16((unsigned)(emask & 0xffff), &sd_base->extmask);*/

	*area += sizeof *sd_base;
	/* len is unused below; kept at a sentinel value */
	len = 0xffffffffu;
	for (bit = 0; emask != 0; ++bit, emask >>= 1) {
		if (emask & 1) {
			if ((bit + 1) % 16 != 0) {
				/* an ordinary extension bit: emit its data */
				sd_ext_plugin *sdplug;
				sdplug = sd_ext_plugin_by_id(bit);
				assert("nikita-636", sdplug != NULL);
				/* no alignment support yet
				   align( inode, &len, area,
				   sdplug -> alignment ); */
				result = sdplug->save(inode, area);
				if (result)
					break;
			} else {
				/* chunk boundary: emit the next 16 bits of
				   the mask as a continuation chunk */
				put_unaligned(cpu_to_le16((__u16)(emask & 0xffff)),
					      (d16 *)(*area));
				/*cputod16((unsigned)(emask & 0xffff),
				  (d16 *) * area);*/
				*area += sizeof(d16);
			}
		}
	}
	return result;
}

/* stat-data extension handling functions.
*/ + +static int present_lw_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + if (*len >= (int)sizeof(reiser4_light_weight_stat)) { + reiser4_light_weight_stat *sd_lw; + + sd_lw = (reiser4_light_weight_stat *) * area; + + inode->i_mode = le16_to_cpu(get_unaligned(&sd_lw->mode)); + set_nlink(inode, le32_to_cpu(get_unaligned(&sd_lw->nlink))); + inode->i_size = le64_to_cpu(get_unaligned(&sd_lw->size)); + if ((inode->i_mode & S_IFMT) == (S_IFREG | S_IFIFO)) { + inode->i_mode &= ~S_IFIFO; + warning("", "partially converted file is encountered"); + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + } + move_on(len, area, sizeof *sd_lw); + return 0; + } else + return not_enough_space(inode, "lw sd"); +} + +static int save_len_lw_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_light_weight_stat); +} + +static int save_lw_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_light_weight_stat *sd; + mode_t delta; + + assert("nikita-2705", inode != NULL); + assert("nikita-2706", area != NULL); + assert("nikita-2707", *area != NULL); + + sd = (reiser4_light_weight_stat *) * area; + + delta = (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) ? 
S_IFIFO : 0); + put_unaligned(cpu_to_le16(inode->i_mode | delta), &sd->mode); + put_unaligned(cpu_to_le32(inode->i_nlink), &sd->nlink); + put_unaligned(cpu_to_le64((__u64) inode->i_size), &sd->size); + *area += sizeof *sd; + return 0; +} + +static int present_unix_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + assert("nikita-637", inode != NULL); + assert("nikita-638", area != NULL); + assert("nikita-639", *area != NULL); + assert("nikita-640", len != NULL); + assert("nikita-641", *len > 0); + + if (*len >= (int)sizeof(reiser4_unix_stat)) { + reiser4_unix_stat *sd; + + sd = (reiser4_unix_stat *) * area; + + i_uid_write(inode, le32_to_cpu(get_unaligned(&sd->uid))); + i_gid_write(inode, le32_to_cpu(get_unaligned(&sd->gid))); + inode->i_atime.tv_sec = le32_to_cpu(get_unaligned(&sd->atime)); + inode->i_mtime.tv_sec = le32_to_cpu(get_unaligned(&sd->mtime)); + inode->i_ctime.tv_sec = le32_to_cpu(get_unaligned(&sd->ctime)); + if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) + inode->i_rdev = le64_to_cpu(get_unaligned(&sd->u.rdev)); + else + inode_set_bytes(inode, (loff_t) le64_to_cpu(get_unaligned(&sd->u.bytes))); + move_on(len, area, sizeof *sd); + return 0; + } else + return not_enough_space(inode, "unix sd"); +} + +static int absent_unix_sd(struct inode *inode /* object being processed */ ) +{ + i_uid_write(inode, get_super_private(inode->i_sb)->default_uid); + i_gid_write(inode, get_super_private(inode->i_sb)->default_gid); + inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + inode_set_bytes(inode, inode->i_size); + /* mark inode as lightweight, so that caller (lookup_common) will + complete initialisation by copying [ug]id from a parent. 
*/ + reiser4_inode_set_flag(inode, REISER4_LIGHT_WEIGHT); + return 0; +} + +/* Audited by: green(2002.06.14) */ +static int save_len_unix_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_unix_stat); +} + +static int save_unix_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_unix_stat *sd; + + assert("nikita-642", inode != NULL); + assert("nikita-643", area != NULL); + assert("nikita-644", *area != NULL); + + sd = (reiser4_unix_stat *) * area; + put_unaligned(cpu_to_le32(i_uid_read(inode)), &sd->uid); + put_unaligned(cpu_to_le32(i_gid_read(inode)), &sd->gid); + put_unaligned(cpu_to_le32((__u32) inode->i_atime.tv_sec), &sd->atime); + put_unaligned(cpu_to_le32((__u32) inode->i_ctime.tv_sec), &sd->ctime); + put_unaligned(cpu_to_le32((__u32) inode->i_mtime.tv_sec), &sd->mtime); + if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) + put_unaligned(cpu_to_le64(inode->i_rdev), &sd->u.rdev); + else + put_unaligned(cpu_to_le64((__u64) inode_get_bytes(inode)), &sd->u.bytes); + *area += sizeof *sd; + return 0; +} + +static int +present_large_times_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + if (*len >= (int)sizeof(reiser4_large_times_stat)) { + reiser4_large_times_stat *sd_lt; + + sd_lt = (reiser4_large_times_stat *) * area; + + inode->i_atime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->atime)); + inode->i_mtime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->mtime)); + inode->i_ctime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->ctime)); + + move_on(len, area, sizeof *sd_lt); + return 0; + } else + return not_enough_space(inode, "large times sd"); +} + +static int +save_len_large_times_sd(struct inode *inode UNUSED_ARG + /* object being processed */ ) +{ + return sizeof(reiser4_large_times_stat); +} + +static int +save_large_times_sd(struct inode *inode /* object being processed 
*/ , + char **area /* position in stat-data */ ) +{ + reiser4_large_times_stat *sd; + + assert("nikita-2817", inode != NULL); + assert("nikita-2818", area != NULL); + assert("nikita-2819", *area != NULL); + + sd = (reiser4_large_times_stat *) * area; + + put_unaligned(cpu_to_le32((__u32) inode->i_atime.tv_nsec), &sd->atime); + put_unaligned(cpu_to_le32((__u32) inode->i_ctime.tv_nsec), &sd->ctime); + put_unaligned(cpu_to_le32((__u32) inode->i_mtime.tv_nsec), &sd->mtime); + + *area += sizeof *sd; + return 0; +} + +/* symlink stat data extension */ + +/* allocate memory for symlink target and attach it to inode->i_private */ +static int +symlink_target_to_inode(struct inode *inode, const char *target, int len) +{ + assert("vs-845", inode->i_private == NULL); + assert("vs-846", !reiser4_inode_get_flag(inode, + REISER4_GENERIC_PTR_USED)); + /* FIXME-VS: this is prone to deadlock. Not more than other similar + places, though */ + inode->i_private = kmalloc((size_t) len + 1, + reiser4_ctx_gfp_mask_get()); + if (!inode->i_private) + return RETERR(-ENOMEM); + + memcpy((char *)(inode->i_private), target, (size_t) len); + ((char *)(inode->i_private))[len] = 0; + reiser4_inode_set_flag(inode, REISER4_GENERIC_PTR_USED); + return 0; +} + +/* this is called on read_inode. There is nothing to do actually, but some + sanity checks */ +static int present_symlink_sd(struct inode *inode, char **area, int *len) +{ + int result; + int length; + reiser4_symlink_stat *sd; + + length = (int)inode->i_size; + /* + * *len is number of bytes in stat data item from *area to the end of + * item. 
It must be not less than size of symlink + 1 for ending 0 + */ + if (length > *len) + return not_enough_space(inode, "symlink"); + + if (*(*area + length) != 0) { + warning("vs-840", "Symlink is not zero terminated"); + return RETERR(-EIO); + } + + sd = (reiser4_symlink_stat *) * area; + result = symlink_target_to_inode(inode, sd->body, length); + + move_on(len, area, length + 1); + return result; +} + +static int save_len_symlink_sd(struct inode *inode) +{ + return inode->i_size + 1; +} + +/* this is called on create and update stat data. Do nothing on update but + update @area */ +static int save_symlink_sd(struct inode *inode, char **area) +{ + int result; + int length; + reiser4_symlink_stat *sd; + + length = (int)inode->i_size; + /* inode->i_size must be set already */ + assert("vs-841", length); + + result = 0; + sd = (reiser4_symlink_stat *) * area; + if (!reiser4_inode_get_flag(inode, REISER4_GENERIC_PTR_USED)) { + const char *target; + + target = (const char *)(inode->i_private); + inode->i_private = NULL; + + result = symlink_target_to_inode(inode, target, length); + + /* copy symlink to stat data */ + memcpy(sd->body, target, (size_t) length); + (*area)[length] = 0; + } else { + /* there is nothing to do in update but move area */ + assert("vs-844", + !memcmp(inode->i_private, sd->body, + (size_t) length + 1)); + } + + *area += (length + 1); + return result; +} + +static int present_flags_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + assert("nikita-645", inode != NULL); + assert("nikita-646", area != NULL); + assert("nikita-647", *area != NULL); + assert("nikita-648", len != NULL); + assert("nikita-649", *len > 0); + + if (*len >= (int)sizeof(reiser4_flags_stat)) { + reiser4_flags_stat *sd; + + sd = (reiser4_flags_stat *) * area; + inode->i_flags = le32_to_cpu(get_unaligned(&sd->flags)); + move_on(len, area, sizeof *sd); + return 0; + } else + return 
not_enough_space(inode, "generation and attrs"); +} + +/* Audited by: green(2002.06.14) */ +static int save_len_flags_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_flags_stat); +} + +static int save_flags_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_flags_stat *sd; + + assert("nikita-650", inode != NULL); + assert("nikita-651", area != NULL); + assert("nikita-652", *area != NULL); + + sd = (reiser4_flags_stat *) * area; + put_unaligned(cpu_to_le32(inode->i_flags), &sd->flags); + *area += sizeof *sd; + return 0; +} + +static int absent_plugin_sd(struct inode *inode); +static int present_plugin_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */, + int is_pset /* 1 if plugin set, 0 if heir set. */) +{ + reiser4_plugin_stat *sd; + reiser4_plugin *plugin; + reiser4_inode *info; + int i; + __u16 mask; + int result; + int num_of_plugins; + + assert("nikita-653", inode != NULL); + assert("nikita-654", area != NULL); + assert("nikita-655", *area != NULL); + assert("nikita-656", len != NULL); + assert("nikita-657", *len > 0); + + if (*len < (int)sizeof(reiser4_plugin_stat)) + return not_enough_space(inode, "plugin"); + + sd = (reiser4_plugin_stat *) * area; + info = reiser4_inode_data(inode); + + mask = 0; + num_of_plugins = le16_to_cpu(get_unaligned(&sd->plugins_no)); + move_on(len, area, sizeof *sd); + result = 0; + for (i = 0; i < num_of_plugins; ++i) { + reiser4_plugin_slot *slot; + reiser4_plugin_type type; + pset_member memb; + + slot = (reiser4_plugin_slot *) * area; + if (*len < (int)sizeof *slot) + return not_enough_space(inode, "additional plugin"); + + memb = le16_to_cpu(get_unaligned(&slot->pset_memb)); + type = aset_member_to_type_unsafe(memb); + + if (type == REISER4_PLUGIN_TYPES) { + warning("nikita-3502", + "wrong %s member (%i) for %llu", is_pset ? 
+ "pset" : "hset", memb, + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EINVAL); + } + plugin = plugin_by_disk_id(reiser4_tree_by_inode(inode), + type, &slot->id); + if (plugin == NULL) + return unknown_plugin(le16_to_cpu(get_unaligned(&slot->id)), inode); + + /* plugin is loaded into inode, mark this into inode's + bitmask of loaded non-standard plugins */ + if (!(mask & (1 << memb))) { + mask |= (1 << memb); + } else { + warning("nikita-658", "duplicate plugin for %llu", + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EINVAL); + } + move_on(len, area, sizeof *slot); + /* load plugin data, if any */ + if (plugin->h.pops != NULL && plugin->h.pops->load) + result = plugin->h.pops->load(inode, plugin, area, len); + else + result = aset_set_unsafe(is_pset ? &info->pset : + &info->hset, memb, plugin); + if (result) + return result; + } + if (is_pset) { + /* if object plugin wasn't loaded from stat-data, guess it by + mode bits */ + plugin = file_plugin_to_plugin(inode_file_plugin(inode)); + if (plugin == NULL) + result = absent_plugin_sd(inode); + info->plugin_mask = mask; + } else + info->heir_mask = mask; + + return result; +} + +static int present_pset_sd(struct inode *inode, char **area, int *len) { + return present_plugin_sd(inode, area, len, 1 /* pset */); +} + +/* Determine object plugin for @inode based on i_mode. + + Many objects in reiser4 file system are controlled by standard object + plugins that emulate traditional unix objects: unix file, directory, symlink, fifo, and so on. + + For such files we don't explicitly store plugin id in object stat + data. Rather required plugin is guessed from mode bits, where file "type" + is encoded (see stat(2)). 
+*/ +static int +guess_plugin_by_mode(struct inode *inode /* object to guess plugins for */ ) +{ + int fplug_id; + int dplug_id; + reiser4_inode *info; + + assert("nikita-736", inode != NULL); + + dplug_id = fplug_id = -1; + + switch (inode->i_mode & S_IFMT) { + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + fplug_id = SPECIAL_FILE_PLUGIN_ID; + break; + case S_IFLNK: + fplug_id = SYMLINK_FILE_PLUGIN_ID; + break; + case S_IFDIR: + fplug_id = DIRECTORY_FILE_PLUGIN_ID; + dplug_id = HASHED_DIR_PLUGIN_ID; + break; + default: + warning("nikita-737", "wrong file mode: %o", inode->i_mode); + return RETERR(-EIO); + case S_IFREG: + fplug_id = UNIX_FILE_PLUGIN_ID; + break; + } + info = reiser4_inode_data(inode); + set_plugin(&info->pset, PSET_FILE, (fplug_id >= 0) ? + plugin_by_id(REISER4_FILE_PLUGIN_TYPE, fplug_id) : NULL); + set_plugin(&info->pset, PSET_DIR, (dplug_id >= 0) ? + plugin_by_id(REISER4_DIR_PLUGIN_TYPE, dplug_id) : NULL); + return 0; +} + +/* Audited by: green(2002.06.14) */ +static int absent_plugin_sd(struct inode *inode /* object being processed */ ) +{ + int result; + + assert("nikita-659", inode != NULL); + + result = guess_plugin_by_mode(inode); + /* if mode was wrong, guess_plugin_by_mode() returns "regular file", + but setup_inode_ops() will call make_bad_inode(). + Another, more logical but bit more complex solution is to add + "bad-file plugin". */ + /* FIXME-VS: activate was called here */ + return result; +} + +/* helper function for plugin_sd_save_len(): calculate how much space + required to save state of given plugin */ +/* Audited by: green(2002.06.14) */ +static int len_for(reiser4_plugin * plugin /* plugin to save */ , + struct inode *inode /* object being processed */ , + pset_member memb, + int len, int is_pset) +{ + reiser4_inode *info; + assert("nikita-661", inode != NULL); + + if (plugin == NULL) + return len; + + info = reiser4_inode_data(inode); + if (is_pset ? 
+ info->plugin_mask & (1 << memb) : + info->heir_mask & (1 << memb)) { + len += sizeof(reiser4_plugin_slot); + if (plugin->h.pops && plugin->h.pops->save_len != NULL) { + /* + * non-standard plugin, call method + * commented as it is incompatible with alignment + * policy in save_plug() -edward + * + * len = reiser4_round_up(len, + * plugin->h.pops->alignment); + */ + len += plugin->h.pops->save_len(inode, plugin); + } + } + return len; +} + +/* calculate how much space is required to save state of all plugins, + associated with inode */ +static int save_len_plugin_sd(struct inode *inode /* object being processed */, + int is_pset) +{ + int len; + int last; + reiser4_inode *state; + pset_member memb; + + assert("nikita-663", inode != NULL); + + state = reiser4_inode_data(inode); + + /* common case: no non-standard plugins */ + if (is_pset ? state->plugin_mask == 0 : state->heir_mask == 0) + return 0; + len = sizeof(reiser4_plugin_stat); + last = PSET_LAST; + + for (memb = 0; memb < last; ++memb) { + len = len_for(aset_get(is_pset ? state->pset : state->hset, memb), + inode, memb, len, is_pset); + } + assert("nikita-664", len > (int)sizeof(reiser4_plugin_stat)); + return len; +} + +static int save_len_pset_sd(struct inode *inode) { + return save_len_plugin_sd(inode, 1 /* pset */); +} + +/* helper function for plugin_sd_save(): save plugin, associated with + inode. */ +static int save_plug(reiser4_plugin * plugin /* plugin to save */ , + struct inode *inode /* object being processed */ , + int memb /* what element of pset is saved */ , + char **area /* position in stat-data */ , + int *count /* incremented if plugin were actually saved. */, + int is_pset /* 1 for plugin set, 0 for heir set */) +{ + reiser4_plugin_slot *slot; + int fake_len; + int result; + + assert("nikita-665", inode != NULL); + assert("nikita-666", area != NULL); + assert("nikita-667", *area != NULL); + + if (plugin == NULL) + return 0; + + if (is_pset ? 
+ !(reiser4_inode_data(inode)->plugin_mask & (1 << memb)) : + !(reiser4_inode_data(inode)->heir_mask & (1 << memb))) + return 0; + slot = (reiser4_plugin_slot *) * area; + put_unaligned(cpu_to_le16(memb), &slot->pset_memb); + put_unaligned(cpu_to_le16(plugin->h.id), &slot->id); + fake_len = (int)0xffff; + move_on(&fake_len, area, sizeof *slot); + ++*count; + result = 0; + if (plugin->h.pops != NULL) { + if (plugin->h.pops->save != NULL) + result = plugin->h.pops->save(inode, plugin, area); + } + return result; +} + +/* save state of all non-standard plugins associated with inode */ +static int save_plugin_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */, + int is_pset /* 1 for pset, 0 for hset */) +{ + int fake_len; + int result = 0; + int num_of_plugins; + reiser4_plugin_stat *sd; + reiser4_inode *state; + pset_member memb; + + assert("nikita-669", inode != NULL); + assert("nikita-670", area != NULL); + assert("nikita-671", *area != NULL); + + state = reiser4_inode_data(inode); + if (is_pset ? state->plugin_mask == 0 : state->heir_mask == 0) + return 0; + sd = (reiser4_plugin_stat *) * area; + fake_len = (int)0xffff; + move_on(&fake_len, area, sizeof *sd); + + num_of_plugins = 0; + for (memb = 0; memb < PSET_LAST; ++memb) { + result = save_plug(aset_get(is_pset ? 
state->pset : state->hset, + memb), + inode, memb, area, &num_of_plugins, is_pset); + if (result != 0) + break; + } + + put_unaligned(cpu_to_le16((__u16)num_of_plugins), &sd->plugins_no); + return result; +} + +static int save_pset_sd(struct inode *inode, char **area) { + return save_plugin_sd(inode, area, 1 /* pset */); +} + +static int present_hset_sd(struct inode *inode, char **area, int *len) { + return present_plugin_sd(inode, area, len, 0 /* hset */); +} + +static int save_len_hset_sd(struct inode *inode) { + return save_len_plugin_sd(inode, 0 /* pset */); +} + +static int save_hset_sd(struct inode *inode, char **area) { + return save_plugin_sd(inode, area, 0 /* hset */); +} + +/* helper function for crypto_sd_present(), crypto_sd_save. + Extract crypto info from stat-data and attach it to inode */ +static int extract_crypto_info (struct inode * inode, + reiser4_crypto_stat * sd) +{ + struct reiser4_crypto_info * info; + assert("edward-11", !inode_crypto_info(inode)); + assert("edward-1413", + !reiser4_inode_get_flag(inode, REISER4_CRYPTO_STAT_LOADED)); + /* create and attach a crypto-stat without secret key loaded */ + info = reiser4_alloc_crypto_info(inode); + if (IS_ERR(info)) + return PTR_ERR(info); + info->keysize = le16_to_cpu(get_unaligned(&sd->keysize)); + memcpy(info->keyid, sd->keyid, inode_digest_plugin(inode)->fipsize); + reiser4_attach_crypto_info(inode, info); + reiser4_inode_set_flag(inode, REISER4_CRYPTO_STAT_LOADED); + return 0; +} + +/* crypto stat-data extension */ + +static int present_crypto_sd(struct inode *inode, char **area, int *len) +{ + int result; + reiser4_crypto_stat *sd; + digest_plugin *dplug = inode_digest_plugin(inode); + + assert("edward-06", dplug != NULL); + assert("edward-684", dplug->fipsize); + assert("edward-07", area != NULL); + assert("edward-08", *area != NULL); + assert("edward-09", len != NULL); + assert("edward-10", *len > 0); + + if (*len < (int)sizeof(reiser4_crypto_stat)) { + return not_enough_space(inode, 
"crypto-sd"); + } + /* *len is number of bytes in stat data item from *area to the end of + item. It must be not less than size of this extension */ + assert("edward-75", sizeof(*sd) + dplug->fipsize <= *len); + + sd = (reiser4_crypto_stat *) * area; + result = extract_crypto_info(inode, sd); + move_on(len, area, sizeof(*sd) + dplug->fipsize); + + return result; +} + +static int save_len_crypto_sd(struct inode *inode) +{ + return sizeof(reiser4_crypto_stat) + + inode_digest_plugin(inode)->fipsize; +} + +static int save_crypto_sd(struct inode *inode, char **area) +{ + int result = 0; + reiser4_crypto_stat *sd; + struct reiser4_crypto_info * info = inode_crypto_info(inode); + digest_plugin *dplug = inode_digest_plugin(inode); + + assert("edward-12", dplug != NULL); + assert("edward-13", area != NULL); + assert("edward-14", *area != NULL); + assert("edward-15", info != NULL); + assert("edward-1414", info->keyid != NULL); + assert("edward-1415", info->keysize != 0); + assert("edward-76", reiser4_inode_data(inode) != NULL); + + if (!reiser4_inode_get_flag(inode, REISER4_CRYPTO_STAT_LOADED)) { + /* file is just created */ + sd = (reiser4_crypto_stat *) *area; + /* copy everything but private key to the disk stat-data */ + put_unaligned(cpu_to_le16(info->keysize), &sd->keysize); + memcpy(sd->keyid, info->keyid, (size_t) dplug->fipsize); + reiser4_inode_set_flag(inode, REISER4_CRYPTO_STAT_LOADED); + } + *area += (sizeof(*sd) + dplug->fipsize); + return result; +} + +static int eio(struct inode *inode, char **area, int *len) +{ + return RETERR(-EIO); +} + +sd_ext_plugin sd_ext_plugins[LAST_SD_EXTENSION] = { + [LIGHT_WEIGHT_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = LIGHT_WEIGHT_STAT, + .pops = NULL, + .label = "light-weight sd", + .desc = "sd for light-weight files", + .linkage = {NULL,NULL} + }, + .present = present_lw_sd, + .absent = NULL, + .save_len = save_len_lw_sd, + .save = save_lw_sd, + .alignment = 8 + }, + [UNIX_STAT] = { + .h = { + 
.type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = UNIX_STAT, + .pops = NULL, + .label = "unix-sd", + .desc = "unix stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_unix_sd, + .absent = absent_unix_sd, + .save_len = save_len_unix_sd, + .save = save_unix_sd, + .alignment = 8 + }, + [LARGE_TIMES_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = LARGE_TIMES_STAT, + .pops = NULL, + .label = "64time-sd", + .desc = "nanosecond resolution for times", + .linkage = {NULL,NULL} + }, + .present = present_large_times_sd, + .absent = NULL, + .save_len = save_len_large_times_sd, + .save = save_large_times_sd, + .alignment = 8 + }, + [SYMLINK_STAT] = { + /* stat data of symlink has this extension */ + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = SYMLINK_STAT, + .pops = NULL, + .label = "symlink-sd", + .desc = + "stat data is appended with symlink name", + .linkage = {NULL,NULL} + }, + .present = present_symlink_sd, + .absent = NULL, + .save_len = save_len_symlink_sd, + .save = save_symlink_sd, + .alignment = 8 + }, + [PLUGIN_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = PLUGIN_STAT, + .pops = NULL, + .label = "plugin-sd", + .desc = "plugin stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_pset_sd, + .absent = absent_plugin_sd, + .save_len = save_len_pset_sd, + .save = save_pset_sd, + .alignment = 8 + }, + [HEIR_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = HEIR_STAT, + .pops = NULL, + .label = "heir-plugin-sd", + .desc = "heir plugin stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_hset_sd, + .absent = NULL, + .save_len = save_len_hset_sd, + .save = save_hset_sd, + .alignment = 8 + }, + [FLAGS_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = FLAGS_STAT, + .pops = NULL, + .label = "flags-sd", + .desc = "inode bit flags", + .linkage = {NULL, NULL} + }, + .present = present_flags_sd, + .absent = NULL, + .save_len = save_len_flags_sd, + 
.save = save_flags_sd, + .alignment = 8 + }, + [CAPABILITIES_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = CAPABILITIES_STAT, + .pops = NULL, + .label = "capabilities-sd", + .desc = "capabilities", + .linkage = {NULL, NULL} + }, + .present = eio, + .absent = NULL, + .save_len = save_len_flags_sd, + .save = save_flags_sd, + .alignment = 8 + }, + [CRYPTO_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = CRYPTO_STAT, + .pops = NULL, + .label = "crypto-sd", + .desc = "secret key size and id", + .linkage = {NULL, NULL} + }, + .present = present_crypto_sd, + .absent = NULL, + .save_len = save_len_crypto_sd, + .save = save_crypto_sd, + .alignment = 8 + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/static_stat.h b/fs/reiser4/plugin/item/static_stat.h new file mode 100644 index 000000000000..dd20eb3f2d7d --- /dev/null +++ b/fs/reiser4/plugin/item/static_stat.h @@ -0,0 +1,224 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* This describes the static_stat item, used to hold all information needed by the stat() syscall. + +In the case where each file has not less than the fields needed by the +stat() syscall, it is more compact to store those fields in this +struct. + +If this item does not exist, then all stats are dynamically resolved. +At the moment, we either resolve all stats dynamically or all of them +statically. If you think this is not fully optimal, and the rest of +reiser4 is working, then fix it...:-) + +*/ + +#if !defined( __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ ) +#define __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ + +#include "../../forward.h" +#include "../../dformat.h" + +#include /* for struct inode */ + +/* Stat data layout: goals and implementation. 

   We want to be able to have lightweight files which have complete flexibility in what semantic metadata is attached to
   them, including not having semantic metadata attached to them.

   There is one problem with doing that, which is that if in fact you have exactly the same metadata for most files you
   want to store, then it takes more space to store that metadata in a dynamically sized structure than in a statically
   sized structure because the statically sized structure knows without recording it what the names and lengths of the
   attributes are.

   This leads to a natural compromise, which is to special case those files which have simply the standard unix file
   attributes, and only employ the full dynamic stat data mechanism for those files that differ from the standard unix
   file in their use of file attributes.

   Yet this compromise deserves to be compromised a little.

   We accommodate the case where you have no more than the standard unix file attributes by using an "extension
   bitmask": each bit in it indicates presence or absence of or particular stat data extension (see sd_ext_bits enum).

   If the first bit of the extension bitmask bit is 0, we have light-weight file whose attributes are either inherited
   from parent directory (as uid, gid) or initialised to some sane values.

   To capitalize on existing code infrastructure, extensions are
   implemented as plugins of type REISER4_SD_EXT_PLUGIN_TYPE.
   Each stat-data extension plugin implements four methods:

   ->present() called by sd_load() when this extension is found in stat-data
   ->absent() called by sd_load() when this extension is not found in stat-data
   ->save_len() called by sd_len() to calculate total length of stat-data
   ->save() called by sd_save() to store extension data into stat-data

   Implementation is in fs/reiser4/plugin/item/static_stat.c
*/

/* stat-data extension. Please order this by presumed frequency of use */
typedef enum {
	/* support for light-weight files */
	LIGHT_WEIGHT_STAT,
	/* data required to implement unix stat(2) call. Layout is in
	   reiser4_unix_stat. If this is not present, file is light-weight */
	UNIX_STAT,
	/* this contains additional set of 32bit [anc]time fields to implement
	   nanosecond resolution. Layout is in reiser4_large_times_stat. Usage
	   if this extension is governed by 32bittimes mount option. */
	LARGE_TIMES_STAT,
	/* stat data has link name included */
	SYMLINK_STAT,
	/* on-disk slots of non-standard plugins for main plugin table
	   (@reiser4_inode->pset), that is, plugins that cannot be deduced
	   from file mode bits, for example, aggregation, interpolation etc. */
	PLUGIN_STAT,
	/* this extension contains persistent inode flags. These flags are
	   single bits: immutable, append, only, etc. Layout is in
	   reiser4_flags_stat. */
	FLAGS_STAT,
	/* this extension contains capabilities sets, associated with this
	   file. Layout is in reiser4_capabilities_stat */
	CAPABILITIES_STAT,
	/* this extension contains size and public id of the secret key.
	   Layout is in reiser4_crypto_stat */
	CRYPTO_STAT,
	/* on-disk slots of non-default plugins for inheritance, which
	   are extracted to special plugin table (@reiser4_inode->hset).
	   By default, children of the object will inherit plugins from
	   its main plugin table (pset). */
	HEIR_STAT,
	LAST_SD_EXTENSION,
	/*
	 * init_inode_static_sd() iterates over extension mask until all
	 * non-zero bits are processed. This means, that neither ->present(),
	 * nor ->absent() methods will be called for stat-data extensions that
	 * go after last present extension. But some basic extensions, we want
	 * either ->absent() or ->present() method to be called, because these
	 * extensions set up something in inode even when they are not
	 * present. This is what LAST_IMPORTANT_SD_EXTENSION is for: for all
	 * extensions before and including LAST_IMPORTANT_SD_EXTENSION either
	 * ->present(), or ->absent() method will be called, independently of
	 * what other extensions are present.
	 */
	LAST_IMPORTANT_SD_EXTENSION = PLUGIN_STAT
} sd_ext_bits;

/* minimal stat-data. This allows to support light-weight files. */
typedef struct reiser4_stat_data_base {
	/* 0 */ __le16 extmask;
	/* 2 */
} PACKED reiser4_stat_data_base;

/* on-disk layout of the LIGHT_WEIGHT_STAT extension */
typedef struct reiser4_light_weight_stat {
	/* 0 */ __le16 mode;
	/* 2 */ __le32 nlink;
	/* 6 */ __le64 size;
	/* size in bytes */
	/* 14 */
} PACKED reiser4_light_weight_stat;

/* on-disk layout of the UNIX_STAT extension */
typedef struct reiser4_unix_stat {
	/* owner id */
	/* 0 */ __le32 uid;
	/* group id */
	/* 4 */ __le32 gid;
	/* access time */
	/* 8 */ __le32 atime;
	/* modification time */
	/* 12 */ __le32 mtime;
	/* change time */
	/* 16 */ __le32 ctime;
	union {
		/* minor:major for device files */
		/* 20 */ __le64 rdev;
		/* bytes used by file */
		/* 20 */ __le64 bytes;
	} u;
	/* 28 */
} PACKED reiser4_unix_stat;

/* symlink stored as part of inode */
typedef struct reiser4_symlink_stat {
	/* flexible trailing array: NUL-terminated target string */
	char body[0];
} PACKED reiser4_symlink_stat;

/* one on-disk slot of the PLUGIN_STAT/HEIR_STAT extensions */
typedef struct reiser4_plugin_slot {
	/* 0 */ __le16 pset_memb;
	/* 2 */ __le16 id;
	/* 4 *//* here plugin stores its persistent state */
} PACKED reiser4_plugin_slot;

/* stat-data extension for files with non-standard plugin. */
typedef struct reiser4_plugin_stat {
	/* number of additional plugins, associated with this object */
	/* 0 */ __le16 plugins_no;
	/* 2 */ reiser4_plugin_slot slot[0];
	/* 2 */
} PACKED reiser4_plugin_stat;

/* stat-data extension for inode flags. Currently it is just fixed-width 32
 * bit mask. If need arise, this can be replaced with variable width
 * bitmask.
 */
typedef struct reiser4_flags_stat {
	/* 0 */ __le32 flags;
	/* 4 */
} PACKED reiser4_flags_stat;

/* NOTE(review): the offset comments here previously read 8 and 16, which
   would fit 64-bit fields; the actual fields are 32-bit, so the correct
   offsets are 4 and 8 — confirm whether the fields were meant to be
   __le64 (this extension is currently rejected on load via eio()). */
typedef struct reiser4_capabilities_stat {
	/* 0 */ __le32 effective;
	/* 4 */ __le32 permitted;
	/* 8 */
} PACKED reiser4_capabilities_stat;

typedef struct reiser4_cluster_stat {
/* this defines cluster size (an attribute of cryptcompress objects) as PAGE_SIZE << cluster shift */
	/* 0 */ d8 cluster_shift;
	/* 1 */
} PACKED reiser4_cluster_stat;

typedef struct reiser4_crypto_stat {
	/* secret key size, bits */
	/* 0 */ d16 keysize;
	/* secret key id */
	/* 2 */ d8 keyid[0];
	/* 2 */
} PACKED reiser4_crypto_stat;

/* on-disk layout of the LARGE_TIMES_STAT extension (nanosecond parts) */
typedef struct reiser4_large_times_stat {
	/* access time */
	/* 0 */ d32 atime;
	/* modification time */
	/* 4 */ d32 mtime;
	/* change time */
	/* 8 */ d32 ctime;
	/* 12 */
} PACKED reiser4_large_times_stat;

/* this structure is filled by sd_item_stat */
typedef struct sd_stat {
	int dirs;
	int files;
	int others;
} sd_stat;

/* plugin->item.common.* */
extern void print_sd(const char *prefix, coord_t * coord);
extern void item_stat_static_sd(const coord_t * coord, void *vp);

/* plugin->item.s.sd.* */
extern int init_inode_static_sd(struct inode *inode, char *sd, int len);
extern int save_len_static_sd(struct inode *inode);
extern int save_static_sd(struct inode *inode, char **area);

/* __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ */
#endif

/* Make Linus happy.
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/tail.c b/fs/reiser4/plugin/item/tail.c new file mode 100644 index 000000000000..436a67ae1c61 --- /dev/null +++ b/fs/reiser4/plugin/item/tail.c @@ -0,0 +1,810 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../../carry.h" +#include "../../vfs_ops.h" + +#include +#include +#include + +/* plugin->u.item.b.max_key_inside */ +reiser4_key *max_key_inside_tail(const coord_t *coord, reiser4_key *key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(reiser4_max_key())); + return key; +} + +/* plugin->u.item.b.can_contain_key */ +int can_contain_key_tail(const coord_t *coord, const reiser4_key *key, + const reiser4_item_data *data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key)) + return 0; + + return 1; +} + +/* plugin->u.item.b.mergeable + first item is of tail type */ +/* Audited by: green(2002.06.14) */ +int mergeable_tail(const coord_t *p1, const coord_t *p2) +{ + reiser4_key key1, key2; + + assert("vs-535", plugin_of_group(item_plugin_by_coord(p1), + UNIX_FILE_METADATA_ITEM_TYPE)); + assert("vs-365", item_id_by_coord(p1) == FORMATTING_ID); + + if (item_id_by_coord(p2) != FORMATTING_ID) { + /* second item is of another type */ + return 0; + } + + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) + || get_key_type(&key1) != get_key_type(&key2)) { + /* items of different objects */ + return 0; + } + if (get_key_offset(&key1) + 
nr_units_tail(p1) != get_key_offset(&key2)) { + /* not adjacent items */ + return 0; + } + return 1; +} + +/* plugin->u.item.b.print + plugin->u.item.b.check */ + +/* plugin->u.item.b.nr_units */ +pos_in_node_t nr_units_tail(const coord_t * coord) +{ + return item_length_by_coord(coord); +} + +/* plugin->u.item.b.lookup */ +lookup_result +lookup_tail(const reiser4_key * key, lookup_bias bias, coord_t * coord) +{ + reiser4_key item_key; + __u64 lookuped, offset; + unsigned nr_units; + + item_key_by_coord(coord, &item_key); + offset = get_key_offset(item_key_by_coord(coord, &item_key)); + nr_units = nr_units_tail(coord); + + /* key we are looking for must be greater than key of item @coord */ + assert("vs-416", keygt(key, &item_key)); + + /* offset we are looking for */ + lookuped = get_key_offset(key); + + if (lookuped >= offset && lookuped < offset + nr_units) { + /* byte we are looking for is in this item */ + coord->unit_pos = lookuped - offset; + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + } + + /* set coord after last unit */ + coord->unit_pos = nr_units - 1; + coord->between = AFTER_UNIT; + return bias == + FIND_MAX_NOT_MORE_THAN ? 
CBK_COORD_FOUND : CBK_COORD_NOTFOUND; +} + +/* plugin->u.item.b.paste */ +int +paste_tail(coord_t *coord, reiser4_item_data *data, + carry_plugin_info *info UNUSED_ARG) +{ + unsigned old_item_length; + char *item; + + /* length the item had before resizing has been performed */ + old_item_length = item_length_by_coord(coord) - data->length; + + /* tail items never get pasted in the middle */ + assert("vs-363", + (coord->unit_pos == 0 && coord->between == BEFORE_UNIT) || + (coord->unit_pos == old_item_length - 1 && + coord->between == AFTER_UNIT) || + (coord->unit_pos == 0 && old_item_length == 0 + && coord->between == AT_UNIT)); + + item = item_body_by_coord(coord); + if (coord->unit_pos == 0) + /* make space for pasted data when pasting at the beginning of + the item */ + memmove(item + data->length, item, old_item_length); + + if (coord->between == AFTER_UNIT) + coord->unit_pos++; + + if (data->data) { + assert("vs-554", data->user == 0 || data->user == 1); + if (data->user) { + assert("nikita-3035", reiser4_schedulable()); + /* copy from user space */ + if (__copy_from_user(item + coord->unit_pos, + (const char __user *)data->data, + (unsigned)data->length)) + return RETERR(-EFAULT); + } else + /* copy from kernel space */ + memcpy(item + coord->unit_pos, data->data, + (unsigned)data->length); + } else { + memset(item + coord->unit_pos, 0, (unsigned)data->length); + } + return 0; +} + +/* plugin->u.item.b.fast_paste */ + +/* plugin->u.item.b.can_shift + number of units is returned via return value, number of bytes via @size. 
For + tail items they coincide */ +int +can_shift_tail(unsigned free_space, coord_t * source UNUSED_ARG, + znode * target UNUSED_ARG, shift_direction direction UNUSED_ARG, + unsigned *size, unsigned want) +{ + /* make sure that we do not want to shift more than we have */ + assert("vs-364", want > 0 + && want <= (unsigned)item_length_by_coord(source)); + + *size = min(want, free_space); + return *size; +} + +/* plugin->u.item.b.copy_units */ +void +copy_units_tail(coord_t * target, coord_t * source, + unsigned from, unsigned count, + shift_direction where_is_free_space, + unsigned free_space UNUSED_ARG) +{ + /* make sure that item @target is expanded already */ + assert("vs-366", (unsigned)item_length_by_coord(target) >= count); + assert("vs-370", free_space >= count); + + if (where_is_free_space == SHIFT_LEFT) { + /* append item @target with @count first bytes of @source */ + assert("vs-365", from == 0); + + memcpy((char *)item_body_by_coord(target) + + item_length_by_coord(target) - count, + (char *)item_body_by_coord(source), count); + } else { + /* target item is moved to right already */ + reiser4_key key; + + assert("vs-367", + (unsigned)item_length_by_coord(source) == from + count); + + memcpy((char *)item_body_by_coord(target), + (char *)item_body_by_coord(source) + from, count); + + /* new units are inserted before first unit in an item, + therefore, we have to update item key */ + item_key_by_coord(source, &key); + set_key_offset(&key, get_key_offset(&key) + from); + + node_plugin_by_node(target->node)->update_item_key(target, &key, + NULL /*info */); + } +} + +/* plugin->u.item.b.create_hook */ + +/* item_plugin->b.kill_hook + this is called when @count units starting from @from-th one are going to be removed + */ +int +kill_hook_tail(const coord_t * coord, pos_in_node_t from, + pos_in_node_t count, struct carry_kill_data *kdata) +{ + reiser4_key key; + loff_t start, end; + + assert("vs-1577", kdata); + assert("vs-1579", kdata->inode); + + 
item_key_by_coord(coord, &key); + start = get_key_offset(&key) + from; + end = start + count; + fake_kill_hook_tail(kdata->inode, start, end, kdata->params.truncate); + return 0; +} + +/* plugin->u.item.b.shift_hook */ + +/* helper for kill_units_tail and cut_units_tail */ +static int +do_cut_or_kill(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + pos_in_node_t count; + + /* this method is only called to remove part of item */ + assert("vs-374", (to - from + 1) < item_length_by_coord(coord)); + /* tails items are never cut from the middle of an item */ + assert("vs-396", ergo(from != 0, to == coord_last_unit_pos(coord))); + assert("vs-1558", ergo(from == 0, to < coord_last_unit_pos(coord))); + + count = to - from + 1; + + if (smallest_removed) { + /* store smallest key removed */ + item_key_by_coord(coord, smallest_removed); + set_key_offset(smallest_removed, + get_key_offset(smallest_removed) + from); + } + if (new_first) { + /* head of item is cut */ + assert("vs-1529", from == 0); + + item_key_by_coord(coord, new_first); + set_key_offset(new_first, + get_key_offset(new_first) + from + count); + } + + if (REISER4_DEBUG) + memset((char *)item_body_by_coord(coord) + from, 0, count); + return count; +} + +/* plugin->u.item.b.cut_units */ +int +cut_units_tail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *cdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + return do_cut_or_kill(coord, from, to, smallest_removed, new_first); +} + +/* plugin->u.item.b.kill_units */ +int +kill_units_tail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *kdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + kill_hook_tail(coord, from, to - from + 1, kdata); + return do_cut_or_kill(coord, from, to, smallest_removed, new_first); +} + +/* plugin->u.item.b.unit_key */ +reiser4_key *unit_key_tail(const coord_t * 
coord, reiser4_key * key) +{ + assert("vs-375", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, (get_key_offset(key) + coord->unit_pos)); + + return key; +} + +/* plugin->u.item.b.estimate + plugin->u.item.b.item_data_by_flow */ + +/* tail readpage function. It is called from readpage_tail(). */ +static int do_readpage_tail(uf_coord_t *uf_coord, struct page *page) +{ + tap_t tap; + int result; + coord_t coord; + lock_handle lh; + int count, mapped; + struct inode *inode; + char *pagedata; + + /* saving passed coord so that tap does not move it. */ + init_lh(&lh); + copy_lh(&lh, uf_coord->lh); + inode = page->mapping->host; + coord_dup(&coord, &uf_coord->coord); + + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + if ((result = reiser4_tap_load(&tap))) + goto out_tap_done; + + /* lookup until page is filled up. */ + for (mapped = 0; mapped < PAGE_SIZE; ) { + /* number of bytes to be copied to page */ + count = item_length_by_coord(&coord) - coord.unit_pos; + if (count > PAGE_SIZE - mapped) + count = PAGE_SIZE - mapped; + + /* attach @page to address space and get data address */ + pagedata = kmap_atomic(page); + + /* copy tail item to page */ + memcpy(pagedata + mapped, + ((char *)item_body_by_coord(&coord) + coord.unit_pos), + count); + mapped += count; + + flush_dcache_page(page); + + /* detach page from address space */ + kunmap_atomic(pagedata); + + /* Getting next tail item. */ + if (mapped < PAGE_SIZE) { + /* + * unlock page to avoid keeping it locked + * during tree lookup, which takes long term locks + */ + unlock_page(page); + + /* getting right neighbour. 
*/ + result = go_dir_el(&tap, RIGHT_SIDE, 0); + + /* lock page back */ + lock_page(page); + if (PageUptodate(page)) { + /* + * another thread read the page, we have + * nothing to do + */ + result = 0; + goto out_unlock_page; + } + + if (result) { + if (result == -E_NO_NEIGHBOR) { + /* + * right neighbor is not a formatted + * node + */ + result = 0; + goto done; + } else { + goto out_tap_relse; + } + } else { + if (!inode_file_plugin(inode)-> + owns_item(inode, &coord)) { + /* item of another file is found */ + result = 0; + goto done; + } + } + } + } + + done: + if (mapped != PAGE_SIZE) + zero_user_segment(page, mapped, PAGE_SIZE); + SetPageUptodate(page); + out_unlock_page: + unlock_page(page); + out_tap_relse: + reiser4_tap_relse(&tap); + out_tap_done: + reiser4_tap_done(&tap); + return result; +} + +/* + * plugin->s.file.readpage + * + * reiser4_read_dispatch->read_unix_file->page_cache_readahead-> + * ->reiser4_readpage_dispatch->readpage_unix_file->readpage_tail + * or + * filemap_fault->reiser4_readpage_dispatch->readpage_unix_file->readpage_tail + * + * At the beginning: coord->node is read locked, zloaded, page is locked, + * coord is set to existing unit inside of tail item. 
+ */ +int readpage_tail(void *vp, struct page *page) +{ + uf_coord_t *uf_coord = vp; + ON_DEBUG(coord_t * coord = &uf_coord->coord); + ON_DEBUG(reiser4_key key); + + assert("umka-2515", PageLocked(page)); + assert("umka-2516", !PageUptodate(page)); + assert("umka-2517", !jprivate(page) && !PagePrivate(page)); + assert("umka-2518", page->mapping && page->mapping->host); + + assert("umka-2519", znode_is_loaded(coord->node)); + assert("umka-2520", item_is_tail(coord)); + assert("umka-2521", coord_is_existing_unit(coord)); + assert("umka-2522", znode_is_rlocked(coord->node)); + assert("umka-2523", + page->mapping->host->i_ino == + get_key_objectid(item_key_by_coord(coord, &key))); + + return do_readpage_tail(uf_coord, page); +} + +/** + * overwrite_tail + * @flow: + * @coord: + * + * Overwrites tail item or its part by user data. Returns number of bytes + * written or error code. + */ +static int overwrite_tail(flow_t *flow, coord_t *coord) +{ + unsigned count; + + assert("vs-570", flow->user == 1); + assert("vs-946", flow->data); + assert("vs-947", coord_is_existing_unit(coord)); + assert("vs-948", znode_is_write_locked(coord->node)); + assert("nikita-3036", reiser4_schedulable()); + + count = item_length_by_coord(coord) - coord->unit_pos; + if (count > flow->length) + count = flow->length; + + if (__copy_from_user((char *)item_body_by_coord(coord) + coord->unit_pos, + (const char __user *)flow->data, count)) + return RETERR(-EFAULT); + + znode_make_dirty(coord->node); + return count; +} + +/** + * insert_first_tail + * @inode: + * @flow: + * @coord: + * @lh: + * + * Returns number of bytes written or error code. + */ +static ssize_t insert_first_tail(struct inode *inode, flow_t *flow, + coord_t *coord, lock_handle *lh) +{ + int result; + loff_t to_write; + struct unix_file_info *uf_info; + + if (get_key_offset(&flow->key) != 0) { + /* + * file is empty and we have to write not to the beginning of + * file. Create a hole at the beginning of file. 
On success + * insert_flow returns 0 as number of written bytes which is + * what we have to return on padding a file with holes + */ + flow->data = NULL; + flow->length = get_key_offset(&flow->key); + set_key_offset(&flow->key, 0); + /* + * holes in files built of tails are stored just like if there + * were real data which are all zeros. + */ + inode_add_bytes(inode, flow->length); + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + + uf_info = unix_file_inode_data(inode); + + /* + * first item insertion is only possible when writing to empty + * file or performing tail conversion + */ + assert("", (uf_info->container == UF_CONTAINER_EMPTY || + (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) && + reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)))); + /* if file was empty - update its state */ + if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY) + uf_info->container = UF_CONTAINER_TAILS; + return result; + } + + inode_add_bytes(inode, flow->length); + + to_write = flow->length; + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return (to_write - flow->length) ? (to_write - flow->length) : result; +} + +/** + * append_tail + * @inode: + * @flow: + * @coord: + * @lh: + * + * Returns number of bytes written or error code. + */ +static ssize_t append_tail(struct inode *inode, + flow_t *flow, coord_t *coord, lock_handle *lh) +{ + int result; + reiser4_key append_key; + loff_t to_write; + + if (!keyeq(&flow->key, append_key_tail(coord, &append_key))) { + flow->data = NULL; + flow->length = get_key_offset(&flow->key) - get_key_offset(&append_key); + set_key_offset(&flow->key, get_key_offset(&append_key)); + /* + * holes in files built of tails are stored just like if there + * were real data which are all zeros. 
+ */ + inode_add_bytes(inode, flow->length); + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return result; + } + + inode_add_bytes(inode, flow->length); + + to_write = flow->length; + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return (to_write - flow->length) ? (to_write - flow->length) : result; +} + +/** + * write_tail_reserve_space - reserve space for tail write operation + * @inode: + * + * Estimates and reserves space which may be required for writing one flow to a + * file + */ +static int write_extent_reserve_space(struct inode *inode) +{ + __u64 count; + reiser4_tree *tree; + + /* + * to write one flow to a file by tails we have to reserve disk space for: + + * 1. find_file_item may have to insert empty node to the tree (empty + * leaf node between two extent items). This requires 1 block and + * number of blocks which are necessary to perform insertion of an + * internal item into twig level. + * + * 2. flow insertion + * + * 3. 
stat data update + */ + tree = reiser4_tree_by_inode(inode); + count = estimate_one_insert_item(tree) + + estimate_insert_flow(tree->height) + + estimate_one_insert_item(tree); + grab_space_enable(); + return reiser4_grab_space(count, 0 /* flags */); +} + +#define PAGE_PER_FLOW 4 + +static loff_t faultin_user_pages(const char __user *buf, size_t count) +{ + loff_t faulted; + int to_fault; + + if (count > PAGE_PER_FLOW * PAGE_SIZE) + count = PAGE_PER_FLOW * PAGE_SIZE; + faulted = 0; + while (count > 0) { + to_fault = PAGE_SIZE; + if (count < to_fault) + to_fault = count; + fault_in_pages_readable(buf + faulted, to_fault); + count -= to_fault; + faulted += to_fault; + } + return faulted; +} + +ssize_t reiser4_write_tail_noreserve(struct file *file, + struct inode * inode, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct hint hint; + int result; + flow_t flow; + coord_t *coord; + lock_handle *lh; + znode *loaded; + + assert("edward-1548", inode != NULL); + + result = load_file_hint(file, &hint); + BUG_ON(result != 0); + + flow.length = faultin_user_pages(buf, count); + flow.user = 1; + memcpy(&flow.data, &buf, sizeof(buf)); + flow.op = WRITE_OP; + key_by_inode_and_offset_common(inode, *pos, &flow.key); + + result = find_file_item(&hint, &flow.key, ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) + return result; + + coord = &hint.ext_coord.coord; + lh = hint.ext_coord.lh; + + result = zload(coord->node); + BUG_ON(result != 0); + loaded = coord->node; + + if (coord->between == AFTER_UNIT) { + /* append with data or hole */ + result = append_tail(inode, &flow, coord, lh); + } else if (coord->between == AT_UNIT) { + /* overwrite */ + result = overwrite_tail(&flow, coord); + } else { + /* no items of this file yet. 
insert data or hole */ + result = insert_first_tail(inode, &flow, coord, lh); + } + zrelse(loaded); + if (result < 0) { + done_lh(lh); + return result; + } + + /* seal and unlock znode */ + hint.ext_coord.valid = 0; + if (hint.ext_coord.valid) + reiser4_set_hint(&hint, &flow.key, ZNODE_WRITE_LOCK); + else + reiser4_unset_hint(&hint); + + save_file_hint(file, &hint); + return result; +} + +/** + * reiser4_write_tail - write method of tail item plugin + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * + * Returns number of written bytes or error code. + */ +ssize_t reiser4_write_tail(struct file *file, + struct inode * inode, + const char __user *buf, + size_t count, loff_t *pos) +{ + if (write_extent_reserve_space(inode)) + return RETERR(-ENOSPC); + return reiser4_write_tail_noreserve(file, inode, buf, count, pos); +} + +#if REISER4_DEBUG + +static int +coord_matches_key_tail(const coord_t * coord, const reiser4_key * key) +{ + reiser4_key item_key; + + assert("vs-1356", coord_is_existing_unit(coord)); + assert("vs-1354", keylt(key, append_key_tail(coord, &item_key))); + assert("vs-1355", keyge(key, item_key_by_coord(coord, &item_key))); + return get_key_offset(key) == + get_key_offset(&item_key) + coord->unit_pos; + +} + +#endif + +/* plugin->u.item.s.file.read */ +int reiser4_read_tail(struct file *file UNUSED_ARG, flow_t *f, hint_t *hint) +{ + unsigned count; + int item_length; + coord_t *coord; + uf_coord_t *uf_coord; + + uf_coord = &hint->ext_coord; + coord = &uf_coord->coord; + + assert("vs-571", f->user == 1); + assert("vs-571", f->data); + assert("vs-967", coord && coord->node); + assert("vs-1117", znode_is_rlocked(coord->node)); + assert("vs-1118", znode_is_loaded(coord->node)); + + assert("nikita-3037", reiser4_schedulable()); + assert("vs-1357", coord_matches_key_tail(coord, &f->key)); + + /* calculate number of bytes to read off the item */ + item_length = 
item_length_by_coord(coord); + count = item_length_by_coord(coord) - coord->unit_pos; + if (count > f->length) + count = f->length; + + /* user page has to be brought in so that major page fault does not + * occur here when long-term lock is held */ + if (__copy_to_user((char __user *)f->data, + ((char *)item_body_by_coord(coord) + coord->unit_pos), + count)) + return RETERR(-EFAULT); + + /* probably mark_page_accessed() should only be called if + * coord->unit_pos is zero. */ + mark_page_accessed(znode_page(coord->node)); + move_flow_forward(f, count); + + coord->unit_pos += count; + if (item_length == coord->unit_pos) { + coord->unit_pos--; + coord->between = AFTER_UNIT; + } + reiser4_set_hint(hint, &f->key, ZNODE_READ_LOCK); + return 0; +} + +/* + plugin->u.item.s.file.append_key + key of the first byte after the last byte addressed by this item +*/ +reiser4_key *append_key_tail(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(key) + item_length_by_coord(coord)); + return key; +} + +/* plugin->u.item.s.file.init_coord_extension */ +void init_coord_extension_tail(uf_coord_t * uf_coord, loff_t lookuped) +{ + uf_coord->valid = 1; +} + +/* + plugin->u.item.s.file.get_block +*/ +int +get_block_address_tail(const coord_t * coord, sector_t lblock, sector_t * block) +{ + assert("nikita-3252", znode_get_level(coord->node) == LEAF_LEVEL); + + if (reiser4_blocknr_is_fake(znode_get_block(coord->node))) + /* if node hasn't obtained its block number yet, return 0. 
+ * Lets avoid upsetting users with some cosmic numbers beyond + * the device capacity.*/ + *block = 0; + else + *block = *znode_get_block(coord->node); + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/tail.h b/fs/reiser4/plugin/item/tail.h new file mode 100644 index 000000000000..d0eacbd27126 --- /dev/null +++ b/fs/reiser4/plugin/item/tail.h @@ -0,0 +1,59 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( __REISER4_TAIL_H__ ) +#define __REISER4_TAIL_H__ + +struct tail_coord_extension { + int not_used; +}; + +struct cut_list; + +/* plugin->u.item.b.* */ +reiser4_key *max_key_inside_tail(const coord_t *, reiser4_key *); +int can_contain_key_tail(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_tail(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_tail(const coord_t *); +lookup_result lookup_tail(const reiser4_key *, lookup_bias, coord_t *); +int paste_tail(coord_t *, reiser4_item_data *, carry_plugin_info *); +int can_shift_tail(unsigned free_space, coord_t * source, + znode * target, shift_direction, unsigned *size, + unsigned want); +void copy_units_tail(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction, unsigned free_space); +int kill_hook_tail(const coord_t *, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *); +int cut_units_tail(coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_tail(coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +reiser4_key *unit_key_tail(const coord_t *, reiser4_key *); + +/* plugin->u.item.s.* */ +ssize_t 
reiser4_write_tail_noreserve(struct file *file, struct inode * inode, + const char __user *buf, size_t count, + loff_t *pos); +ssize_t reiser4_write_tail(struct file *file, struct inode * inode, + const char __user *buf, size_t count, loff_t *pos); +int reiser4_read_tail(struct file *, flow_t *, hint_t *); +int readpage_tail(void *vp, struct page *page); +reiser4_key *append_key_tail(const coord_t *, reiser4_key *); +void init_coord_extension_tail(uf_coord_t *, loff_t offset); +int get_block_address_tail(const coord_t *, sector_t, sector_t *); + +/* __REISER4_TAIL_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/Makefile b/fs/reiser4/plugin/node/Makefile new file mode 100644 index 000000000000..36e87ff49210 --- /dev/null +++ b/fs/reiser4/plugin/node/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_REISER4_FS) += node_plugins.o + +node_plugins-objs := \ + node.o \ + node40.o \ + node41.o diff --git a/fs/reiser4/plugin/node/node.c b/fs/reiser4/plugin/node/node.c new file mode 100644 index 000000000000..aca83732bcd5 --- /dev/null +++ b/fs/reiser4/plugin/node/node.c @@ -0,0 +1,170 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Node plugin interface. + + Description: The tree provides the abstraction of flows, which it + internally fragments into items which it stores in nodes. + + A key_atom is a piece of data bound to a single key. + + For reasonable space efficiency to be achieved it is often + necessary to store key_atoms in the nodes in the form of items, where + an item is a sequence of key_atoms of the same or similar type. It is + more space-efficient, because the item can implement (very) + efficient compression of key_atom's bodies using internal knowledge + about their semantics, and it can often avoid having a key for each + key_atom. 
Each type of item has specific operations implemented by its + item handler (see balance.c). + + Rationale: the rest of the code (specifically balancing routines) + accesses leaf level nodes through this interface. This way we can + implement various block layouts and even combine various layouts + within the same tree. Balancing/allocating algorithms should not + care about peculiarities of splitting/merging specific item types, + but rather should leave that to the item's item handler. + + Items, including those that provide the abstraction of flows, have + the property that if you move them in part or in whole to another + node, the balancing code invokes their is_left_mergeable() + item_operation to determine if they are mergeable with their new + neighbor in the node you have moved them to. For some items the + is_left_mergeable() function always returns null. + + When moving the bodies of items from one node to another: + + if a partial item is shifted to another node the balancing code invokes + an item handler method to handle the item splitting. + + if the balancing code needs to merge with an item in the node it + is shifting to, it will invoke an item handler method to handle + the item merging. + + if it needs to move whole item bodies unchanged, the balancing code uses xmemcpy() + adjusting the item headers after the move is done using the node handler. +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../reiser4.h" + +/** + * leftmost_key_in_node - get the smallest key in node + * @node: + * @key: store result here + * + * Stores the leftmost key of @node in @key. 
+ */ +reiser4_key *leftmost_key_in_node(const znode *node, reiser4_key *key) +{ + assert("nikita-1634", node != NULL); + assert("nikita-1635", key != NULL); + + if (!node_is_empty(node)) { + coord_t first_item; + + coord_init_first_unit(&first_item, (znode *) node); + item_key_by_coord(&first_item, key); + } else + *key = *reiser4_max_key(); + return key; +} + +node_plugin node_plugins[LAST_NODE_ID] = { + [NODE40_ID] = { + .h = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .id = NODE40_ID, + .pops = NULL, + .label = "unified", + .desc = "unified node layout", + .linkage = {NULL, NULL} + }, + .item_overhead = item_overhead_node40, + .free_space = free_space_node40, + .lookup = lookup_node40, + .num_of_items = num_of_items_node40, + .item_by_coord = item_by_coord_node40, + .length_by_coord = length_by_coord_node40, + .plugin_by_coord = plugin_by_coord_node40, + .key_at = key_at_node40, + .estimate = estimate_node40, + .check = check_node40, + .parse = parse_node40, + .init = init_node40, +#ifdef GUESS_EXISTS + .guess = guess_node40, +#endif + .change_item_size = change_item_size_node40, + .create_item = create_item_node40, + .update_item_key = update_item_key_node40, + .cut_and_kill = kill_node40, + .cut = cut_node40, + .shift = shift_node40, + .shrink_item = shrink_item_node40, + .fast_insert = fast_insert_node40, + .fast_paste = fast_paste_node40, + .fast_cut = fast_cut_node40, + .max_item_size = max_item_size_node40, + .prepare_removal = prepare_removal_node40, + .set_item_plugin = set_item_plugin_node40 + }, + [NODE41_ID] = { + .h = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .id = NODE41_ID, + .pops = NULL, + .label = "node41", + .desc = "node41 layout", + .linkage = {NULL, NULL} + }, + .item_overhead = item_overhead_node40, + .free_space = free_space_node40, + .lookup = lookup_node40, + .num_of_items = num_of_items_node40, + .item_by_coord = item_by_coord_node40, + .length_by_coord = length_by_coord_node40, + .plugin_by_coord = plugin_by_coord_node40, + .key_at = 
key_at_node40, + .estimate = estimate_node40, + .check = NULL, + .parse = parse_node41, + .init = init_node41, +#ifdef GUESS_EXISTS + .guess = guess_node41, +#endif + .change_item_size = change_item_size_node40, + .create_item = create_item_node40, + .update_item_key = update_item_key_node40, + .cut_and_kill = kill_node40, + .cut = cut_node40, + .shift = shift_node41, + .shrink_item = shrink_item_node40, + .fast_insert = fast_insert_node40, + .fast_paste = fast_paste_node40, + .fast_cut = fast_cut_node40, + .max_item_size = max_item_size_node41, + .prepare_removal = prepare_removal_node40, + .set_item_plugin = set_item_plugin_node40, + .csum = csum_node41 + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node.h b/fs/reiser4/plugin/node/node.h new file mode 100644 index 000000000000..a4cda2c8ab61 --- /dev/null +++ b/fs/reiser4/plugin/node/node.h @@ -0,0 +1,275 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* We need a definition of the default node layout here. */ + +/* Generally speaking, it is best to have free space in the middle of the + node so that two sets of things can grow towards it, and to have the + item bodies on the left so that the last one of them grows into free + space. We optimize for the case where we append new items to the end + of the node, or grow the last item, because it hurts nothing to so + optimize and it is a common special case to do massive insertions in + increasing key order (and one of cases more likely to have a real user + notice the delay time for). + + formatted leaf default layout: (leaf1) + + |node header:item bodies:free space:key + pluginid + item offset| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. 
Keys, and item offsets plus pluginids for the items + corresponding to them are in increasing key order, and are fixed + length. Item offsets are relative to start of node (16 bits creating + a node size limit of 64k, 12 bits might be a better choice....). Item + bodies are in decreasing key order. Item bodies have a variable size. + There is a one to one to one mapping of keys to item offsets to item + bodies. Item offsets consist of pointers to the zeroth byte of the + item body. Item length equals the start of the next item minus the + start of this item, except the zeroth item whose length equals the end + of the node minus the start of that item (plus a byte). In other + words, the item length is not recorded anywhere, and it does not need + to be since it is computable. + + Leaf variable length items and keys layout : (lvar) + + |node header:key offset + item offset + pluginid triplets:free space:key bodies:item bodies| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. Keys and item offsets for the items corresponding to them are + in increasing key order, and keys are variable length. Item offsets + are relative to start of node (16 bits). Item bodies are in + decreasing key order. Item bodies have a variable size. There is a + one to one to one mapping of keys to item offsets to item bodies. + Item offsets consist of pointers to the zeroth byte of the item body. + Item length equals the start of the next item's key minus the start of + this item, except the zeroth item whose length equals the end of the + node minus the start of that item (plus a byte). + + leaf compressed keys layout: (lcomp) + + |node header:key offset + key inherit + item offset pairs:free space:key bodies:item bodies| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. 
Keys and item offsets for the items corresponding to them are + in increasing key order, and keys are variable length. The "key + inherit" field indicates how much of the key prefix is identical to + the previous key (stem compression as described in "Managing + Gigabytes" is used). key_inherit is a one byte integer. The + intra-node searches performed through this layout are linear searches, + and this is theorized to not hurt performance much due to the high + cost of processor stalls on modern CPUs, and the small number of keys + in a single node. Item offsets are relative to start of node (16 + bits). Item bodies are in decreasing key order. Item bodies have a + variable size. There is a one to one to one mapping of keys to item + offsets to item bodies. Item offsets consist of pointers to the + zeroth byte of the item body. Item length equals the start of the + next item minus the start of this item, except the zeroth item whose + length equals the end of the node minus the start of that item (plus a + byte). In other words, item length and key length is not recorded + anywhere, and it does not need to be since it is computable. + + internal node default layout: (idef1) + + just like ldef1 except that item bodies are either blocknrs of + children or extents, and moving them may require updating parent + pointers in the nodes that they point to. +*/ + +/* There is an inherent 3-way tradeoff between optimizing and + exchanging disks between different architectures and code + complexity. This is optimal and simple and inexchangeable. + Someone else can do the code for exchanging disks and make it + complex. It would not be that hard. Using other than the PAGE_SIZE + might be suboptimal. 
+*/ + +#if !defined( __REISER4_NODE_H__ ) +#define __REISER4_NODE_H__ + +#define LEAF40_NODE_SIZE PAGE_CACHE_SIZE + +#include "../../dformat.h" +#include "../plugin_header.h" + +#include + +typedef enum { + NS_FOUND = 0, + NS_NOT_FOUND = -ENOENT +} node_search_result; + +/* Maximal possible space overhead for creation of new item in a node */ +#define REISER4_NODE_MAX_OVERHEAD ( sizeof( reiser4_key ) + 32 ) + +typedef enum { + REISER4_NODE_DKEYS = (1 << 0), + REISER4_NODE_TREE_STABLE = (1 << 1) +} reiser4_node_check_flag; + +/* cut and cut_and_kill have too long list of parameters. This structure is just to safe some space on stack */ +struct cut_list { + coord_t *from; + coord_t *to; + const reiser4_key *from_key; + const reiser4_key *to_key; + reiser4_key *smallest_removed; + carry_plugin_info *info; + __u32 flags; + struct inode *inode; /* this is to pass list of eflushed jnodes down to extent_kill_hook */ + lock_handle *left; + lock_handle *right; +}; + +struct carry_cut_data; +struct carry_kill_data; + +/* The responsibility of the node plugin is to store and give access + to the sequence of items within the node. */ +typedef struct node_plugin { + /* generic plugin fields */ + plugin_header h; + + /* calculates the amount of space that will be required to store an + item which is in addition to the space consumed by the item body. + (the space consumed by the item body can be gotten by calling + item->estimate) */ + size_t(*item_overhead) (const znode * node, flow_t * f); + + /* returns free space by looking into node (i.e., without using + znode->free_space). 
*/ + size_t(*free_space) (znode * node); + /* search within the node for the one item which might + contain the key, invoking item->search_within to search within + that item to see if it is in there */ + node_search_result(*lookup) (znode * node, const reiser4_key * key, + lookup_bias bias, coord_t * coord); + /* number of items in node */ + int (*num_of_items) (const znode * node); + + /* store information about item in @coord in @data */ + /* break into several node ops, don't add any more uses of this before doing so */ + /*int ( *item_at )( const coord_t *coord, reiser4_item_data *data ); */ + char *(*item_by_coord) (const coord_t * coord); + int (*length_by_coord) (const coord_t * coord); + item_plugin *(*plugin_by_coord) (const coord_t * coord); + + /* store item key in @key */ + reiser4_key *(*key_at) (const coord_t * coord, reiser4_key * key); + /* conservatively estimate whether unit of what size can fit + into node. This estimation should be performed without + actually looking into the node's content (free space is saved in + znode). */ + size_t(*estimate) (znode * node); + + /* performs every consistency check the node plugin author could + imagine. Optional. */ + int (*check) (const znode * node, __u32 flags, const char **error); + + /* Called when node is read into memory and node plugin is + already detected. This should read some data into znode (like free + space counter) and, optionally, check data consistency. + */ + int (*parse) (znode * node); + /* This method is called on a new node to initialise plugin specific + data (header, etc.) */ + int (*init) (znode * node); + /* Check whether @node content conforms to this plugin format. + Probably only useful after support for old V3.x formats is added. + Uncomment after 4.0 only. + */ + /* int ( *guess )( const znode *node ); */ +#if REISER4_DEBUG + void (*print) (const char *prefix, const znode * node, __u32 flags); +#endif + /* change size of @item by @by bytes. 
@item->node has enough free + space. When @by > 0 - free space is appended to end of item. When + @by < 0 - item is truncated - it is assumed that last @by bytes if + the item are freed already */ + void (*change_item_size) (coord_t * item, int by); + + /* create new item @length bytes long in coord @target */ + int (*create_item) (coord_t * target, const reiser4_key * key, + reiser4_item_data * data, carry_plugin_info * info); + + /* update key of item. */ + void (*update_item_key) (coord_t * target, const reiser4_key * key, + carry_plugin_info * info); + + int (*cut_and_kill) (struct carry_kill_data *, carry_plugin_info *); + int (*cut) (struct carry_cut_data *, carry_plugin_info *); + + /* + * shrink item pointed to by @coord by @delta bytes. + */ + int (*shrink_item) (coord_t * coord, int delta); + + /* copy as much as possible but not more than up to @stop from + @stop->node to @target. If (pend == append) then data from beginning of + @stop->node are copied to the end of @target. If (pend == prepend) then + data from the end of @stop->node are copied to the beginning of + @target. Copied data are removed from @stop->node. Information + about what to do on upper level is stored in @todo */ + int (*shift) (coord_t * stop, znode * target, shift_direction pend, + int delete_node, int including_insert_coord, + carry_plugin_info * info); + /* return true if this node allows skip carry() in some situations + (see fs/reiser4/tree.c:insert_by_coord()). Reiser3.x format + emulation doesn't. + + This will speedup insertions that doesn't require updates to the + parent, by bypassing initialisation of carry() structures. It's + believed that majority of insertions will fit there. 
+ + */ + int (*fast_insert) (const coord_t * coord); + int (*fast_paste) (const coord_t * coord); + int (*fast_cut) (const coord_t * coord); + /* this limits max size of item which can be inserted into a node and + number of bytes item in a node may be appended with */ + int (*max_item_size) (void); + int (*prepare_removal) (znode * empty, carry_plugin_info * info); + /* change plugin id of items which are in a node already. Currently it is Used in tail conversion for regular + * files */ + int (*set_item_plugin) (coord_t * coord, item_id); + /* calculate and check/update znode's checksum + (if @check is true, then check, otherwise update) */ + int (*csum)(znode *node, int check); +} node_plugin; + +typedef enum { + NODE40_ID, /* standard unified node layout used for both, + leaf and internal nodes */ + NODE41_ID, /* node layout with a checksum */ + LAST_NODE_ID +} reiser4_node_id; + +extern reiser4_key *leftmost_key_in_node(const znode * node, reiser4_key * key); +#if REISER4_DEBUG +extern void print_node_content(const char *prefix, const znode * node, + __u32 flags); +#endif + +extern void indent_znode(const znode * node); + +typedef struct common_node_header { + /* + * identifier of node plugin. Must be located at the very beginning of + * a node. 
+ */ + __le16 plugin_id; +} common_node_header; + +/* __REISER4_NODE_H__ */ +#endif +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/node/node40.c b/fs/reiser4/plugin/node/node40.c new file mode 100644 index 000000000000..47c83091b687 --- /dev/null +++ b/fs/reiser4/plugin/node/node40.c @@ -0,0 +1,3073 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "node40.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../pool.h" +#include "../../carry.h" +#include "../../tap.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../reiser4.h" + +#include +#include +#include + +/* leaf 40 format: + + [node header | item 0, item 1, .., item N-1 | free space | item_head N-1, .. item_head 1, item head 0 ] + plugin_id (16) key + free_space (16) pluginid (16) + free_space_start (16) offset (16) + level (8) + num_items (16) + magic (32) + flush_time (32) +*/ +/* NIKITA-FIXME-HANS: I told you guys not less than 10 times to not call it r4fs. Change to "ReIs". 
 */
/* magic number that is stored in ->magic field of node header */
static const __u32 REISER4_NODE40_MAGIC = 0x52344653;	/* (*(__u32 *)"R4FS"); */

/* forward declaration: defined later in this translation unit */
static int prepare_for_update(znode * left, znode * right,
			      carry_plugin_info * info);

/* header of node of reiser40 format is at the beginning of node */
static inline node40_header *node40_node_header(const znode * node	/* node to
									 * query */ )
{
	assert("nikita-567", node != NULL);
	assert("nikita-568", znode_page(node) != NULL);
	assert("nikita-569", zdata(node) != NULL);
	return (node40_header *) zdata(node);
}

/* functions to get/set fields of node40_header. On-disk fields are stored
   little-endian and are accessed through get_unaligned()/put_unaligned(),
   since the in-node location gives no alignment guarantee. */
#define nh40_get_magic(nh) le32_to_cpu(get_unaligned(&(nh)->magic))
#define nh40_get_free_space(nh) le16_to_cpu(get_unaligned(&(nh)->free_space))
#define nh40_get_free_space_start(nh) le16_to_cpu(get_unaligned(&(nh)->free_space_start))
#define nh40_get_level(nh) get_unaligned(&(nh)->level)
#define nh40_get_num_items(nh) le16_to_cpu(get_unaligned(&(nh)->nr_items))
#define nh40_get_flush_id(nh) le64_to_cpu(get_unaligned(&(nh)->flush_id))

#define nh40_set_magic(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->magic)
#define nh40_set_free_space(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->free_space)
#define nh40_set_free_space_start(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->free_space_start)
#define nh40_set_level(nh, value) put_unaligned(value, &(nh)->level)
#define nh40_set_num_items(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->nr_items)
#define nh40_set_mkfs_id(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->mkfs_id)

/* plugin field of node header should be read/set by
   plugin_by_disk_id/save_disk_plugin */

/* array of item headers is at the end of node: the header of item @pos is
   the (@pos + 1)-th item_header40 counting back from the end of the node
   buffer, i.e. headers grow from right to left. */
static inline item_header40 *node40_ih_at(const znode * node, unsigned pos)
{
	return (item_header40 *) (zdata(node) + znode_size(node)) - pos - 1;
}

/* ( page_address( node -> pg ) + PAGE_CACHE_SIZE ) - pos - 1
 */
static
inline item_header40 *node40_ih_at_coord(const coord_t * coord)
{
	/* same right-to-left header-array arithmetic as node40_ih_at(),
	   but taking the item position from @coord */
	return (item_header40 *) (zdata(coord->node) +
				  znode_size(coord->node)) - (coord->item_pos) -
	    1;
}

/* functions to get/set fields of item_header40; the offset field is stored
   little-endian on disk and may be unaligned */
#define ih40_get_offset(ih) le16_to_cpu(get_unaligned(&(ih)->offset))

#define ih40_set_offset(ih, value) put_unaligned(cpu_to_le16(value), &(ih)->offset)

/* plugin field of item header should be read/set by
   plugin_by_disk_id/save_disk_plugin */

/* plugin methods */

/* plugin->u.node.item_overhead
   look for description of this method in plugin/node/node.h.
   Every item costs exactly one item_header40 in addition to its body. */
size_t
item_overhead_node40(const znode * node UNUSED_ARG, flow_t * f UNUSED_ARG)
{
	return sizeof(item_header40);
}

/* plugin->u.node.free_space
   look for description of this method in plugin/node/node.h.
   Returns the free-space counter cached in the on-disk node header. */
size_t free_space_node40(znode * node)
{
	assert("nikita-577", node != NULL);
	assert("nikita-578", znode_is_loaded(node));
	assert("nikita-579", zdata(node) != NULL);

	return nh40_get_free_space(node40_node_header(node));
}

/* private inline version of node40_num_of_items() for use in this file. This
   is necessary, because address of node40_num_of_items() is taken and it is
   never inlined as a result.
 */
static inline short node40_num_of_items_internal(const znode * node)
{
	return nh40_get_num_items(node40_node_header(node));
}

#if REISER4_DEBUG
/* debugging check: the item counter cached in the znode must agree with the
   one stored in the node header; the caller must hold the write lock */
static inline void check_num_items(const znode * node)
{
	assert("nikita-2749",
	       node40_num_of_items_internal(node) == node->nr_items);
	assert("nikita-2746", znode_is_write_locked(node));
}
#else
#define check_num_items(node) noop
#endif

/* plugin->u.node.num_of_items
   look for description of this method in plugin/node/node.h */
int num_of_items_node40(const znode * node)
{
	return node40_num_of_items_internal(node);
}

/* update the number of items both in the on-disk header @nh and in the
   in-memory counter node->nr_items, keeping the two consistent */
static void
node40_set_num_items(znode * node, node40_header * nh, unsigned value)
{
	assert("nikita-2751", node != NULL);
	assert("nikita-2750", nh == node40_node_header(node));

	check_num_items(node);
	nh40_set_num_items(nh, value);
	node->nr_items = value;
	check_num_items(node);
}

/* plugin->u.node.item_by_coord
   look for description of this method in plugin/node/node.h */
char *item_by_coord_node40(const coord_t * coord)
{
	item_header40 *ih;
	char *p;

	/* @coord is set to existing item */
	assert("nikita-596", coord != NULL);
	assert("vs-255", coord_is_existing_item(coord));

	/* item body lives at the offset recorded in its item header */
	ih = node40_ih_at_coord(coord);
	p = zdata(coord->node) + ih40_get_offset(ih);
	return p;
}

/* plugin->u.node.length_by_coord
   look for description of this method in plugin/node/node.h.
   Item length is not stored on disk: it is computed as the distance to the
   start of the next item's body, or, for the last item, to the start of
   free space. */
int length_by_coord_node40(const coord_t * coord)
{
	item_header40 *ih;
	int result;

	/* @coord is set to existing item */
	assert("vs-256", coord != NULL);
	assert("vs-257", coord_is_existing_item(coord));

	ih = node40_ih_at_coord(coord);
	if ((int)coord->item_pos ==
	    node40_num_of_items_internal(coord->node) - 1)
		result =
		    nh40_get_free_space_start(node40_node_header(coord->node)) -
		    ih40_get_offset(ih);
	else
		result = ih40_get_offset(ih - 1) - ih40_get_offset(ih);

	return result;
}

/* same length computation as length_by_coord_node40(), but addressed by a
   (node, item_pos) pair instead of a coord */
static pos_in_node_t
node40_item_length(const znode * node,
pos_in_node_t item_pos) +{ + item_header40 *ih; + pos_in_node_t result; + + /* @coord is set to existing item */ + assert("vs-256", node != NULL); + assert("vs-257", node40_num_of_items_internal(node) > item_pos); + + ih = node40_ih_at(node, item_pos); + if (item_pos == node40_num_of_items_internal(node) - 1) + result = + nh40_get_free_space_start(node40_node_header(node)) - + ih40_get_offset(ih); + else + result = ih40_get_offset(ih - 1) - ih40_get_offset(ih); + + return result; +} + +/* plugin->u.node.plugin_by_coord + look for description of this method in plugin/node/node.h */ +item_plugin *plugin_by_coord_node40(const coord_t * coord) +{ + item_header40 *ih; + item_plugin *result; + + /* @coord is set to existing item */ + assert("vs-258", coord != NULL); + assert("vs-259", coord_is_existing_item(coord)); + + ih = node40_ih_at_coord(coord); + /* pass NULL in stead of current tree. This is time critical call. */ + result = item_plugin_by_disk_id(NULL, &ih->plugin_id); + return result; +} + +/* plugin->u.node.key_at + look for description of this method in plugin/node/node.h */ +reiser4_key *key_at_node40(const coord_t * coord, reiser4_key * key) +{ + item_header40 *ih; + + assert("nikita-1765", coord_is_existing_item(coord)); + + /* @coord is set to existing item */ + ih = node40_ih_at_coord(coord); + memcpy(key, &ih->key, sizeof(reiser4_key)); + return key; +} + +/* VS-FIXME-HANS: please review whether the below are properly disabled when debugging is disabled */ + +#define NODE_INCSTAT(n, counter) \ + reiser4_stat_inc_at_level(znode_get_level(n), node.lookup.counter) + +#define NODE_ADDSTAT(n, counter, val) \ + reiser4_stat_add_at_level(znode_get_level(n), node.lookup.counter, val) + +/* plugin->u.node.lookup + look for description of this method in plugin/node/node.h */ +node_search_result lookup_node40(znode * node /* node to query */ , + const reiser4_key * key /* key to look for */ , + lookup_bias bias /* search bias */ , + coord_t * coord /* resulting 
coord */ ) +{ + int left; + int right; + int found; + int items; + + item_header40 *lefth; + item_header40 *righth; + + item_plugin *iplug; + item_header40 *bstop; + item_header40 *ih; + cmp_t order; + + assert("nikita-583", node != NULL); + assert("nikita-584", key != NULL); + assert("nikita-585", coord != NULL); + assert("nikita-2693", znode_is_any_locked(node)); + cassert(REISER4_SEQ_SEARCH_BREAK > 2); + + items = node_num_items(node); + + if (unlikely(items == 0)) { + coord_init_first_unit(coord, node); + return NS_NOT_FOUND; + } + + /* binary search for item that can contain given key */ + left = 0; + right = items - 1; + coord->node = node; + coord_clear_iplug(coord); + found = 0; + + lefth = node40_ih_at(node, left); + righth = node40_ih_at(node, right); + + /* It is known that for small arrays sequential search is on average + more efficient than binary. This is because sequential search is + coded as tight loop that can be better optimized by compilers and + for small array size gain from this optimization makes sequential + search the winner. Another, maybe more important, reason for this, + is that sequential array is more CPU cache friendly, whereas binary + search effectively destroys CPU caching. + + Critical here is the notion of "smallness". Reasonable value of + REISER4_SEQ_SEARCH_BREAK can be found by playing with code in + fs/reiser4/ulevel/ulevel.c:test_search(). + + Don't try to further optimize sequential search by scanning from + right to left in attempt to use more efficient loop termination + condition (comparison with 0). This doesn't work. 
+ + */ + + while (right - left >= REISER4_SEQ_SEARCH_BREAK) { + int median; + item_header40 *medianh; + + median = (left + right) / 2; + medianh = node40_ih_at(node, median); + + assert("nikita-1084", median >= 0); + assert("nikita-1085", median < items); + switch (keycmp(key, &medianh->key)) { + case LESS_THAN: + right = median; + righth = medianh; + break; + default: + wrong_return_value("nikita-586", "keycmp"); + case GREATER_THAN: + left = median; + lefth = medianh; + break; + case EQUAL_TO: + do { + --median; + /* headers are ordered from right to left */ + ++medianh; + } while (median >= 0 && keyeq(key, &medianh->key)); + right = left = median + 1; + ih = lefth = righth = medianh - 1; + found = 1; + break; + } + } + /* sequential scan. Item headers, and, therefore, keys are stored at + the rightmost part of a node from right to left. We are trying to + access memory from left to right, and hence, scan in _descending_ + order of item numbers. + */ + if (!found) { + for (left = right, ih = righth; left >= 0; ++ih, --left) { + cmp_t comparison; + + prefetchkey(&(ih + 1)->key); + comparison = keycmp(&ih->key, key); + if (comparison == GREATER_THAN) + continue; + if (comparison == EQUAL_TO) { + found = 1; + do { + --left; + ++ih; + } while (left >= 0 && keyeq(&ih->key, key)); + ++left; + --ih; + } else { + assert("nikita-1256", comparison == LESS_THAN); + } + break; + } + if (unlikely(left < 0)) + left = 0; + } + + assert("nikita-3212", right >= left); + assert("nikita-3214", + equi(found, keyeq(&node40_ih_at(node, left)->key, key))); + + coord_set_item_pos(coord, left); + coord->unit_pos = 0; + coord->between = AT_UNIT; + + /* key < leftmost key in a mode or node is corrupted and keys + are not sorted */ + bstop = node40_ih_at(node, (unsigned)left); + order = keycmp(&bstop->key, key); + if (unlikely(order == GREATER_THAN)) { + if (unlikely(left != 0)) { + /* screw up */ + warning("nikita-587", "Key less than %i key in a node", + left); + reiser4_print_key("key", 
key); + reiser4_print_key("min", &bstop->key); + print_coord_content("coord", coord); + return RETERR(-EIO); + } else { + coord->between = BEFORE_UNIT; + return NS_NOT_FOUND; + } + } + /* left <= key, ok */ + iplug = item_plugin_by_disk_id(znode_get_tree(node), &bstop->plugin_id); + + if (unlikely(iplug == NULL)) { + warning("nikita-588", "Unknown plugin %i", + le16_to_cpu(get_unaligned(&bstop->plugin_id))); + reiser4_print_key("key", key); + print_coord_content("coord", coord); + return RETERR(-EIO); + } + + coord_set_iplug(coord, iplug); + + /* if exact key from item header was found by binary search, no + further checks are necessary. */ + if (found) { + assert("nikita-1259", order == EQUAL_TO); + return NS_FOUND; + } + if (iplug->b.max_key_inside != NULL) { + reiser4_key max_item_key; + + /* key > max_item_key --- outside of an item */ + if (keygt(key, iplug->b.max_key_inside(coord, &max_item_key))) { + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + /* FIXME-VS: key we are looking for does not fit into + found item. Return NS_NOT_FOUND then. Without that + the following case does not work: there is extent of + file 10000, 10001. File 10000, 10002 has been just + created. When writing to position 0 in that file - + traverse_tree will stop here on twig level. When we + want it to go down to leaf level + */ + return NS_NOT_FOUND; + } + } + + if (iplug->b.lookup != NULL) { + return iplug->b.lookup(key, bias, coord); + } else { + assert("nikita-1260", order == LESS_THAN); + coord->between = AFTER_UNIT; + return (bias == FIND_EXACT) ? NS_NOT_FOUND : NS_FOUND; + } +} + +#undef NODE_ADDSTAT +#undef NODE_INCSTAT + +/* plugin->u.node.estimate + look for description of this method in plugin/node/node.h */ +size_t estimate_node40(znode * node) +{ + size_t result; + + assert("nikita-597", node != NULL); + + result = free_space_node40(node) - sizeof(item_header40); + + return (result > 0) ? 
result : 0; +} + +/* plugin->u.node.check + look for description of this method in plugin/node/node.h */ +int check_node40(const znode * node /* node to check */ , + __u32 flags /* check flags */ , + const char **error /* where to store error message */ ) +{ + int nr_items; + int i; + reiser4_key prev; + unsigned old_offset; + tree_level level; + coord_t coord; + int result; + + assert("nikita-580", node != NULL); + assert("nikita-581", error != NULL); + assert("nikita-2948", znode_is_loaded(node)); + + if (ZF_ISSET(node, JNODE_HEARD_BANSHEE)) + return 0; + + assert("nikita-582", zdata(node) != NULL); + + nr_items = node40_num_of_items_internal(node); + if (nr_items < 0) { + *error = "Negative number of items"; + return -1; + } + + if (flags & REISER4_NODE_DKEYS) + prev = *znode_get_ld_key((znode *) node); + else + prev = *reiser4_min_key(); + + old_offset = 0; + coord_init_zero(&coord); + coord.node = (znode *) node; + coord.unit_pos = 0; + coord.between = AT_UNIT; + level = znode_get_level(node); + for (i = 0; i < nr_items; i++) { + item_header40 *ih; + reiser4_key unit_key; + unsigned j; + + ih = node40_ih_at(node, (unsigned)i); + coord_set_item_pos(&coord, i); + if ((ih40_get_offset(ih) >= + znode_size(node) - nr_items * sizeof(item_header40)) || + (ih40_get_offset(ih) < sizeof(node40_header))) { + *error = "Offset is out of bounds"; + return -1; + } + if (ih40_get_offset(ih) <= old_offset) { + *error = "Offsets are in wrong order"; + return -1; + } + if ((i == 0) && (ih40_get_offset(ih) != sizeof(node40_header))) { + *error = "Wrong offset of first item"; + return -1; + } + old_offset = ih40_get_offset(ih); + + if (keygt(&prev, &ih->key)) { + *error = "Keys are in wrong order"; + return -1; + } + if (!keyeq(&ih->key, unit_key_by_coord(&coord, &unit_key))) { + *error = "Wrong key of first unit"; + return -1; + } + prev = ih->key; + for (j = 0; j < coord_num_units(&coord); ++j) { + coord.unit_pos = j; + unit_key_by_coord(&coord, &unit_key); + if (keygt(&prev, 
&unit_key)) { + *error = "Unit keys are in wrong order"; + return -1; + } + prev = unit_key; + } + coord.unit_pos = 0; + if (level != TWIG_LEVEL && item_is_extent(&coord)) { + *error = "extent on the wrong level"; + return -1; + } + if (level == LEAF_LEVEL && item_is_internal(&coord)) { + *error = "internal item on the wrong level"; + return -1; + } + if (level != LEAF_LEVEL && + !item_is_internal(&coord) && !item_is_extent(&coord)) { + *error = "wrong item on the internal level"; + return -1; + } + if (level > TWIG_LEVEL && !item_is_internal(&coord)) { + *error = "non-internal item on the internal level"; + return -1; + } +#if REISER4_DEBUG + if (item_plugin_by_coord(&coord)->b.check + && item_plugin_by_coord(&coord)->b.check(&coord, error)) + return -1; +#endif + if (i) { + coord_t prev_coord; + /* two neighboring items can not be mergeable */ + coord_dup(&prev_coord, &coord); + coord_prev_item(&prev_coord); + if (are_items_mergeable(&prev_coord, &coord)) { + *error = "mergeable items in one node"; + return -1; + } + + } + } + + if ((flags & REISER4_NODE_DKEYS) && !node_is_empty(node)) { + coord_t coord; + item_plugin *iplug; + + coord_init_last_unit(&coord, node); + iplug = item_plugin_by_coord(&coord); + if ((item_is_extent(&coord) || item_is_tail(&coord)) && + iplug->s.file.append_key != NULL) { + reiser4_key mkey; + + iplug->s.file.append_key(&coord, &mkey); + set_key_offset(&mkey, get_key_offset(&mkey) - 1); + read_lock_dk(current_tree); + result = keygt(&mkey, znode_get_rd_key((znode *) node)); + read_unlock_dk(current_tree); + if (result) { + *error = "key of rightmost item is too large"; + return -1; + } + } + } + if (flags & REISER4_NODE_DKEYS) { + read_lock_tree(current_tree); + read_lock_dk(current_tree); + + flags |= REISER4_NODE_TREE_STABLE; + + if (keygt(&prev, znode_get_rd_key((znode *) node))) { + if (flags & REISER4_NODE_TREE_STABLE) { + *error = "Last key is greater than rdkey"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + 
return -1; + } + } + if (keygt + (znode_get_ld_key((znode *) node), + znode_get_rd_key((znode *) node))) { + *error = "ldkey is greater than rdkey"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + if (ZF_ISSET(node, JNODE_LEFT_CONNECTED) && + (node->left != NULL) && + !ZF_ISSET(node->left, JNODE_HEARD_BANSHEE) && + ergo(flags & REISER4_NODE_TREE_STABLE, + !keyeq(znode_get_rd_key(node->left), + znode_get_ld_key((znode *) node))) + && ergo(!(flags & REISER4_NODE_TREE_STABLE), + keygt(znode_get_rd_key(node->left), + znode_get_ld_key((znode *) node)))) { + *error = "left rdkey or ldkey is wrong"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && + (node->right != NULL) && + !ZF_ISSET(node->right, JNODE_HEARD_BANSHEE) && + ergo(flags & REISER4_NODE_TREE_STABLE, + !keyeq(znode_get_rd_key((znode *) node), + znode_get_ld_key(node->right))) + && ergo(!(flags & REISER4_NODE_TREE_STABLE), + keygt(znode_get_rd_key((znode *) node), + znode_get_ld_key(node->right)))) { + *error = "rdkey or right ldkey is wrong"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + } + + return 0; +} + +int parse_node40_common(znode *node, const __u32 magic) +{ + node40_header *header; + int result; + d8 level; + + header = node40_node_header((znode *) node); + result = -EIO; + level = nh40_get_level(header); + if (unlikely(((__u8) znode_get_level(node)) != level)) + warning("nikita-494", "Wrong level found in node: %i != %i", + znode_get_level(node), level); + else if (unlikely(nh40_get_magic(header) != magic)) + warning("nikita-495", + "Wrong magic in tree node: want %x, got %x", + magic, nh40_get_magic(header)); + else { + node->nr_items = node40_num_of_items_internal(node); + result = 0; + } + return RETERR(result); +} + +/* + * plugin->u.node.parse + * look for description of 
this method in plugin/node/node.h + */ +int parse_node40(znode *node /* node to parse */) +{ + return parse_node40_common(node, REISER4_NODE40_MAGIC); +} + +/* + * common part of ->init_node() for all nodes, + * which contain node40_header at the beginning + */ +int init_node40_common(znode *node, node_plugin *nplug, + size_t node_header_size, const __u32 magic) +{ + node40_header *header40; + + assert("nikita-570", node != NULL); + assert("nikita-572", zdata(node) != NULL); + + header40 = node40_node_header(node); + memset(header40, 0, sizeof(node40_header)); + + nh40_set_free_space(header40, znode_size(node) - node_header_size); + nh40_set_free_space_start(header40, node_header_size); + /* + * sane hypothesis: 0 in CPU format is 0 in disk format + */ + save_plugin_id(node_plugin_to_plugin(nplug), + &header40->common_header.plugin_id); + nh40_set_level(header40, znode_get_level(node)); + nh40_set_magic(header40, magic); + nh40_set_mkfs_id(header40, reiser4_mkfs_id(reiser4_get_current_sb())); + /* + * nr_items: 0 + * flags: 0 + */ + return 0; +} + +/* + * plugin->u.node.init + * look for description of this method in plugin/node/node.h + */ +int init_node40(znode *node /* node to initialise */) +{ + return init_node40_common(node, node_plugin_by_id(NODE40_ID), + sizeof(node40_header), REISER4_NODE40_MAGIC); +} + +#ifdef GUESS_EXISTS +int guess_node40_common(const znode *node, reiser4_node_id id, + const __u32 magic) +{ + node40_header *header; + + assert("nikita-1058", node != NULL); + header = node40_node_header(node); + return (nh40_get_magic(header) == magic) && + (id == plugin_by_disk_id(znode_get_tree(node), + REISER4_NODE_PLUGIN_TYPE, + &header->common_header.plugin_id)->h.id); +} + +int guess_node40(const znode *node /* node to guess plugin of */) +{ + return guess_node40_common(node, NODE40_ID, REISER4_NODE40_MAGIC); +} +#endif + +/* plugin->u.node.change_item_size + look for description of this method in plugin/node/node.h */ +void 
change_item_size_node40(coord_t * coord, int by) +{ + node40_header *nh; + item_header40 *ih; + char *item_data; + int item_length; + unsigned i; + + /* make sure that @item is coord of existing item */ + assert("vs-210", coord_is_existing_item(coord)); + + nh = node40_node_header(coord->node); + + item_data = item_by_coord_node40(coord); + item_length = length_by_coord_node40(coord); + + /* move item bodies */ + ih = node40_ih_at_coord(coord); + memmove(item_data + item_length + by, item_data + item_length, + nh40_get_free_space_start(node40_node_header(coord->node)) - + (ih40_get_offset(ih) + item_length)); + + /* update offsets of moved items */ + for (i = coord->item_pos + 1; i < nh40_get_num_items(nh); i++) { + ih = node40_ih_at(coord->node, i); + ih40_set_offset(ih, ih40_get_offset(ih) + by); + } + + /* update node header */ + nh40_set_free_space(nh, nh40_get_free_space(nh) - by); + nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) + by); +} + +static int should_notify_parent(const znode * node) +{ + /* FIXME_JMACD This looks equivalent to znode_is_root(), right? 
-josh */ + return !disk_addr_eq(znode_get_block(node), + &znode_get_tree(node)->root_block); +} + +/* plugin->u.node.create_item + look for description of this method in plugin/node/node.h */ +int +create_item_node40(coord_t *target, const reiser4_key *key, + reiser4_item_data *data, carry_plugin_info *info) +{ + node40_header *nh; + item_header40 *ih; + unsigned offset; + unsigned i; + + nh = node40_node_header(target->node); + + assert("vs-212", coord_is_between_items(target)); + /* node must have enough free space */ + assert("vs-254", + free_space_node40(target->node) >= + data->length + sizeof(item_header40)); + assert("vs-1410", data->length >= 0); + + if (coord_set_to_right(target)) + /* there are not items to the right of @target, so, new item + will be inserted after last one */ + coord_set_item_pos(target, nh40_get_num_items(nh)); + + if (target->item_pos < nh40_get_num_items(nh)) { + /* there are items to be moved to prepare space for new + item */ + ih = node40_ih_at_coord(target); + /* new item will start at this offset */ + offset = ih40_get_offset(ih); + + memmove(zdata(target->node) + offset + data->length, + zdata(target->node) + offset, + nh40_get_free_space_start(nh) - offset); + /* update headers of moved items */ + for (i = target->item_pos; i < nh40_get_num_items(nh); i++) { + ih = node40_ih_at(target->node, i); + ih40_set_offset(ih, ih40_get_offset(ih) + data->length); + } + + /* @ih is set to item header of the last item, move item headers */ + memmove(ih - 1, ih, + sizeof(item_header40) * (nh40_get_num_items(nh) - + target->item_pos)); + } else { + /* new item will start at this offset */ + offset = nh40_get_free_space_start(nh); + } + + /* make item header for the new item */ + ih = node40_ih_at_coord(target); + memcpy(&ih->key, key, sizeof(reiser4_key)); + ih40_set_offset(ih, offset); + save_plugin_id(item_plugin_to_plugin(data->iplug), &ih->plugin_id); + + /* update node header */ + nh40_set_free_space(nh, + nh40_get_free_space(nh) - 
data->length - + sizeof(item_header40)); + nh40_set_free_space_start(nh, + nh40_get_free_space_start(nh) + data->length); + node40_set_num_items(target->node, nh, nh40_get_num_items(nh) + 1); + + /* FIXME: check how does create_item work when between is set to BEFORE_UNIT */ + target->unit_pos = 0; + target->between = AT_UNIT; + coord_clear_iplug(target); + + /* initialize item */ + if (data->iplug->b.init != NULL) { + data->iplug->b.init(target, NULL, data); + } + /* copy item body */ + if (data->iplug->b.paste != NULL) { + data->iplug->b.paste(target, data, info); + } else if (data->data != NULL) { + if (data->user) { + /* AUDIT: Are we really should not check that pointer + from userspace was valid and data bytes were + available? How will we return -EFAULT of some kind + without this check? */ + assert("nikita-3038", reiser4_schedulable()); + /* copy data from user space */ + if (__copy_from_user(zdata(target->node) + offset, + (const char __user *)data->data, + (unsigned)data->length)) + return RETERR(-EFAULT); + } else + /* copy from kernel space */ + memcpy(zdata(target->node) + offset, data->data, + (unsigned)data->length); + } + + if (target->item_pos == 0) { + /* left delimiting key has to be updated */ + prepare_for_update(NULL, target->node, info); + } + + if (item_plugin_by_coord(target)->b.create_hook != NULL) { + item_plugin_by_coord(target)->b.create_hook(target, data->arg); + } + + return 0; +} + +/* plugin->u.node.update_item_key + look for description of this method in plugin/node/node.h */ +void +update_item_key_node40(coord_t * target, const reiser4_key * key, + carry_plugin_info * info) +{ + item_header40 *ih; + + ih = node40_ih_at_coord(target); + memcpy(&ih->key, key, sizeof(reiser4_key)); + + if (target->item_pos == 0) { + prepare_for_update(NULL, target->node, info); + } +} + +/* this bits encode cut mode */ +#define CMODE_TAIL 1 +#define CMODE_WHOLE 2 +#define CMODE_HEAD 4 + +struct cut40_info { + int mode; + pos_in_node_t tail_removed; 
/* position of item which gets tail removed */ + pos_in_node_t first_removed; /* position of first the leftmost item among items removed completely */ + pos_in_node_t removed_count; /* number of items removed completely */ + pos_in_node_t head_removed; /* position of item which gets head removed */ + + pos_in_node_t freed_space_start; + pos_in_node_t freed_space_end; + pos_in_node_t first_moved; + pos_in_node_t head_removed_location; +}; + +static void init_cinfo(struct cut40_info *cinfo) +{ + cinfo->mode = 0; + cinfo->tail_removed = MAX_POS_IN_NODE; + cinfo->first_removed = MAX_POS_IN_NODE; + cinfo->removed_count = MAX_POS_IN_NODE; + cinfo->head_removed = MAX_POS_IN_NODE; + cinfo->freed_space_start = MAX_POS_IN_NODE; + cinfo->freed_space_end = MAX_POS_IN_NODE; + cinfo->first_moved = MAX_POS_IN_NODE; + cinfo->head_removed_location = MAX_POS_IN_NODE; +} + +/* complete cut_node40/kill_node40 content by removing the gap created by */ +static void compact(znode * node, struct cut40_info *cinfo) +{ + node40_header *nh; + item_header40 *ih; + pos_in_node_t freed; + pos_in_node_t pos, nr_items; + + assert("vs-1526", (cinfo->freed_space_start != MAX_POS_IN_NODE && + cinfo->freed_space_end != MAX_POS_IN_NODE && + cinfo->first_moved != MAX_POS_IN_NODE)); + assert("vs-1523", cinfo->freed_space_end >= cinfo->freed_space_start); + + nh = node40_node_header(node); + nr_items = nh40_get_num_items(nh); + + /* remove gap made up by removal */ + memmove(zdata(node) + cinfo->freed_space_start, + zdata(node) + cinfo->freed_space_end, + nh40_get_free_space_start(nh) - cinfo->freed_space_end); + + /* update item headers of moved items - change their locations */ + pos = cinfo->first_moved; + ih = node40_ih_at(node, pos); + if (cinfo->head_removed_location != MAX_POS_IN_NODE) { + assert("vs-1580", pos == cinfo->head_removed); + ih40_set_offset(ih, cinfo->head_removed_location); + pos++; + ih--; + } + + freed = cinfo->freed_space_end - cinfo->freed_space_start; + for (; pos < nr_items; 
pos++, ih--) { + assert("vs-1581", ih == node40_ih_at(node, pos)); + ih40_set_offset(ih, ih40_get_offset(ih) - freed); + } + + /* free space start moved to right */ + nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) - freed); + + if (cinfo->removed_count != MAX_POS_IN_NODE) { + /* number of items changed. Remove item headers of those items */ + ih = node40_ih_at(node, nr_items - 1); + memmove(ih + cinfo->removed_count, ih, + sizeof(item_header40) * (nr_items - + cinfo->removed_count - + cinfo->first_removed)); + freed += sizeof(item_header40) * cinfo->removed_count; + node40_set_num_items(node, nh, nr_items - cinfo->removed_count); + } + + /* total amount of free space increased */ + nh40_set_free_space(nh, nh40_get_free_space(nh) + freed); +} + +int shrink_item_node40(coord_t * coord, int delta) +{ + node40_header *nh; + item_header40 *ih; + pos_in_node_t pos; + pos_in_node_t nr_items; + char *end; + znode *node; + int off; + + assert("nikita-3487", coord != NULL); + assert("nikita-3488", delta >= 0); + + node = coord->node; + nh = node40_node_header(node); + nr_items = nh40_get_num_items(nh); + + ih = node40_ih_at_coord(coord); + assert("nikita-3489", delta <= length_by_coord_node40(coord)); + off = ih40_get_offset(ih) + length_by_coord_node40(coord); + end = zdata(node) + off; + + /* remove gap made up by removal */ + memmove(end - delta, end, nh40_get_free_space_start(nh) - off); + + /* update item headers of moved items - change their locations */ + pos = coord->item_pos + 1; + ih = node40_ih_at(node, pos); + for (; pos < nr_items; pos++, ih--) { + assert("nikita-3490", ih == node40_ih_at(node, pos)); + ih40_set_offset(ih, ih40_get_offset(ih) - delta); + } + + /* free space start moved to left */ + nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) - delta); + /* total amount of free space increased */ + nh40_set_free_space(nh, nh40_get_free_space(nh) + delta); + /* + * This method does _not_ changes number of items. 
Hence, it cannot + make node empty. Also it doesn't remove items at all, which means + that no keys have to be updated either. + */ + return 0; +} + +/* + * Evaluate cut mode, if key range has been specified. + * + * This is for the case when units are not minimal objects + * addressed by keys. + * + * This doesn't work when range contains objects with + * non-unique keys (e.g. directory items). + */ +static int parse_cut_by_key_range(struct cut40_info *cinfo, + const struct cut_kill_params *params) +{ + reiser4_key min_from_key, max_to_key; + const reiser4_key *from_key = params->from_key; + const reiser4_key *to_key = params->to_key; + /* + * calculate minimal key stored in first item + * of items to be cut (params->from) + */ + item_key_by_coord(params->from, &min_from_key); + /* + * calculate maximal key stored in last item + * of items to be cut (params->to) + */ + max_item_key_by_coord(params->to, &max_to_key); + + if (params->from->item_pos == params->to->item_pos) { + if (keylt(&min_from_key, from_key) + && keylt(to_key, &max_to_key)) + return 1; + + if (keygt(from_key, &min_from_key)) { + /* tail of item is to be cut */ + cinfo->tail_removed = params->from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else if (keylt(to_key, &max_to_key)) { + /* head of item is to be cut */ + cinfo->head_removed = params->from->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + /* item is removed completely */ + cinfo->first_removed = params->from->item_pos; + cinfo->removed_count = 1; + cinfo->mode |= CMODE_WHOLE; + } + } else { + cinfo->first_removed = params->from->item_pos + 1; + cinfo->removed_count = + params->to->item_pos - params->from->item_pos - 1; + + if (keygt(from_key, &min_from_key)) { + /* first item is not cut completely */ + cinfo->tail_removed = params->from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else { + cinfo->first_removed--; + cinfo->removed_count++; + } + if (keylt(to_key, &max_to_key)) { + /* last item is not cut completely */ + 
cinfo->head_removed = params->to->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + cinfo->removed_count++; + } + if (cinfo->removed_count) + cinfo->mode |= CMODE_WHOLE; + } + return 0; +} + +/* + * Evaluate cut mode, if the key range hasn't been specified. + * In this case the range can include objects with non-unique + * keys (e.g. directory entries). + * + * This doesn't work when units are not the minimal objects + * addressed by keys (e.g. bytes in file's body stored in + * unformatted nodes). + */ +static int parse_cut_by_coord_range(struct cut40_info *cinfo, + const struct cut_kill_params *params) +{ + coord_t *from = params->from; + coord_t *to = params->to; + + if (from->item_pos == to->item_pos) { + /* + * cut is performed on only one item + */ + if (from->unit_pos > 0 && + to->unit_pos < coord_last_unit_pos(to)) + /* + * cut from the middle of item + */ + return 1; + if (from->unit_pos > 0) { + /* + * tail of item is to be cut + */ + cinfo->tail_removed = params->from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else if (to->unit_pos < coord_last_unit_pos(to)) { + /* + * head of item is to be cut + */ + cinfo->head_removed = params->from->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + /* + * item is removed completely + */ + assert("edward-1631", + from->unit_pos == 0 && + to->unit_pos == coord_last_unit_pos(to)); + + cinfo->first_removed = params->from->item_pos; + cinfo->removed_count = 1; + cinfo->mode |= CMODE_WHOLE; + } + } else { + cinfo->first_removed = from->item_pos + 1; + cinfo->removed_count = + to->item_pos - from->item_pos - 1; + + if (from->unit_pos > 0) { + /* + * first item is not cut completely + */ + cinfo->tail_removed = from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else { + cinfo->first_removed--; + cinfo->removed_count++; + } + if (to->unit_pos < coord_last_unit_pos(to)) { + /* + * last item is not cut completely + */ + cinfo->head_removed = to->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + cinfo->removed_count++; + } 
+ if (cinfo->removed_count) + cinfo->mode |= CMODE_WHOLE; + } + return 0; +} + +/* + * this is used by cut_node40 and kill_node40. It analyses input parameters + * and calculates cut mode. There are 2 types of cut. First is when a unit is + * removed from the middle of an item. In this case this function returns 1. + * All the rest fits into second case: 0 or 1 of items getting tail cut, 0 or + * more items removed completely and 0 or 1 item getting head cut. Function + * returns 0 in this case + */ +static int parse_cut(struct cut40_info *cinfo, + const struct cut_kill_params *params) +{ + init_cinfo(cinfo); + if (params->from_key == NULL) { + /* + * cut key range is not defined in input parameters + */ + assert("vs-1513", params->to_key == NULL); + return parse_cut_by_coord_range(cinfo, params); + } else + return parse_cut_by_key_range(cinfo, params); +} + +static void +call_kill_hooks(znode * node, pos_in_node_t from, pos_in_node_t count, + carry_kill_data * kdata) +{ + coord_t coord; + item_plugin *iplug; + pos_in_node_t pos; + + coord.node = node; + coord.unit_pos = 0; + coord.between = AT_UNIT; + for (pos = 0; pos < count; pos++) { + coord_set_item_pos(&coord, from + pos); + coord.unit_pos = 0; + coord.between = AT_UNIT; + iplug = item_plugin_by_coord(&coord); + if (iplug->b.kill_hook) { + iplug->b.kill_hook(&coord, 0, coord_num_units(&coord), + kdata); + } + } +} + +/* this is used to kill item partially */ +static pos_in_node_t +kill_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, void *data, + reiser4_key * smallest_removed, reiser4_key * new_first_key) +{ + struct carry_kill_data *kdata; + item_plugin *iplug; + + kdata = data; + iplug = item_plugin_by_coord(coord); + + assert("vs-1524", iplug->b.kill_units); + return iplug->b.kill_units(coord, from, to, kdata, smallest_removed, + new_first_key); +} + +/* call item plugin to cut tail of file */ +static pos_in_node_t +kill_tail(coord_t * coord, void *data, reiser4_key * smallest_removed) +{ + 
struct carry_kill_data *kdata; + pos_in_node_t to; + + kdata = data; + to = coord_last_unit_pos(coord); + return kill_units(coord, coord->unit_pos, to, kdata, smallest_removed, + NULL); +} + +/* call item plugin to cut head of item */ +static pos_in_node_t +kill_head(coord_t * coord, void *data, reiser4_key * smallest_removed, + reiser4_key * new_first_key) +{ + return kill_units(coord, 0, coord->unit_pos, data, smallest_removed, + new_first_key); +} + +/* this is used to cut item partially */ +static pos_in_node_t +cut_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, void *data, + reiser4_key * smallest_removed, reiser4_key * new_first_key) +{ + carry_cut_data *cdata; + item_plugin *iplug; + + cdata = data; + iplug = item_plugin_by_coord(coord); + assert("vs-302", iplug->b.cut_units); + return iplug->b.cut_units(coord, from, to, cdata, smallest_removed, + new_first_key); +} + +/* call item plugin to cut tail of file */ +static pos_in_node_t +cut_tail(coord_t * coord, void *data, reiser4_key * smallest_removed) +{ + carry_cut_data *cdata; + pos_in_node_t to; + + cdata = data; + to = coord_last_unit_pos(cdata->params.from); + return cut_units(coord, coord->unit_pos, to, data, smallest_removed, NULL); +} + +/* call item plugin to cut head of item */ +static pos_in_node_t +cut_head(coord_t * coord, void *data, reiser4_key * smallest_removed, + reiser4_key * new_first_key) +{ + return cut_units(coord, 0, coord->unit_pos, data, smallest_removed, + new_first_key); +} + +/* this returns 1 of key of first item changed, 0 - if it did not */ +static int +prepare_for_compact(struct cut40_info *cinfo, + const struct cut_kill_params *params, int is_cut, + void *data, carry_plugin_info * info) +{ + znode *node; + item_header40 *ih; + pos_in_node_t freed; + pos_in_node_t item_pos; + coord_t coord; + reiser4_key new_first_key; + pos_in_node_t(*kill_units_f) (coord_t *, pos_in_node_t, pos_in_node_t, + void *, reiser4_key *, reiser4_key *); + pos_in_node_t(*kill_tail_f) 
(coord_t *, void *, reiser4_key *); + pos_in_node_t(*kill_head_f) (coord_t *, void *, reiser4_key *, + reiser4_key *); + int retval; + + retval = 0; + + node = params->from->node; + + assert("vs-184", node == params->to->node); + assert("vs-312", !node_is_empty(node)); + assert("vs-297", + coord_compare(params->from, params->to) != COORD_CMP_ON_RIGHT); + + if (is_cut) { + kill_units_f = cut_units; + kill_tail_f = cut_tail; + kill_head_f = cut_head; + } else { + kill_units_f = kill_units; + kill_tail_f = kill_tail; + kill_head_f = kill_head; + } + + if (parse_cut(cinfo, params) == 1) { + /* cut from the middle of item */ + freed = + kill_units_f(params->from, params->from->unit_pos, + params->to->unit_pos, data, + params->smallest_removed, NULL); + + item_pos = params->from->item_pos; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + node40_item_length(node, + item_pos) - freed; + cinfo->freed_space_end = cinfo->freed_space_start + freed; + cinfo->first_moved = item_pos + 1; + } else { + assert("vs-1521", (cinfo->tail_removed != MAX_POS_IN_NODE || + cinfo->first_removed != MAX_POS_IN_NODE || + cinfo->head_removed != MAX_POS_IN_NODE)); + + switch (cinfo->mode) { + case CMODE_TAIL: + /* one item gets cut partially from its end */ + assert("vs-1562", + cinfo->tail_removed == params->from->item_pos); + + freed = + kill_tail_f(params->from, data, + params->smallest_removed); + + item_pos = cinfo->tail_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + node40_item_length(node, + item_pos) - + freed; + cinfo->freed_space_end = + cinfo->freed_space_start + freed; + cinfo->first_moved = cinfo->tail_removed + 1; + break; + + case CMODE_WHOLE: + /* one or more items get removed completely */ + assert("vs-1563", + cinfo->first_removed == params->from->item_pos); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + + /* call kill hook for all items 
removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos = cinfo->first_removed; + ih = node40_ih_at(node, item_pos); + + if (params->smallest_removed) + memcpy(params->smallest_removed, &ih->key, + sizeof(reiser4_key)); + + cinfo->freed_space_start = ih40_get_offset(ih); + + item_pos += (cinfo->removed_count - 1); + ih -= (cinfo->removed_count - 1); + cinfo->freed_space_end = + ih40_get_offset(ih) + node40_item_length(node, + item_pos); + cinfo->first_moved = item_pos + 1; + if (cinfo->first_removed == 0) + /* key of first item of the node changes */ + retval = 1; + break; + + case CMODE_HEAD: + /* one item gets cut partially from its head */ + assert("vs-1565", + cinfo->head_removed == params->from->item_pos); + + freed = + kill_head_f(params->to, data, + params->smallest_removed, + &new_first_key); + + item_pos = cinfo->head_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = ih40_get_offset(ih); + cinfo->freed_space_end = ih40_get_offset(ih) + freed; + cinfo->first_moved = cinfo->head_removed + 1; + + /* item head is removed, therefore, item key changed */ + coord.node = node; + coord_set_item_pos(&coord, item_pos); + coord.unit_pos = 0; + coord.between = AT_UNIT; + update_item_key_node40(&coord, &new_first_key, NULL); + if (item_pos == 0) + /* key of first item of the node changes */ + retval = 1; + break; + + case CMODE_TAIL | CMODE_WHOLE: + /* one item gets cut from its end and one or more items get removed completely */ + assert("vs-1566", + cinfo->tail_removed == params->from->item_pos); + assert("vs-1567", + cinfo->first_removed == cinfo->tail_removed + 1); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + + freed = + kill_tail_f(params->from, data, + params->smallest_removed); + + item_pos = cinfo->tail_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + 
node40_item_length(node, + item_pos) - + freed; + + /* call kill hook for all items removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos += cinfo->removed_count; + ih -= cinfo->removed_count; + cinfo->freed_space_end = + ih40_get_offset(ih) + node40_item_length(node, + item_pos); + cinfo->first_moved = item_pos + 1; + break; + + case CMODE_WHOLE | CMODE_HEAD: + /* one or more items get removed completely and one item gets cut partially from its head */ + assert("vs-1568", + cinfo->first_removed == params->from->item_pos); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + assert("vs-1569", + cinfo->head_removed == + cinfo->first_removed + cinfo->removed_count); + + /* call kill hook for all items removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos = cinfo->first_removed; + ih = node40_ih_at(node, item_pos); + + if (params->smallest_removed) + memcpy(params->smallest_removed, &ih->key, + sizeof(reiser4_key)); + + freed = + kill_head_f(params->to, data, NULL, &new_first_key); + + cinfo->freed_space_start = ih40_get_offset(ih); + + ih = node40_ih_at(node, cinfo->head_removed); + /* this is the most complex case. Item which got head removed and items which are to be moved + intact change their location differently. 
*/ + cinfo->freed_space_end = ih40_get_offset(ih) + freed; + cinfo->first_moved = cinfo->head_removed; + cinfo->head_removed_location = cinfo->freed_space_start; + + /* item head is removed, therefore, item key changed */ + coord.node = node; + coord_set_item_pos(&coord, cinfo->head_removed); + coord.unit_pos = 0; + coord.between = AT_UNIT; + update_item_key_node40(&coord, &new_first_key, NULL); + + assert("vs-1579", cinfo->first_removed == 0); + /* key of first item of the node changes */ + retval = 1; + break; + + case CMODE_TAIL | CMODE_HEAD: + /* one item get cut from its end and its neighbor gets cut from its tail */ + impossible("vs-1576", "this can not happen currently"); + break; + + case CMODE_TAIL | CMODE_WHOLE | CMODE_HEAD: + impossible("vs-1577", "this can not happen currently"); + break; + default: + impossible("vs-1578", "unexpected cut mode"); + break; + } + } + return retval; +} + +/* plugin->u.node.kill + return value is number of items removed completely */ +int kill_node40(struct carry_kill_data *kdata, carry_plugin_info * info) +{ + znode *node; + struct cut40_info cinfo; + int first_key_changed; + + node = kdata->params.from->node; + + first_key_changed = + prepare_for_compact(&cinfo, &kdata->params, 0 /* not cut */ , kdata, + info); + compact(node, &cinfo); + + if (info) { + /* it is not called by node40_shift, so we have to take care + of changes on upper levels */ + if (node_is_empty(node) + && !(kdata->flags & DELETE_RETAIN_EMPTY)) + /* all contents of node is deleted */ + prepare_removal_node40(node, info); + else if (first_key_changed) { + prepare_for_update(NULL, node, info); + } + } + + coord_clear_iplug(kdata->params.from); + coord_clear_iplug(kdata->params.to); + + znode_make_dirty(node); + return cinfo.removed_count == MAX_POS_IN_NODE ? 
0 : cinfo.removed_count; +} + +/* plugin->u.node.cut + return value is number of items removed completely */ +int cut_node40(struct carry_cut_data *cdata, carry_plugin_info * info) +{ + znode *node; + struct cut40_info cinfo; + int first_key_changed; + + node = cdata->params.from->node; + + first_key_changed = + prepare_for_compact(&cinfo, &cdata->params, 1 /* cut */ , cdata, + info); + compact(node, &cinfo); + + if (info) { + /* it is not called by node40_shift, so we have to take care + of changes on upper levels */ + if (node_is_empty(node)) + /* all contents of node is deleted */ + prepare_removal_node40(node, info); + else if (first_key_changed) { + prepare_for_update(NULL, node, info); + } + } + + coord_clear_iplug(cdata->params.from); + coord_clear_iplug(cdata->params.to); + + znode_make_dirty(node); + return cinfo.removed_count == MAX_POS_IN_NODE ? 0 : cinfo.removed_count; +} + +/* this structure is used by shift method of node40 plugin */ +struct shift_params { + shift_direction pend; /* when @pend == append - we are shifting to + left, when @pend == prepend - to right */ + coord_t wish_stop; /* when shifting to left this is last unit we + want shifted, when shifting to right - this + is set to unit we want to start shifting + from */ + znode *target; + int everything; /* it is set to 1 if everything we have to shift is + shifted, 0 - otherwise */ + + /* FIXME-VS: get rid of read_stop */ + + /* these are set by estimate_shift */ + coord_t real_stop; /* this will be set to last unit which will be + really shifted */ + + /* coordinate in source node before operation of unit which becomes + first after shift to left of last after shift to right */ + union { + coord_t future_first; + coord_t future_last; + } u; + + unsigned merging_units; /* number of units of first item which have to + be merged with last item of target node */ + unsigned merging_bytes; /* number of bytes in those units */ + + unsigned entire; /* items shifted in their entirety */ + 
unsigned entire_bytes; /* number of bytes in those items */ + + unsigned part_units; /* number of units of partially copied item */ + unsigned part_bytes; /* number of bytes in those units */ + + unsigned shift_bytes; /* total number of bytes in items shifted (item + headers not included) */ + +}; + +static int item_creation_overhead(coord_t *item) +{ + return node_plugin_by_coord(item)->item_overhead(item->node, NULL); +} + +/* how many units are there in @source starting from source->unit_pos + but not further than @stop_coord */ +static int +wanted_units(coord_t *source, coord_t *stop_coord, shift_direction pend) +{ + if (pend == SHIFT_LEFT) { + assert("vs-181", source->unit_pos == 0); + } else { + assert("vs-182", + source->unit_pos == coord_last_unit_pos(source)); + } + + if (source->item_pos != stop_coord->item_pos) { + /* @source and @stop_coord are different items */ + return coord_last_unit_pos(source) + 1; + } + + if (pend == SHIFT_LEFT) { + return stop_coord->unit_pos + 1; + } else { + return source->unit_pos - stop_coord->unit_pos + 1; + } +} + +/* this calculates what can be copied from @shift->wish_stop.node to + @shift->target */ +static void +estimate_shift(struct shift_params *shift, const reiser4_context * ctx) +{ + unsigned target_free_space, size; + pos_in_node_t stop_item; /* item which estimating should not consider */ + unsigned want; /* number of units of item we want shifted */ + coord_t source; /* item being estimated */ + item_plugin *iplug; + + /* shifting to left/right starts from first/last units of + @shift->wish_stop.node */ + if (shift->pend == SHIFT_LEFT) { + coord_init_first_unit(&source, shift->wish_stop.node); + } else { + coord_init_last_unit(&source, shift->wish_stop.node); + } + shift->real_stop = source; + + /* free space in target node and number of items in source */ + target_free_space = znode_free_space(shift->target); + + shift->everything = 0; + if (!node_is_empty(shift->target)) { + /* target node is not empty, check 
for boundary items + mergeability */ + coord_t to; + + /* item we try to merge @source with */ + if (shift->pend == SHIFT_LEFT) { + coord_init_last_unit(&to, shift->target); + } else { + coord_init_first_unit(&to, shift->target); + } + + if ((shift->pend == SHIFT_LEFT) ? are_items_mergeable(&to, + &source) : + are_items_mergeable(&source, &to)) { + /* how many units of @source do we want to merge to + item @to */ + want = + wanted_units(&source, &shift->wish_stop, + shift->pend); + + /* how many units of @source we can merge to item + @to */ + iplug = item_plugin_by_coord(&source); + if (iplug->b.can_shift != NULL) + shift->merging_units = + iplug->b.can_shift(target_free_space, + &source, shift->target, + shift->pend, &size, + want); + else { + shift->merging_units = 0; + size = 0; + } + shift->merging_bytes = size; + shift->shift_bytes += size; + /* update stop coord to be set to last unit of @source + we can merge to @target */ + if (shift->merging_units) + /* at least one unit can be shifted */ + shift->real_stop.unit_pos = + (shift->merging_units - source.unit_pos - + 1) * shift->pend; + else { + /* nothing can be shifted */ + if (shift->pend == SHIFT_LEFT) + coord_init_before_first_item(&shift-> + real_stop, + source. 
+ node); + else + coord_init_after_last_item(&shift-> + real_stop, + source.node); + } + assert("nikita-2081", shift->real_stop.unit_pos + 1); + + if (shift->merging_units != want) { + /* we could not copy as many as we want, so, + there is no reason for estimating any + longer */ + return; + } + + target_free_space -= size; + coord_add_item_pos(&source, shift->pend); + } + } + + /* number of item nothing of which we want to shift */ + stop_item = shift->wish_stop.item_pos + shift->pend; + + /* calculate how many items can be copied into given free + space as whole */ + for (; source.item_pos != stop_item; + coord_add_item_pos(&source, shift->pend)) { + if (shift->pend == SHIFT_RIGHT) + source.unit_pos = coord_last_unit_pos(&source); + + /* how many units of @source do we want to copy */ + want = wanted_units(&source, &shift->wish_stop, shift->pend); + + if (want == coord_last_unit_pos(&source) + 1) { + /* we want this item to be copied entirely */ + size = + item_length_by_coord(&source) + + item_creation_overhead(&source); + if (size <= target_free_space) { + /* item fits into target node as whole */ + target_free_space -= size; + shift->shift_bytes += + size - item_creation_overhead(&source); + shift->entire_bytes += + size - item_creation_overhead(&source); + shift->entire++; + + /* update shift->real_stop coord to be set to + last unit of @source we can merge to + @target */ + shift->real_stop = source; + if (shift->pend == SHIFT_LEFT) + shift->real_stop.unit_pos = + coord_last_unit_pos(&shift-> + real_stop); + else + shift->real_stop.unit_pos = 0; + continue; + } + } + + /* we reach here only for an item which does not fit into + target node in its entirety. This item may be either + partially shifted, or not shifted at all. We will have to + create new item in target node, so decrease amout of free + space by an item creation overhead. 
We can reach here also + if stop coord is in this item */ + if (target_free_space >= + (unsigned)item_creation_overhead(&source)) { + target_free_space -= item_creation_overhead(&source); + iplug = item_plugin_by_coord(&source); + if (iplug->b.can_shift) { + shift->part_units = iplug->b.can_shift(target_free_space, + &source, + NULL, /* target */ + shift->pend, + &size, + want); + } else { + target_free_space = 0; + shift->part_units = 0; + size = 0; + } + } else { + target_free_space = 0; + shift->part_units = 0; + size = 0; + } + shift->part_bytes = size; + shift->shift_bytes += size; + + /* set @shift->real_stop to last unit of @source we can merge + to @shift->target */ + if (shift->part_units) { + shift->real_stop = source; + shift->real_stop.unit_pos = + (shift->part_units - source.unit_pos - + 1) * shift->pend; + assert("nikita-2082", shift->real_stop.unit_pos + 1); + } + + if (want != shift->part_units) + /* not everything wanted were shifted */ + return; + break; + } + + shift->everything = 1; +} + +static void +copy_units(coord_t * target, coord_t * source, unsigned from, unsigned count, + shift_direction dir, unsigned free_space) +{ + item_plugin *iplug; + + assert("nikita-1463", target != NULL); + assert("nikita-1464", source != NULL); + assert("nikita-1465", from + count <= coord_num_units(source)); + + iplug = item_plugin_by_coord(source); + assert("nikita-1468", iplug == item_plugin_by_coord(target)); + iplug->b.copy_units(target, source, from, count, dir, free_space); + + if (dir == SHIFT_RIGHT) { + /* FIXME-VS: this looks not necessary. 
update_item_key was + called already by copy_units method */ + reiser4_key split_key; + + assert("nikita-1469", target->unit_pos == 0); + + unit_key_by_coord(target, &split_key); + node_plugin_by_coord(target)->update_item_key(target, + &split_key, NULL); + } +} + +/* copy part of @shift->real_stop.node starting either from its beginning or + from its end and ending at @shift->real_stop to either the end or the + beginning of @shift->target */ +static void copy(struct shift_params *shift, size_t node_header_size) +{ + node40_header *nh; + coord_t from; + coord_t to; + item_header40 *from_ih, *to_ih; + int free_space_start; + int new_items; + unsigned old_items; + int old_offset; + unsigned i; + + nh = node40_node_header(shift->target); + free_space_start = nh40_get_free_space_start(nh); + old_items = nh40_get_num_items(nh); + new_items = shift->entire + (shift->part_units ? 1 : 0); + assert("vs-185", + shift->shift_bytes == + shift->merging_bytes + shift->entire_bytes + shift->part_bytes); + + from = shift->wish_stop; + + coord_init_first_unit(&to, shift->target); + + /* NOTE:NIKITA->VS not sure what I am doing: shift->target is empty, + hence to.between is set to EMPTY_NODE above. Looks like we want it + to be AT_UNIT. + + Oh, wonders of ->betweeness... 
+ + */ + to.between = AT_UNIT; + + if (shift->pend == SHIFT_LEFT) { + /* copying to left */ + + coord_set_item_pos(&from, 0); + from_ih = node40_ih_at(from.node, 0); + + coord_set_item_pos(&to, + node40_num_of_items_internal(to.node) - 1); + if (shift->merging_units) { + /* expand last item, so that plugin methods will see + correct data */ + free_space_start += shift->merging_bytes; + nh40_set_free_space_start(nh, + (unsigned)free_space_start); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + shift->merging_bytes); + + /* appending last item of @target */ + copy_units(&to, &from, 0, /* starting from 0-th unit */ + shift->merging_units, SHIFT_LEFT, + shift->merging_bytes); + coord_inc_item_pos(&from); + from_ih--; + coord_inc_item_pos(&to); + } + + to_ih = node40_ih_at(shift->target, old_items); + if (shift->entire) { + /* copy @entire items entirely */ + + /* copy item headers */ + memcpy(to_ih - shift->entire + 1, + from_ih - shift->entire + 1, + shift->entire * sizeof(item_header40)); + /* update item header offset */ + old_offset = ih40_get_offset(from_ih); + /* AUDIT: Looks like if we calculate old_offset + free_space_start here instead of just old_offset, we can perform one "add" operation less per each iteration */ + for (i = 0; i < shift->entire; i++, to_ih--, from_ih--) + ih40_set_offset(to_ih, + ih40_get_offset(from_ih) - + old_offset + free_space_start); + + /* copy item bodies */ + memcpy(zdata(shift->target) + free_space_start, zdata(from.node) + old_offset, /*ih40_get_offset (from_ih), */ + shift->entire_bytes); + + coord_add_item_pos(&from, (int)shift->entire); + coord_add_item_pos(&to, (int)shift->entire); + } + + nh40_set_free_space_start(nh, + free_space_start + + shift->shift_bytes - + shift->merging_bytes); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + (shift->shift_bytes - shift->merging_bytes + + sizeof(item_header40) * new_items)); + + /* update node header */ + node40_set_num_items(shift->target, nh, old_items + new_items); 
+ assert("vs-170", + nh40_get_free_space(nh) < znode_size(shift->target)); + + if (shift->part_units) { + /* copy heading part (@part units) of @source item as + a new item into @target->node */ + + /* copy item header of partially copied item */ + coord_set_item_pos(&to, + node40_num_of_items_internal(to.node) + - 1); + memcpy(to_ih, from_ih, sizeof(item_header40)); + ih40_set_offset(to_ih, + nh40_get_free_space_start(nh) - + shift->part_bytes); + if (item_plugin_by_coord(&to)->b.init) + item_plugin_by_coord(&to)->b.init(&to, &from, + NULL); + copy_units(&to, &from, 0, shift->part_units, SHIFT_LEFT, + shift->part_bytes); + } + + } else { + /* copying to right */ + + coord_set_item_pos(&from, + node40_num_of_items_internal(from.node) - 1); + from_ih = node40_ih_at_coord(&from); + + coord_set_item_pos(&to, 0); + + /* prepare space for new items */ + memmove(zdata(to.node) + node_header_size + + shift->shift_bytes, + zdata(to.node) + node_header_size, + free_space_start - node_header_size); + /* update item headers of moved items */ + to_ih = node40_ih_at(to.node, 0); + /* first item gets @merging_bytes longer. 
free space appears + at its beginning */ + if (!node_is_empty(to.node)) + ih40_set_offset(to_ih, + ih40_get_offset(to_ih) + + shift->shift_bytes - + shift->merging_bytes); + + for (i = 1; i < old_items; i++) + ih40_set_offset(to_ih - i, + ih40_get_offset(to_ih - i) + + shift->shift_bytes); + + /* move item headers to make space for new items */ + memmove(to_ih - old_items + 1 - new_items, + to_ih - old_items + 1, + sizeof(item_header40) * old_items); + to_ih -= (new_items - 1); + + nh40_set_free_space_start(nh, + free_space_start + + shift->shift_bytes); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + (shift->shift_bytes + + sizeof(item_header40) * new_items)); + + /* update node header */ + node40_set_num_items(shift->target, nh, old_items + new_items); + assert("vs-170", + nh40_get_free_space(nh) < znode_size(shift->target)); + + if (shift->merging_units) { + coord_add_item_pos(&to, new_items); + to.unit_pos = 0; + to.between = AT_UNIT; + /* prepend first item of @to */ + copy_units(&to, &from, + coord_last_unit_pos(&from) - + shift->merging_units + 1, + shift->merging_units, SHIFT_RIGHT, + shift->merging_bytes); + coord_dec_item_pos(&from); + from_ih++; + } + + if (shift->entire) { + /* copy @entire items entirely */ + + /* copy item headers */ + memcpy(to_ih, from_ih, + shift->entire * sizeof(item_header40)); + + /* update item header offset */ + old_offset = + ih40_get_offset(from_ih + shift->entire - 1); + /* AUDIT: old_offset + sizeof (node40_header) + shift->part_bytes calculation can be taken off the loop. 
*/ + for (i = 0; i < shift->entire; i++, to_ih++, from_ih++) + ih40_set_offset(to_ih, + ih40_get_offset(from_ih) - + old_offset + + node_header_size + + shift->part_bytes); + /* copy item bodies */ + coord_add_item_pos(&from, -(int)(shift->entire - 1)); + memcpy(zdata(to.node) + node_header_size + + shift->part_bytes, item_by_coord_node40(&from), + shift->entire_bytes); + coord_dec_item_pos(&from); + } + + if (shift->part_units) { + coord_set_item_pos(&to, 0); + to.unit_pos = 0; + to.between = AT_UNIT; + /* copy heading part (@part units) of @source item as + a new item into @target->node */ + + /* copy item header of partially copied item */ + memcpy(to_ih, from_ih, sizeof(item_header40)); + ih40_set_offset(to_ih, node_header_size); + if (item_plugin_by_coord(&to)->b.init) + item_plugin_by_coord(&to)->b.init(&to, &from, + NULL); + copy_units(&to, &from, + coord_last_unit_pos(&from) - + shift->part_units + 1, shift->part_units, + SHIFT_RIGHT, shift->part_bytes); + } + } +} + +/* remove everything either before or after @fact_stop. 
Number of items + removed completely is returned */ +static int delete_copied(struct shift_params *shift) +{ + coord_t from; + coord_t to; + struct carry_cut_data cdata; + + if (shift->pend == SHIFT_LEFT) { + /* we were shifting to left, remove everything from the + beginning of @shift->wish_stop->node upto + @shift->wish_stop */ + coord_init_first_unit(&from, shift->real_stop.node); + to = shift->real_stop; + + /* store old coordinate of unit which will be first after + shift to left */ + shift->u.future_first = to; + coord_next_unit(&shift->u.future_first); + } else { + /* we were shifting to right, remove everything from + @shift->stop_coord upto to end of + @shift->stop_coord->node */ + from = shift->real_stop; + coord_init_last_unit(&to, from.node); + + /* store old coordinate of unit which will be last after + shift to right */ + shift->u.future_last = from; + coord_prev_unit(&shift->u.future_last); + } + + cdata.params.from = &from; + cdata.params.to = &to; + cdata.params.from_key = NULL; + cdata.params.to_key = NULL; + cdata.params.smallest_removed = NULL; + return cut_node40(&cdata, NULL); +} + +/* something was moved between @left and @right. Add carry operation to @info + list to have carry to update delimiting key between them */ +static int +prepare_for_update(znode * left, znode * right, carry_plugin_info * info) +{ + carry_op *op; + carry_node *cn; + + if (info == NULL) + /* nowhere to send operation to. */ + return 0; + + if (!should_notify_parent(right)) + return 0; + + op = node_post_carry(info, COP_UPDATE, right, 1); + if (IS_ERR(op) || op == NULL) + return op ? 
PTR_ERR(op) : -EIO; + + if (left != NULL) { + carry_node *reference; + + if (info->doing) + reference = insert_carry_node(info->doing, + info->todo, left); + else + reference = op->node; + assert("nikita-2992", reference != NULL); + cn = reiser4_add_carry(info->todo, POOLO_BEFORE, reference); + if (IS_ERR(cn)) + return PTR_ERR(cn); + cn->parent = 1; + cn->node = left; + if (ZF_ISSET(left, JNODE_ORPHAN)) + cn->left_before = 1; + op->u.update.left = cn; + } else + op->u.update.left = NULL; + return 0; +} + +/* plugin->u.node.prepare_removal + to delete a pointer to @empty from the tree add corresponding carry + operation (delete) to @info list */ +int prepare_removal_node40(znode * empty, carry_plugin_info * info) +{ + carry_op *op; + reiser4_tree *tree; + + if (!should_notify_parent(empty)) + return 0; + /* already on a road to Styx */ + if (ZF_ISSET(empty, JNODE_HEARD_BANSHEE)) + return 0; + op = node_post_carry(info, COP_DELETE, empty, 1); + if (IS_ERR(op) || op == NULL) + return RETERR(op ? 
PTR_ERR(op) : -EIO); + + op->u.delete.child = NULL; + op->u.delete.flags = 0; + + /* fare thee well */ + tree = znode_get_tree(empty); + read_lock_tree(tree); + write_lock_dk(tree); + znode_set_ld_key(empty, znode_get_rd_key(empty)); + if (znode_is_left_connected(empty) && empty->left) + znode_set_rd_key(empty->left, znode_get_rd_key(empty)); + write_unlock_dk(tree); + read_unlock_tree(tree); + + ZF_SET(empty, JNODE_HEARD_BANSHEE); + return 0; +} + +/* something were shifted from @insert_coord->node to @shift->target, update + @insert_coord correspondingly */ +static void +adjust_coord(coord_t * insert_coord, struct shift_params *shift, int removed, + int including_insert_coord) +{ + /* item plugin was invalidated by shifting */ + coord_clear_iplug(insert_coord); + + if (node_is_empty(shift->wish_stop.node)) { + assert("vs-242", shift->everything); + if (including_insert_coord) { + if (shift->pend == SHIFT_RIGHT) { + /* set @insert_coord before first unit of + @shift->target node */ + coord_init_before_first_item(insert_coord, + shift->target); + } else { + /* set @insert_coord after last in target node */ + coord_init_after_last_item(insert_coord, + shift->target); + } + } else { + /* set @insert_coord inside of empty node. There is + only one possible coord within an empty + node. 
init_first_unit will set that coord */ + coord_init_first_unit(insert_coord, + shift->wish_stop.node); + } + return; + } + + if (shift->pend == SHIFT_RIGHT) { + /* there was shifting to right */ + if (shift->everything) { + /* everything wanted was shifted */ + if (including_insert_coord) { + /* @insert_coord is set before first unit of + @to node */ + coord_init_before_first_item(insert_coord, + shift->target); + insert_coord->between = BEFORE_UNIT; + } else { + /* @insert_coord is set after last unit of + @insert->node */ + coord_init_last_unit(insert_coord, + shift->wish_stop.node); + insert_coord->between = AFTER_UNIT; + } + } + return; + } + + /* there was shifting to left */ + if (shift->everything) { + /* everything wanted was shifted */ + if (including_insert_coord) { + /* @insert_coord is set after last unit in @to node */ + coord_init_after_last_item(insert_coord, shift->target); + } else { + /* @insert_coord is set before first unit in the same + node */ + coord_init_before_first_item(insert_coord, + shift->wish_stop.node); + } + return; + } + + /* FIXME-VS: the code below is complicated because with between == + AFTER_ITEM unit_pos is set to 0 */ + + if (!removed) { + /* no items were shifted entirely */ + assert("vs-195", shift->merging_units == 0 + || shift->part_units == 0); + + if (shift->real_stop.item_pos == insert_coord->item_pos) { + if (shift->merging_units) { + if (insert_coord->between == AFTER_UNIT) { + assert("nikita-1441", + insert_coord->unit_pos >= + shift->merging_units); + insert_coord->unit_pos -= + shift->merging_units; + } else if (insert_coord->between == BEFORE_UNIT) { + assert("nikita-2090", + insert_coord->unit_pos > + shift->merging_units); + insert_coord->unit_pos -= + shift->merging_units; + } + + assert("nikita-2083", + insert_coord->unit_pos + 1); + } else { + if (insert_coord->between == AFTER_UNIT) { + assert("nikita-1442", + insert_coord->unit_pos >= + shift->part_units); + insert_coord->unit_pos -= + shift->part_units; 
+ } else if (insert_coord->between == BEFORE_UNIT) { + assert("nikita-2089", + insert_coord->unit_pos > + shift->part_units); + insert_coord->unit_pos -= + shift->part_units; + } + + assert("nikita-2084", + insert_coord->unit_pos + 1); + } + } + return; + } + + /* we shifted to left and there was no enough space for everything */ + switch (insert_coord->between) { + case AFTER_UNIT: + case BEFORE_UNIT: + if (shift->real_stop.item_pos == insert_coord->item_pos) + insert_coord->unit_pos -= shift->part_units; + case AFTER_ITEM: + coord_add_item_pos(insert_coord, -removed); + break; + default: + impossible("nikita-2087", "not ready"); + } + assert("nikita-2085", insert_coord->unit_pos + 1); +} + +static int call_shift_hooks(struct shift_params *shift) +{ + unsigned i, shifted; + coord_t coord; + item_plugin *iplug; + + assert("vs-275", !node_is_empty(shift->target)); + + /* number of items shift touches */ + shifted = + shift->entire + (shift->merging_units ? 1 : 0) + + (shift->part_units ? 
1 : 0); + + if (shift->pend == SHIFT_LEFT) { + /* moved items are at the end */ + coord_init_last_unit(&coord, shift->target); + coord.unit_pos = 0; + + assert("vs-279", shift->pend == 1); + for (i = 0; i < shifted; i++) { + unsigned from, count; + + iplug = item_plugin_by_coord(&coord); + if (i == 0 && shift->part_units) { + assert("vs-277", + coord_num_units(&coord) == + shift->part_units); + count = shift->part_units; + from = 0; + } else if (i == shifted - 1 && shift->merging_units) { + count = shift->merging_units; + from = coord_num_units(&coord) - count; + } else { + count = coord_num_units(&coord); + from = 0; + } + + if (iplug->b.shift_hook) { + iplug->b.shift_hook(&coord, from, count, + shift->wish_stop.node); + } + coord_add_item_pos(&coord, -shift->pend); + } + } else { + /* moved items are at the beginning */ + coord_init_first_unit(&coord, shift->target); + + assert("vs-278", shift->pend == -1); + for (i = 0; i < shifted; i++) { + unsigned from, count; + + iplug = item_plugin_by_coord(&coord); + if (i == 0 && shift->part_units) { + assert("vs-277", + coord_num_units(&coord) == + shift->part_units); + count = coord_num_units(&coord); + from = 0; + } else if (i == shifted - 1 && shift->merging_units) { + count = shift->merging_units; + from = 0; + } else { + count = coord_num_units(&coord); + from = 0; + } + + if (iplug->b.shift_hook) { + iplug->b.shift_hook(&coord, from, count, + shift->wish_stop.node); + } + coord_add_item_pos(&coord, -shift->pend); + } + } + + return 0; +} + +/* shift to left is completed. Return 1 if unit @old was moved to left neighbor */ +static int +unit_moved_left(const struct shift_params *shift, const coord_t * old) +{ + assert("vs-944", shift->real_stop.node == old->node); + + if (shift->real_stop.item_pos < old->item_pos) + return 0; + if (shift->real_stop.item_pos == old->item_pos) { + if (shift->real_stop.unit_pos < old->unit_pos) + return 0; + } + return 1; +} + +/* shift to right is completed. 
Return 1 if unit @old was moved to right + neighbor */ +static int +unit_moved_right(const struct shift_params *shift, const coord_t * old) +{ + assert("vs-944", shift->real_stop.node == old->node); + + if (shift->real_stop.item_pos > old->item_pos) + return 0; + if (shift->real_stop.item_pos == old->item_pos) { + if (shift->real_stop.unit_pos > old->unit_pos) + return 0; + } + return 1; +} + +/* coord @old was set in node from which shift was performed. What was shifted + is stored in @shift. Update @old correspondingly to performed shift */ +static coord_t *adjust_coord2(const struct shift_params *shift, + const coord_t * old, coord_t * new) +{ + coord_clear_iplug(new); + new->between = old->between; + + coord_clear_iplug(new); + if (old->node == shift->target) { + if (shift->pend == SHIFT_LEFT) { + /* coord which is set inside of left neighbor does not + change during shift to left */ + coord_dup(new, old); + return new; + } + new->node = old->node; + coord_set_item_pos(new, + old->item_pos + shift->entire + + (shift->part_units ? 1 : 0)); + new->unit_pos = old->unit_pos; + if (old->item_pos == 0 && shift->merging_units) + new->unit_pos += shift->merging_units; + return new; + } + + assert("vs-977", old->node == shift->wish_stop.node); + if (shift->pend == SHIFT_LEFT) { + if (unit_moved_left(shift, old)) { + /* unit @old moved to left neighbor. Calculate its + coordinate there */ + new->node = shift->target; + coord_set_item_pos(new, + node_num_items(shift->target) - + shift->entire - + (shift->part_units ? 1 : 0) + + old->item_pos); + + new->unit_pos = old->unit_pos; + if (shift->merging_units) { + coord_dec_item_pos(new); + if (old->item_pos == 0) { + /* unit_pos only changes if item got + merged */ + new->unit_pos = + coord_num_units(new) - + (shift->merging_units - + old->unit_pos); + } + } + } else { + /* unit @old did not move to left neighbor. + + Use _nocheck, because @old is outside of its node. 
+ */ + coord_dup_nocheck(new, old); + coord_add_item_pos(new, + -shift->u.future_first.item_pos); + if (new->item_pos == 0) + new->unit_pos -= shift->u.future_first.unit_pos; + } + } else { + if (unit_moved_right(shift, old)) { + /* unit @old moved to right neighbor */ + new->node = shift->target; + coord_set_item_pos(new, + old->item_pos - + shift->real_stop.item_pos); + if (new->item_pos == 0) { + /* unit @old might change unit pos */ + coord_set_item_pos(new, + old->unit_pos - + shift->real_stop.unit_pos); + } + } else { + /* unit @old did not move to right neighbor, therefore + it did not change */ + coord_dup(new, old); + } + } + coord_set_iplug(new, item_plugin_by_coord(new)); + return new; +} + +/* this is called when shift is completed (something of source node is copied + to target and deleted in source) to update all taps set in current + context */ +static void update_taps(const struct shift_params *shift) +{ + tap_t *tap; + coord_t new; + + for_all_taps(tap) { + /* update only taps set to nodes participating in shift */ + if (tap->coord->node == shift->wish_stop.node + || tap->coord->node == shift->target) + tap_to_coord(tap, + adjust_coord2(shift, tap->coord, &new)); + } +} + +#if REISER4_DEBUG + +struct shift_check { + reiser4_key key; + __u16 plugin_id; + union { + __u64 bytes; + __u64 entries; + void *unused; + } u; +}; + +void *shift_check_prepare(const znode * left, const znode * right) +{ + pos_in_node_t i, nr_items; + int mergeable; + struct shift_check *data; + item_header40 *ih; + + if (node_is_empty(left) || node_is_empty(right)) + mergeable = 0; + else { + coord_t l, r; + + coord_init_last_unit(&l, left); + coord_init_first_unit(&r, right); + mergeable = are_items_mergeable(&l, &r); + } + nr_items = + node40_num_of_items_internal(left) + + node40_num_of_items_internal(right) - (mergeable ? 
1 : 0); + data = + kmalloc(sizeof(struct shift_check) * nr_items, + reiser4_ctx_gfp_mask_get()); + if (data != NULL) { + coord_t coord; + pos_in_node_t item_pos; + + coord_init_first_unit(&coord, left); + i = 0; + + for (item_pos = 0; + item_pos < node40_num_of_items_internal(left); + item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + data[i].key = ih->key; + data[i].plugin_id = le16_to_cpu(get_unaligned(&ih->plugin_id)); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i].u.bytes = coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i].u.bytes = + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i].u.entries = coord_num_units(&coord); + break; + default: + data[i].u.unused = NULL; + break; + } + i++; + } + + coord_init_first_unit(&coord, right); + + if (mergeable) { + assert("vs-1609", i != 0); + + ih = node40_ih_at_coord(&coord); + + assert("vs-1589", + data[i - 1].plugin_id == + le16_to_cpu(get_unaligned(&ih->plugin_id))); + switch (data[i - 1].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i - 1].u.bytes += coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i - 1].u.bytes += + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i - 1].u.entries += + coord_num_units(&coord); + break; + default: + impossible("vs-1605", "wrong mergeable item"); + break; + } + item_pos = 1; + } else + item_pos = 0; + for (; item_pos < node40_num_of_items_internal(right); + item_pos++) { + + assert("vs-1604", i < nr_items); + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + data[i].key = ih->key; + data[i].plugin_id = le16_to_cpu(get_unaligned(&ih->plugin_id)); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i].u.bytes = coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i].u.bytes = + 
reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i].u.entries = coord_num_units(&coord); + break; + default: + data[i].u.unused = NULL; + break; + } + i++; + } + assert("vs-1606", i == nr_items); + } + return data; +} + +void shift_check(void *vp, const znode * left, const znode * right) +{ + pos_in_node_t i, nr_items; + coord_t coord; + __u64 last_bytes; + int mergeable; + item_header40 *ih; + pos_in_node_t item_pos; + struct shift_check *data; + + data = (struct shift_check *)vp; + + if (data == NULL) + return; + + if (node_is_empty(left) || node_is_empty(right)) + mergeable = 0; + else { + coord_t l, r; + + coord_init_last_unit(&l, left); + coord_init_first_unit(&r, right); + mergeable = are_items_mergeable(&l, &r); + } + + nr_items = + node40_num_of_items_internal(left) + + node40_num_of_items_internal(right) - (mergeable ? 1 : 0); + + i = 0; + last_bytes = 0; + + coord_init_first_unit(&coord, left); + + for (item_pos = 0; item_pos < node40_num_of_items_internal(left); + item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + assert("vs-1611", i == item_pos); + assert("vs-1590", keyeq(&ih->key, &data[i].key)); + assert("vs-1591", + le16_to_cpu(get_unaligned(&ih->plugin_id)) == data[i].plugin_id); + if ((i < (node40_num_of_items_internal(left) - 1)) + || !mergeable) { + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1592", + data[i].u.bytes == + coord_num_units(&coord)); + break; + case EXTENT_POINTER_ID: + assert("vs-1593", + data[i].u.bytes == + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + case COMPOUND_DIR_ID: + assert("vs-1594", + data[i].u.entries == + coord_num_units(&coord)); + break; + default: + break; + } + } + if (item_pos == (node40_num_of_items_internal(left) - 1) + && mergeable) { + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + last_bytes = coord_num_units(&coord); + break; + case 
EXTENT_POINTER_ID: + last_bytes = + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + last_bytes = coord_num_units(&coord); + break; + default: + impossible("vs-1595", "wrong mergeable item"); + break; + } + } + i++; + } + + coord_init_first_unit(&coord, right); + if (mergeable) { + ih = node40_ih_at_coord(&coord); + + assert("vs-1589", + data[i - 1].plugin_id == le16_to_cpu(get_unaligned(&ih->plugin_id))); + assert("vs-1608", last_bytes != 0); + switch (data[i - 1].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1596", + data[i - 1].u.bytes == + last_bytes + coord_num_units(&coord)); + break; + + case EXTENT_POINTER_ID: + assert("vs-1597", + data[i - 1].u.bytes == + last_bytes + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + + case COMPOUND_DIR_ID: + assert("vs-1598", + data[i - 1].u.bytes == + last_bytes + coord_num_units(&coord)); + break; + default: + impossible("vs-1599", "wrong mergeable item"); + break; + } + item_pos = 1; + } else + item_pos = 0; + + for (; item_pos < node40_num_of_items_internal(right); item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + assert("vs-1612", keyeq(&ih->key, &data[i].key)); + assert("vs-1613", + le16_to_cpu(get_unaligned(&ih->plugin_id)) == data[i].plugin_id); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1600", + data[i].u.bytes == coord_num_units(&coord)); + break; + case EXTENT_POINTER_ID: + assert("vs-1601", + data[i].u.bytes == + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + case COMPOUND_DIR_ID: + assert("vs-1602", + data[i].u.entries == coord_num_units(&coord)); + break; + default: + break; + } + i++; + } + + assert("vs-1603", i == nr_items); + kfree(data); +} + +#endif + +/* + * common part of ->shift() for all nodes, + * which contain node40_header at the beginning and + * the table of item headers at the end + */ +int 
shift_node40_common(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info, + size_t node_header_size) +{ + struct shift_params shift; + int result; + znode *left, *right; + znode *source; + int target_empty; + + assert("nikita-2161", coord_check(from)); + + memset(&shift, 0, sizeof(shift)); + shift.pend = pend; + shift.wish_stop = *from; + shift.target = to; + + assert("nikita-1473", znode_is_write_locked(from->node)); + assert("nikita-1474", znode_is_write_locked(to)); + + source = from->node; + + /* set @shift.wish_stop to rightmost/leftmost unit among units we want + shifted */ + if (pend == SHIFT_LEFT) { + result = coord_set_to_left(&shift.wish_stop); + left = to; + right = from->node; + } else { + result = coord_set_to_right(&shift.wish_stop); + left = from->node; + right = to; + } + + if (result) { + /* move insertion coord even if there is nothing to move */ + if (including_stop_coord) { + /* move insertion coord (@from) */ + if (pend == SHIFT_LEFT) { + /* after last item in target node */ + coord_init_after_last_item(from, to); + } else { + /* before first item in target node */ + coord_init_before_first_item(from, to); + } + } + + if (delete_child && node_is_empty(shift.wish_stop.node)) + result = + prepare_removal_node40(shift.wish_stop.node, info); + else + result = 0; + /* there is nothing to shift */ + assert("nikita-2078", coord_check(from)); + return result; + } + + target_empty = node_is_empty(to); + + /* when first node plugin with item body compression is implemented, + this must be changed to call node specific plugin */ + + /* shift->stop_coord is updated to last unit which really will be + shifted */ + estimate_shift(&shift, get_current_context()); + if (!shift.shift_bytes) { + /* we could not shift anything */ + assert("nikita-2079", coord_check(from)); + return 0; + } + + 
copy(&shift, node_header_size); + + /* result value of this is important. It is used by adjust_coord below */ + result = delete_copied(&shift); + + assert("vs-1610", result >= 0); + assert("vs-1471", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + /* item which has been moved from one node to another might want to do + something on that event. This can be done by item's shift_hook + method, which will be now called for every moved items */ + call_shift_hooks(&shift); + + assert("vs-1472", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + update_taps(&shift); + + assert("vs-1473", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + /* adjust @from pointer in accordance with @including_stop_coord flag + and amount of data which was really shifted */ + adjust_coord(from, &shift, result, including_stop_coord); + + if (target_empty) + /* + * items were shifted into empty node. Update delimiting key. + */ + result = prepare_for_update(NULL, left, info); + + /* add update operation to @info, which is the list of operations to + be performed on a higher level */ + result = prepare_for_update(left, right, info); + if (!result && node_is_empty(source) && delete_child) { + /* all contents of @from->node is moved to @to and @from->node + has to be removed from the tree, so, on higher level we + will be removing the pointer to node @from->node */ + result = prepare_removal_node40(source, info); + } + assert("nikita-2080", coord_check(from)); + return result ? 
result : (int)shift.shift_bytes; +} + +/* + * plugin->u.node.shift + * look for description of this method in plugin/node/node.h + */ +int shift_node40(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info) +{ + return shift_node40_common(from, to, pend, delete_child, + including_stop_coord, info, + sizeof(node40_header)); +} + +/* plugin->u.node.fast_insert() + look for description of this method in plugin/node/node.h */ +int fast_insert_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.fast_paste() + look for description of this method in plugin/node/node.h */ +int fast_paste_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.fast_cut() + look for description of this method in plugin/node/node.h */ +int fast_cut_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.modify - not defined */ + +/* plugin->u.node.max_item_size */ +int max_item_size_node40(void) +{ + return reiser4_get_current_sb()->s_blocksize - sizeof(node40_header) - + sizeof(item_header40); +} + +/* plugin->u.node.set_item_plugin */ +int set_item_plugin_node40(coord_t *coord, item_id id) +{ + item_header40 *ih; + + ih = node40_ih_at_coord(coord); + put_unaligned(cpu_to_le16(id), &ih->plugin_id); + coord->iplugid = id; + return 0; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node40.h b/fs/reiser4/plugin/node/node40.h new file mode 100644 index 000000000000..5a0864d9ad46 --- /dev/null +++ b/fs/reiser4/plugin/node/node40.h @@ -0,0 +1,130 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( 
__REISER4_NODE40_H__ ) +#define __REISER4_NODE40_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "node.h" + +#include + +/* format of node header for 40 node layouts. Keep bloat out of this struct. */ +typedef struct node40_header { + /* identifier of node plugin. Must be located at the very beginning + of a node. */ + common_node_header common_header; /* this is 16 bits */ + /* number of items. Should be first element in the node header, + because we haven't yet finally decided whether it shouldn't go into + common_header. + */ +/* NIKITA-FIXME-HANS: Create a macro such that if there is only one + * node format at compile time, and it is this one, accesses do not function dereference when + * accessing these fields (and otherwise they do). Probably 80% of users will only have one node format at a time throughout the life of reiser4. */ + d16 nr_items; + /* free space in node measured in bytes */ + d16 free_space; + /* offset to start of free space in node */ + d16 free_space_start; + /* for reiser4_fsck. When information about what is a free + block is corrupted, and we try to recover everything even + if marked as freed, then old versions of data may + duplicate newer versions, and this field allows us to + restore the newer version. Also useful for when users + who don't have the new trashcan installed on their linux distro + delete the wrong files and send us desperate emails + offering $25 for them back. */ + + /* magic field we need to tell formatted nodes NIKITA-FIXME-HANS: improve this comment */ + d32 magic; + /* flushstamp is made of mk_id and write_counter. mk_id is an + id generated randomly at mkreiserfs time. So we can just + skip all nodes with different mk_id. write_counter is d64 + incrementing counter of writes on disk. It is used for + choosing the newest data at fsck time. NIKITA-FIXME-HANS: why was field name changed but not comment? 
*/ + + d32 mkfs_id; + d64 flush_id; + /* node flags to be used by fsck (reiser4ck or reiser4fsck?) + and repacker NIKITA-FIXME-HANS: say more or reference elsewhere that says more */ + d16 flags; + + /* 1 is leaf level, 2 is twig level, root is the numerically + largest level */ + d8 level; + + d8 pad; +} PACKED node40_header; + +/* item headers are not standard across all node layouts, pass + pos_in_node to functions instead */ +typedef struct item_header40 { + /* key of item */ + /* 0 */ reiser4_key key; + /* offset from start of a node measured in 8-byte chunks */ + /* 24 */ d16 offset; + /* 26 */ d16 flags; + /* 28 */ d16 plugin_id; +} PACKED item_header40; + +size_t item_overhead_node40(const znode * node, flow_t * aflow); +size_t free_space_node40(znode * node); +node_search_result lookup_node40(znode * node, const reiser4_key * key, + lookup_bias bias, coord_t * coord); +int num_of_items_node40(const znode * node); +char *item_by_coord_node40(const coord_t * coord); +int length_by_coord_node40(const coord_t * coord); +item_plugin *plugin_by_coord_node40(const coord_t * coord); +reiser4_key *key_at_node40(const coord_t * coord, reiser4_key * key); +size_t estimate_node40(znode * node); +int check_node40(const znode * node, __u32 flags, const char **error); +int parse_node40_common(znode *node, const __u32 magic); +int parse_node40(znode * node); +int init_node40_common(znode *node, node_plugin *nplug, + size_t node_header_size, const __u32 magic); +int init_node40(znode *node); + +#ifdef GUESS_EXISTS +int guess_node40_common(const znode *node, reiser4_node_id id, + const __u32 magic); +int guess_node40(const znode *node); +#endif + +void change_item_size_node40(coord_t * coord, int by); +int create_item_node40(coord_t * target, const reiser4_key * key, + reiser4_item_data * data, carry_plugin_info * info); +void update_item_key_node40(coord_t * target, const reiser4_key * key, + carry_plugin_info * info); +int kill_node40(struct carry_kill_data *, 
carry_plugin_info *); +int cut_node40(struct carry_cut_data *, carry_plugin_info *); +int shift_node40_common(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info, size_t nh_size); +int shift_node40(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info); +int fast_insert_node40(const coord_t * coord); +int fast_paste_node40(const coord_t * coord); +int fast_cut_node40(const coord_t * coord); +int max_item_size_node40(void); +int prepare_removal_node40(znode * empty, carry_plugin_info * info); +int set_item_plugin_node40(coord_t * coord, item_id id); +int shrink_item_node40(coord_t * coord, int delta); + +#if REISER4_DEBUG +void *shift_check_prepare(const znode *left, const znode *right); +void shift_check(void *vp, const znode *left, const znode *right); +#endif + +/* __REISER4_NODE40_H__ */ +#endif +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node41.c b/fs/reiser4/plugin/node/node41.c new file mode 100644 index 000000000000..b5c2cb537dc9 --- /dev/null +++ b/fs/reiser4/plugin/node/node41.c @@ -0,0 +1,137 @@ +/* + * Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README + */ + +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "node41.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../pool.h" +#include "../../carry.h" +#include "../../tap.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../checksum.h" +#include "../../reiser4.h" + +#include +#include +#include + +/* + * node41 layout it almost the same as node40: + * node41_header is at the beginning and a table of item headers + * is at the end. 
Ther difference is that node41_header contains + * a 32-bit checksum (see node41.h) + */ + +static const __u32 REISER4_NODE41_MAGIC = 0x19051966; + +static inline node41_header *node41_node_header(const znode *node) +{ + assert("edward-1634", node != NULL); + assert("edward-1635", znode_page(node) != NULL); + assert("edward-1636", zdata(node) != NULL); + + return (node41_header *)zdata(node); +} + +int csum_node41(znode *node, int check) +{ + __u32 cpu_csum; + + cpu_csum = reiser4_crc32c(get_current_super_private()->csum_tfm, + ~0, + zdata(node), + sizeof(struct node40_header)); + cpu_csum = reiser4_crc32c(get_current_super_private()->csum_tfm, + cpu_csum, + zdata(node) + sizeof(struct node41_header), + reiser4_get_current_sb()->s_blocksize - + sizeof(node41_header)); + if (check) + return cpu_csum == nh41_get_csum(node41_node_header(node)); + else { + nh41_set_csum(node41_node_header(node), cpu_csum); + return 1; + } +} + +/* + * plugin->u.node.parse + * look for description of this method in plugin/node/node.h + */ +int parse_node41(znode *node /* node to parse */) +{ + int ret; + + ret = csum_node41(node, 1/* check */); + if (!ret) { + warning("edward-1645", + "block %llu: bad checksum. 
FSCK?", + *jnode_get_block(ZJNODE(node))); + reiser4_handle_error(); + return RETERR(-EIO); + } + return parse_node40_common(node, REISER4_NODE41_MAGIC); +} + +/* + * plugin->u.node.init + * look for description of this method in plugin/node/node.h + */ +int init_node41(znode *node /* node to initialise */) +{ + return init_node40_common(node, node_plugin_by_id(NODE41_ID), + sizeof(node41_header), REISER4_NODE41_MAGIC); +} + +/* + * plugin->u.node.shift + * look for description of this method in plugin/node/node.h + */ +int shift_node41(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info) +{ + return shift_node40_common(from, to, pend, delete_child, + including_stop_coord, info, + sizeof(node41_header)); +} + +#ifdef GUESS_EXISTS +int guess_node41(const znode *node /* node to guess plugin of */) +{ + return guess_node40_common(node, NODE41_ID, REISER4_NODE41_MAGIC); +} +#endif + +/* + * plugin->u.node.max_item_size + */ +int max_item_size_node41(void) +{ + return reiser4_get_current_sb()->s_blocksize - sizeof(node41_header) - + sizeof(item_header40); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node41.h b/fs/reiser4/plugin/node/node41.h new file mode 100644 index 000000000000..dfe9a97485fb --- /dev/null +++ b/fs/reiser4/plugin/node/node41.h @@ -0,0 +1,50 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( __REISER4_NODE41_H__ ) +#define __REISER4_NODE41_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "node40.h" +#include + +/* + * node41 layout: the same as node40, but with 32-bit checksum + */ + +typedef struct node41_header { + node40_header head; + d32 csum; +} PACKED 
node41_header; + +/* + * functions to get/set fields of node41_header + */ +#define nh41_get_csum(nh) le32_to_cpu(get_unaligned(&(nh)->csum)) +#define nh41_set_csum(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->csum) + +int init_node41(znode * node); +int parse_node41(znode *node); +int max_item_size_node41(void); +int shift_node41(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info); +int csum_node41(znode *node, int check); + +#ifdef GUESS_EXISTS +int guess_node41(const znode * node); +#endif +extern void reiser4_handle_error(void); + +/* __REISER4_NODE41_H__ */ +#endif +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/object.c b/fs/reiser4/plugin/object.c new file mode 100644 index 000000000000..c039455abb03 --- /dev/null +++ b/fs/reiser4/plugin/object.c @@ -0,0 +1,553 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * Examples of object plugins: file, directory, symlink, special file. + * + * Plugins associated with inode: + * + * Plugin of inode is plugin referenced by plugin-id field of on-disk + * stat-data. How we store this plugin in in-core inode is not + * important. Currently pointers are used, another variant is to store offsets + * and do array lookup on each access. + * + * Now, each inode has one selected plugin: object plugin that + * determines what type of file this object is: directory, regular etc. + * + * This main plugin can use other plugins that are thus subordinated to + * it. Directory instance of object plugin uses hash; regular file + * instance uses tail policy plugin. + * + * Object plugin is either taken from id in stat-data or guessed from + * i_mode bits. 
Once it is established we ask it to install its + * subordinate plugins, by looking again in stat-data or inheriting them + * from parent. + * + * How new inode is initialized during ->read_inode(): + * 1 read stat-data and initialize inode fields: i_size, i_mode, + * i_generation, capabilities etc. + * 2 read plugin id from stat data or try to guess plugin id + * from inode->i_mode bits if plugin id is missing. + * 3 Call ->init_inode() method of stat-data plugin to initialise inode fields. + * + * NIKITA-FIXME-HANS: can you say a little about 1 being done before 3? What + * if stat data does contain i_size, etc., due to it being an unusual plugin? + * + * 4 Call ->activate() method of object's plugin. Plugin is either read from + * from stat-data or guessed from mode bits + * 5 Call ->inherit() method of object plugin to inherit as yet un initialized + * plugins from parent. + * + * Easy induction proves that on last step all plugins of inode would be + * initialized. + * + * When creating new object: + * 1 obtain object plugin id (see next period) + * NIKITA-FIXME-HANS: period? + * 2 ->install() this plugin + * 3 ->inherit() the rest from the parent + * + * We need some examples of creating an object with default and non-default + * plugin ids. Nikita, please create them. 
+ */ + +#include "../inode.h" + +int _bugop(void) +{ + BUG_ON(1); + return 0; +} + +#define bugop ((void *)_bugop) + +static int flow_by_inode_bugop(struct inode *inode, const char __user *buf, + int user, loff_t size, + loff_t off, rw_op op, flow_t *f) +{ + BUG_ON(1); + return 0; +} + +static int key_by_inode_bugop(struct inode *inode, loff_t off, reiser4_key *key) +{ + BUG_ON(1); + return 0; +} + +static int _dummyop(void) +{ + return 0; +} + +#define dummyop ((void *)_dummyop) + +static int change_file(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change object plugin of already existing object */ + if (memb == PSET_FILE) + return RETERR(-EINVAL); + + /* Change PSET_CREATE */ + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, memb, plugin); +} + +static reiser4_plugin_ops file_plugin_ops = { + .change = change_file +}; + +static struct inode_operations null_i_ops = {.create = NULL}; +static struct file_operations null_f_ops = {.owner = NULL}; +static struct address_space_operations null_a_ops = {.writepage = NULL}; + +/* + * Reiser4 provides for VFS either dispatcher, or common (fop, + * iop, aop) method. + * + * Dispatchers (suffixed with "dispatch") pass management to + * proper plugin in accordance with plugin table (pset) located + * in the private part of inode. + * + * Common methods are NOT prefixed with "dispatch". They are + * the same for all plugins of FILE interface, and, hence, no + * dispatching is needed. 
+ */ + +/* + * VFS methods for regular files + */ +static struct inode_operations regular_file_i_ops = { + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_dispatch, + .getattr = reiser4_getattr_common +}; +static struct file_operations regular_file_f_ops = { + .llseek = generic_file_llseek, + .read = reiser4_read_dispatch, + .write = reiser4_write_dispatch, + .read_iter = generic_file_read_iter, + .unlocked_ioctl = reiser4_ioctl_dispatch, +#ifdef CONFIG_COMPAT + .compat_ioctl = reiser4_ioctl_dispatch, +#endif + .mmap = reiser4_mmap_dispatch, + .open = reiser4_open_dispatch, + .release = reiser4_release_dispatch, + .fsync = reiser4_sync_file_common, + .splice_read = generic_file_splice_read, +}; +static struct address_space_operations regular_file_a_ops = { + .writepage = reiser4_writepage, + .readpage = reiser4_readpage_dispatch, + //.sync_page = block_sync_page, + .writepages = reiser4_writepages_dispatch, + .set_page_dirty = reiser4_set_page_dirty, + .readpages = reiser4_readpages_dispatch, + .write_begin = reiser4_write_begin_dispatch, + .write_end = reiser4_write_end_dispatch, + .bmap = reiser4_bmap_dispatch, + .invalidatepage = reiser4_invalidatepage, + .releasepage = reiser4_releasepage, + .migratepage = reiser4_migratepage +}; + +/* VFS methods for symlink files */ +static struct inode_operations symlink_file_i_ops = { + .get_link = reiser4_get_link_common, + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; + +/* VFS methods for special files */ +static struct inode_operations special_file_i_ops = { + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; + +/* VFS methods for directories */ +static struct inode_operations directory_i_ops = { + .create = reiser4_create_common, + .lookup = reiser4_lookup_common, + .link = reiser4_link_common, + .unlink = reiser4_unlink_common, + .symlink = 
reiser4_symlink_common, + .mkdir = reiser4_mkdir_common, + .rmdir = reiser4_unlink_common, + .mknod = reiser4_mknod_common, + .rename = reiser4_rename2_common, + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; +static struct file_operations directory_f_ops = { + .llseek = reiser4_llseek_dir_common, + .read = generic_read_dir, + .iterate = reiser4_iterate_common, + .release = reiser4_release_dir_common, + .fsync = reiser4_sync_common +}; +static struct address_space_operations directory_a_ops = { + .writepages = dummyop, +}; + +/* + * Definitions of object plugins. + */ + +file_plugin file_plugins[LAST_FILE_PLUGIN_ID] = { + [UNIX_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = UNIX_FILE_PLUGIN_ID, + .groups = (1 << REISER4_REGULAR_FILE), + .pops = &file_plugin_ops, + .label = "reg", + .desc = "regular file", + .linkage = {NULL, NULL}, + }, + /* + * invariant vfs ops + */ + .inode_ops = ®ular_file_i_ops, + .file_ops = ®ular_file_f_ops, + .as_ops = ®ular_file_a_ops, + /* + * private i_ops + */ + .setattr = setattr_unix_file, + .open = open_unix_file, + .read = read_unix_file, + .write = write_unix_file, + .ioctl = ioctl_unix_file, + .mmap = mmap_unix_file, + .release = release_unix_file, + /* + * private f_ops + */ + .readpage = readpage_unix_file, + .readpages = readpages_unix_file, + .writepages = writepages_unix_file, + .write_begin = write_begin_unix_file, + .write_end = write_end_unix_file, + /* + * private a_ops + */ + .bmap = bmap_unix_file, + /* + * other private methods + */ + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_unix_file, + .key_by_inode = key_by_inode_and_offset_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_object_common, + .delete_object = delete_object_unix_file, + .add_link = reiser4_add_link_common, + .rem_link = 
reiser4_rem_link_common, + .owns_item = owns_item_unix_file, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_data_unix_file, + .cut_tree_worker = cut_tree_worker_common, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [DIRECTORY_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = DIRECTORY_FILE_PLUGIN_ID, + .groups = (1 << REISER4_DIRECTORY_FILE), + .pops = &file_plugin_ops, + .label = "dir", + .desc = "directory", + .linkage = {NULL, NULL} + }, + .inode_ops = &null_i_ops, + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_bugop, + .key_by_inode = key_by_inode_bugop, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common_dir, + .create_object = reiser4_create_object_common, + .delete_object = reiser4_delete_dir_common, + .add_link = reiser4_add_link_common, + .rem_link = rem_link_common_dir, + .owns_item = owns_item_common_dir, + .can_add_link = can_add_link_common, + .can_rem_link = can_rem_link_common_dir, + .detach = reiser4_detach_common_dir, + .bind = reiser4_bind_common_dir, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common_dir, + .update = estimate_update_common, + .unlink = estimate_unlink_common_dir + }, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + }, + [SYMLINK_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = 
SYMLINK_FILE_PLUGIN_ID, + .groups = (1 << REISER4_SYMLINK_FILE), + .pops = &file_plugin_ops, + .label = "symlink", + .desc = "symbolic link", + .linkage = {NULL,NULL} + }, + .inode_ops = &symlink_file_i_ops, + /* inode->i_fop of symlink is initialized + by NULL in setup_inode_ops */ + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_symlink, + .delete_object = reiser4_delete_object_common, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + .destroy_inode = destroy_inode_symlink, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [SPECIAL_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = SPECIAL_FILE_PLUGIN_ID, + .groups = (1 << REISER4_SPECIAL_FILE), + .pops = &file_plugin_ops, + .label = "special", + .desc = + "special: fifo, device or socket", + .linkage = {NULL, NULL} + }, + .inode_ops = &special_file_i_ops, + /* file_ops of special files (sockets, block, char, fifo) are + initialized by init_special_inode. 
*/ + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_object_common, + .delete_object = reiser4_delete_object_common, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .owns_item = owns_item_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [CRYPTCOMPRESS_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = CRYPTCOMPRESS_FILE_PLUGIN_ID, + .groups = (1 << REISER4_REGULAR_FILE), + .pops = &file_plugin_ops, + .label = "cryptcompress", + .desc = "cryptcompress file", + .linkage = {NULL, NULL} + }, + .inode_ops = ®ular_file_i_ops, + .file_ops = ®ular_file_f_ops, + .as_ops = ®ular_file_a_ops, + + .setattr = setattr_cryptcompress, + .open = open_cryptcompress, + .read = read_cryptcompress, + .write = write_cryptcompress, + .ioctl = ioctl_cryptcompress, + .mmap = mmap_cryptcompress, + .release = release_cryptcompress, + + .readpage = readpage_cryptcompress, + .readpages = readpages_cryptcompress, + .writepages = writepages_cryptcompress, + .write_begin = write_begin_cryptcompress, + .write_end = write_end_cryptcompress, + + .bmap = bmap_cryptcompress, + + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_cryptcompress, + .key_by_inode = key_by_inode_cryptcompress, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_cryptcompress, + 
.create_object = create_object_cryptcompress, + .delete_object = delete_object_cryptcompress, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .owns_item = owns_item_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_data_cryptcompress, + .cut_tree_worker = cut_tree_worker_cryptcompress, + .destroy_inode = destroy_inode_cryptcompress, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + } +}; + +static int change_dir(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change dir plugin of already existing object */ + return RETERR(-EINVAL); +} + +static reiser4_plugin_ops dir_plugin_ops = { + .change = change_dir +}; + +/* + * definition of directory plugins + */ + +dir_plugin dir_plugins[LAST_DIR_ID] = { + /* standard hashed directory plugin */ + [HASHED_DIR_PLUGIN_ID] = { + .h = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .id = HASHED_DIR_PLUGIN_ID, + .pops = &dir_plugin_ops, + .label = "dir", + .desc = "hashed directory", + .linkage = {NULL, NULL} + }, + .inode_ops = &directory_i_ops, + .file_ops = &directory_f_ops, + .as_ops = &directory_a_ops, + + .get_parent = get_parent_common, + .is_name_acceptable = is_name_acceptable_common, + .build_entry_key = build_entry_key_hashed, + .build_readdir_key = build_readdir_key_common, + .add_entry = reiser4_add_entry_common, + .rem_entry = reiser4_rem_entry_common, + .init = reiser4_dir_init_common, + .done = reiser4_dir_done_common, + .attach = reiser4_attach_common, + .detach = reiser4_detach_common, + .estimate = { + .add_entry = estimate_add_entry_common, + .rem_entry = estimate_rem_entry_common, + .unlink = dir_estimate_unlink_common 
+ } + }, + /* hashed directory for which seekdir/telldir are guaranteed to + * work. Brain-damage. */ + [SEEKABLE_HASHED_DIR_PLUGIN_ID] = { + .h = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .id = SEEKABLE_HASHED_DIR_PLUGIN_ID, + .pops = &dir_plugin_ops, + .label = "dir32", + .desc = "directory hashed with 31 bit hash", + .linkage = {NULL, NULL} + }, + .inode_ops = &directory_i_ops, + .file_ops = &directory_f_ops, + .as_ops = &directory_a_ops, + + .get_parent = get_parent_common, + .is_name_acceptable = is_name_acceptable_common, + .build_entry_key = build_entry_key_seekable, + .build_readdir_key = build_readdir_key_common, + .add_entry = reiser4_add_entry_common, + .rem_entry = reiser4_rem_entry_common, + .init = reiser4_dir_init_common, + .done = reiser4_dir_done_common, + .attach = reiser4_attach_common, + .detach = reiser4_detach_common, + .estimate = { + .add_entry = estimate_add_entry_common, + .rem_entry = estimate_rem_entry_common, + .unlink = dir_estimate_unlink_common + } + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/object.h b/fs/reiser4/plugin/object.h new file mode 100644 index 000000000000..60ca4b9a9a25 --- /dev/null +++ b/fs/reiser4/plugin/object.h @@ -0,0 +1,117 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of object plugin functions. 
*/ + +#if !defined(__FS_REISER4_PLUGIN_OBJECT_H__) +#define __FS_REISER4_PLUGIN_OBJECT_H__ + +#include "../type_safe_hash.h" + +/* common implementations of inode operations */ +int reiser4_create_common(struct inode *parent, struct dentry *dentry, + umode_t mode, bool); +struct dentry *reiser4_lookup_common(struct inode *parent, + struct dentry *dentry, + unsigned int); +int reiser4_link_common(struct dentry *existing, struct inode *parent, + struct dentry *newname); +int reiser4_unlink_common(struct inode *parent, struct dentry *victim); +int reiser4_mkdir_common(struct inode *parent, struct dentry *dentry, umode_t mode); +int reiser4_symlink_common(struct inode *parent, struct dentry *dentry, + const char *linkname); +int reiser4_mknod_common(struct inode *parent, struct dentry *dentry, + umode_t mode, dev_t rdev); +int reiser4_rename2_common(struct inode *old_dir, struct dentry *old_name, + struct inode *new_dir, struct dentry *new_name, + unsigned flags); +const char *reiser4_get_link_common(struct dentry *, struct inode *inode, + struct delayed_call *done); +int reiser4_permission_common(struct inode *, int mask); +int reiser4_setattr_common(struct dentry *, struct iattr *); +int reiser4_getattr_common(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); + +/* common implementations of file operations */ +loff_t reiser4_llseek_dir_common(struct file *, loff_t off, int origin); +int reiser4_iterate_common(struct file *, struct dir_context *context); +int reiser4_release_dir_common(struct inode *, struct file *); +int reiser4_sync_common(struct file *, loff_t, loff_t, int datasync); + +/* file plugin operations: common implementations */ +int write_sd_by_inode_common(struct inode *); +int key_by_inode_and_offset_common(struct inode *, loff_t, reiser4_key *); +int set_plug_in_inode_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int adjust_to_parent_common(struct inode *object, struct inode 
*parent, + struct inode *root); +int adjust_to_parent_common_dir(struct inode *object, struct inode *parent, + struct inode *root); +int adjust_to_parent_cryptcompress(struct inode *object, struct inode *parent, + struct inode *root); +int reiser4_create_object_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int reiser4_delete_object_common(struct inode *); +int reiser4_delete_dir_common(struct inode *); +int reiser4_add_link_common(struct inode *object, struct inode *parent); +int reiser4_rem_link_common(struct inode *object, struct inode *parent); +int rem_link_common_dir(struct inode *object, struct inode *parent); +int owns_item_common(const struct inode *, const coord_t *); +int owns_item_common_dir(const struct inode *, const coord_t *); +int can_add_link_common(const struct inode *); +int can_rem_link_common_dir(const struct inode *); +int reiser4_detach_common_dir(struct inode *child, struct inode *parent); +int reiser4_bind_common_dir(struct inode *child, struct inode *parent); +int safelink_common(struct inode *, reiser4_safe_link_t, __u64 value); +reiser4_block_nr estimate_create_common(const struct inode *); +reiser4_block_nr estimate_create_common_dir(const struct inode *); +reiser4_block_nr estimate_update_common(const struct inode *); +reiser4_block_nr estimate_unlink_common(const struct inode *, + const struct inode *); +reiser4_block_nr estimate_unlink_common_dir(const struct inode *, + const struct inode *); +char *wire_write_common(struct inode *, char *start); +char *wire_read_common(char *addr, reiser4_object_on_wire *); +struct dentry *wire_get_common(struct super_block *, reiser4_object_on_wire *); +int wire_size_common(struct inode *); +void wire_done_common(reiser4_object_on_wire *); + +/* dir plugin operations: common implementations */ +struct dentry *get_parent_common(struct inode *child); +int is_name_acceptable_common(const struct inode *, const char *name, int len); +void 
build_entry_key_common(const struct inode *, + const struct qstr *qname, reiser4_key *); +int build_readdir_key_common(struct file *dir, reiser4_key *); +int reiser4_add_entry_common(struct inode *object, struct dentry *where, + reiser4_object_create_data * , reiser4_dir_entry_desc *); +int reiser4_rem_entry_common(struct inode *object, struct dentry *where, + reiser4_dir_entry_desc *); +int reiser4_dir_init_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int reiser4_dir_done_common(struct inode *); +int reiser4_attach_common(struct inode *child, struct inode *parent); +int reiser4_detach_common(struct inode *object, struct inode *parent); +reiser4_block_nr estimate_add_entry_common(const struct inode *); +reiser4_block_nr estimate_rem_entry_common(const struct inode *); +reiser4_block_nr dir_estimate_unlink_common(const struct inode *, + const struct inode *); + +/* these are essential parts of common implementations, they are to make + customized implementations easier */ + +/* merely useful functions */ +int lookup_sd(struct inode *, znode_lock_mode, coord_t *, lock_handle * , + const reiser4_key * , int silent); + +/* __FS_REISER4_PLUGIN_OBJECT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/plugin.c b/fs/reiser4/plugin/plugin.c new file mode 100644 index 000000000000..4af0b88ac88b --- /dev/null +++ b/fs/reiser4/plugin/plugin.c @@ -0,0 +1,569 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Basic plugin infrastructure, lookup etc. */ + +/* PLUGINS: + + Plugins are internal Reiser4 "modules" or "objects" used to increase + extensibility and allow external users to easily adapt reiser4 to + their needs. + + Plugins are classified into several disjoint "types". 
Plugins + belonging to the particular plugin type are termed "instances" of + this type. Existing types are listed by enum reiser4_plugin_type + (see plugin/plugin_header.h) + +NIKITA-FIXME-HANS: update this list, and review this entire comment for currency + + Object (file) plugin determines how given file-system object serves + standard VFS requests for read, write, seek, mmap etc. Instances of + file plugins are: regular file, directory, symlink. Another example + of file plugin is audit plugin, that optionally records accesses to + underlying object and forwards requests to it. + + Hash plugins compute hashes used by reiser4 to store and locate + files within directories. Instances of hash plugin type are: r5, + tea, rupasov. + + Tail plugins (or, more precisely, tail policy plugins) determine + when last part of the file should be stored in a formatted item. + + Scope and lookup: + + label such that pair ( type_label, plugin_label ) is unique. This + pair is a globally persistent and user-visible plugin + identifier. Internally kernel maintains plugins and plugin types in + arrays using an index into those arrays as plugin and plugin type + identifiers. File-system in turn, also maintains persistent + "dictionary" which is mapping from plugin label to numerical + identifier which is stored in file-system objects. That is, we + store the offset into the plugin array for that plugin type as the + plugin id in the stat data of the filesystem object. + + Internal kernel plugin type identifier (index in plugins[] array) is + of type reiser4_plugin_type. Set of available plugin types is + currently static, but dynamic loading doesn't seem to pose + insurmountable problems. + + Within each type plugins are addressed by the identifiers of type + reiser4_plugin_id (indices in reiser4_plugin_type_data.builtin[]). + Such identifiers are only required to be unique within one type, + not globally. 
+ + Thus, plugin in memory is uniquely identified by the pair (type_id, + id). + + Usage: + + There exists only one instance of each plugin instance, but this + single instance can be associated with many entities (file-system + objects, items, nodes, transactions, file-descriptors etc.). Entity + to which plugin of given type is termed (due to the lack of + imagination) "subject" of this plugin type and, by abuse of + terminology, subject of particular instance of this type to which + it's attached currently. For example, inode is subject of object + plugin type. Inode representing directory is subject of directory + plugin, hash plugin type and some particular instance of hash plugin + type. Inode, representing regular file is subject of "regular file" + plugin, tail-policy plugin type etc. + + With each subject the plugin possibly stores some state. For example, + the state of a directory plugin (instance of object plugin type) is pointer + to hash plugin (if directories always use hashing that is). + + Interface: + + In addition to a scalar identifier, each plugin type and plugin + proper has a "label": short string and a "description"---longer + descriptive string. Labels and descriptions of plugin types are + hard-coded into plugins[] array, declared and defined in + plugin.c. Label and description of plugin are stored in .label and + .desc fields of reiser4_plugin_header respectively. It's possible to + locate plugin by the pair of labels. + + Features (not implemented): + + . user-level plugin manipulations: + + reiser4("filename/..file_plugin<='audit'"); + + write(open("filename/..file_plugin"), "audit", 8); + + . user level utilities lsplug and chplug to manipulate plugins. + Utilities are not of primary priority. Possibly they will be not + working on v4.0 + + NIKITA-FIXME-HANS: this should be a mkreiserfs option not a mount + option, do you agree? 
I don't think that specifying it at mount time, + and then changing it with each mount, is a good model for usage. + + . mount option "plug" to set-up plugins of root-directory. + "plug=foo:bar" will set "bar" as default plugin of type "foo". + + Limitations: + + . each plugin type has to provide at least one builtin + plugin. This is technical limitation and it can be lifted in the + future. + + TODO: + + New plugin types/plugins: + Things we should be able to separately choose to inherit: + + security plugins + + stat data + + file bodies + + file plugins + + dir plugins + + . perm:acl + + . audi---audit plugin intercepting and possibly logging all + accesses to object. Requires to put stub functions in file_operations + instead of generic_file_*. + +NIKITA-FIXME-HANS: why make overflows a plugin? + . over---handle hash overflows + + . sqnt---handle different access patterns and instruments read-ahead + +NIKITA-FIXME-HANS: describe the line below in more detail. + + . hier---handle inheritance of plugins along file-system hierarchy + + Different kinds of inheritance: on creation vs. on access. + Compatible/incompatible plugins. + Inheritance for multi-linked files. + Layered plugins. + Notion of plugin context is abandoned. + +Each file is associated + with one plugin and dependent plugins (hash, etc.) are stored as + main plugin state. Now, if we have plugins used for regular files + but not for directories, how such plugins would be inherited? + . always store them with directories also + +NIKITA-FIXME-HANS: Do the line above. It is not exclusive of doing +the line below which is also useful. + + . 
use inheritance hierarchy, independent of file-system namespace +*/ + +#include "../debug.h" +#include "../dformat.h" +#include "plugin_header.h" +#include "item/static_stat.h" +#include "node/node.h" +#include "security/perm.h" +#include "space/space_allocator.h" +#include "disk_format/disk_format.h" +#include "plugin.h" +#include "../reiser4.h" +#include "../jnode.h" +#include "../inode.h" + +#include /* for struct super_block */ + +/* + * init_plugins - initialize plugin sub-system. + * Just call this once on reiser4 startup. + * + * Initializes plugin sub-system. It is part of reiser4 module + * initialization. For each plugin of each type init method is called and each + * plugin is put into list of plugins. + */ +int init_plugins(void) +{ + reiser4_plugin_type type_id; + + for (type_id = 0; type_id < REISER4_PLUGIN_TYPES; ++type_id) { + struct reiser4_plugin_type_data *ptype; + int i; + + ptype = &plugins[type_id]; + assert("nikita-3508", ptype->label != NULL); + assert("nikita-3509", ptype->type_id == type_id); + + INIT_LIST_HEAD(&ptype->plugins_list); +/* NIKITA-FIXME-HANS: change builtin_num to some other name lacking the term + * builtin. 
*/ + for (i = 0; i < ptype->builtin_num; ++i) { + reiser4_plugin *plugin; + + plugin = plugin_at(ptype, i); + + if (plugin->h.label == NULL) + /* uninitialized slot encountered */ + continue; + assert("nikita-3445", plugin->h.type_id == type_id); + plugin->h.id = i; + if (plugin->h.pops != NULL && + plugin->h.pops->init != NULL) { + int result; + + result = plugin->h.pops->init(plugin); + if (result != 0) + return result; + } + INIT_LIST_HEAD(&plugin->h.linkage); + list_add_tail(&plugin->h.linkage, &ptype->plugins_list); + } + } + return 0; +} + +/* true if plugin type id is valid */ +int is_plugin_type_valid(reiser4_plugin_type type) +{ + /* "type" is unsigned, so no comparison with 0 is + necessary */ + return (type < REISER4_PLUGIN_TYPES); +} + +/* true if plugin id is valid */ +int is_plugin_id_valid(reiser4_plugin_type type, reiser4_plugin_id id) +{ + assert("nikita-1653", is_plugin_type_valid(type)); + return id < plugins[type].builtin_num; +} + +/* return plugin by its @type and @id. + + Both arguments are checked for validness: this is supposed to be called + from user-level. + +NIKITA-FIXME-HANS: Do you instead mean that this checks ids created in +user space, and passed to the filesystem by use of method files? Your +comment really confused me on the first reading.... + +*/ +reiser4_plugin *plugin_by_unsafe_id(reiser4_plugin_type type /* plugin type + * unchecked */, + reiser4_plugin_id id /* plugin id, + * unchecked */) +{ + if (is_plugin_type_valid(type)) { + if (is_plugin_id_valid(type, id)) + return plugin_at(&plugins[type], id); + else + /* id out of bounds */ + warning("nikita-2913", + "Invalid plugin id: [%i:%i]", type, id); + } else + /* type_id out of bounds */ + warning("nikita-2914", "Invalid type_id: %i", type); + return NULL; +} + +/** + * save_plugin_id - store plugin id in disk format + * @plugin: plugin to convert + * @area: where to store result + * + * Puts id of @plugin in little endian format to address @area. 
+ */ +int save_plugin_id(reiser4_plugin *plugin /* plugin to convert */ , + d16 * area/* where to store result */) +{ + assert("nikita-1261", plugin != NULL); + assert("nikita-1262", area != NULL); + + put_unaligned(cpu_to_le16(plugin->h.id), area); + return 0; +} + +/* list of all plugins of given type */ +struct list_head *get_plugin_list(reiser4_plugin_type type) +{ + assert("nikita-1056", is_plugin_type_valid(type)); + return &plugins[type].plugins_list; +} + +static void update_pset_mask(reiser4_inode * info, pset_member memb) +{ + struct dentry *rootdir; + reiser4_inode *root; + + assert("edward-1443", memb != PSET_FILE); + + rootdir = inode_by_reiser4_inode(info)->i_sb->s_root; + if (rootdir != NULL) { + root = reiser4_inode_data(rootdir->d_inode); + /* + * if inode is different from the default one, or we are + * changing plugin of root directory, update plugin_mask + */ + if (aset_get(info->pset, memb) != + aset_get(root->pset, memb) || + info == root) + info->plugin_mask |= (1 << memb); + else + info->plugin_mask &= ~(1 << memb); + } +} + +/* Get specified plugin set member from parent, + or from fs-defaults (if no parent is given) and + install the result to pset of @self */ +int grab_plugin_pset(struct inode *self, + struct inode *ancestor, + pset_member memb) +{ + reiser4_plugin *plug; + reiser4_inode *info; + int result = 0; + + /* Do not grab if initialised already. */ + info = reiser4_inode_data(self); + if (aset_get(info->pset, memb) != NULL) + return 0; + if (ancestor) { + reiser4_inode *parent; + + parent = reiser4_inode_data(ancestor); + plug = aset_get(parent->hset, memb) ? 
: + aset_get(parent->pset, memb); + } else + plug = get_default_plugin(memb); + + result = set_plugin(&info->pset, memb, plug); + if (result == 0) { + if (!ancestor || self->i_sb->s_root->d_inode != self) + update_pset_mask(info, memb); + } + return result; +} + +/* Take missing pset members from root inode */ +int finish_pset(struct inode *inode) +{ + reiser4_plugin *plug; + reiser4_inode *root; + reiser4_inode *info; + pset_member memb; + int result = 0; + + root = reiser4_inode_data(inode->i_sb->s_root->d_inode); + info = reiser4_inode_data(inode); + + assert("edward-1455", root != NULL); + assert("edward-1456", info != NULL); + + /* file and directory plugins are already initialized. */ + for (memb = PSET_DIR + 1; memb < PSET_LAST; ++memb) { + + /* Do not grab if initialised already. */ + if (aset_get(info->pset, memb) != NULL) + continue; + + plug = aset_get(root->pset, memb); + result = set_plugin(&info->pset, memb, plug); + if (result != 0) + break; + } + if (result != 0) { + warning("nikita-3447", + "Cannot set up plugins for %lli", + (unsigned long long) + get_inode_oid(inode)); + } + return result; +} + +int force_plugin_pset(struct inode *self, pset_member memb, + reiser4_plugin * plug) +{ + reiser4_inode *info; + int result = 0; + + if (!self->i_sb->s_root || self->i_sb->s_root->d_inode == self) { + /* Changing pset in the root object. 
*/ + return RETERR(-EINVAL); + } + + info = reiser4_inode_data(self); + if (plug->h.pops != NULL && plug->h.pops->change != NULL) + result = plug->h.pops->change(self, plug, memb); + else + result = aset_set_unsafe(&info->pset, memb, plug); + if (result == 0) { + __u16 oldmask = info->plugin_mask; + + update_pset_mask(info, memb); + if (oldmask != info->plugin_mask) + reiser4_inode_clr_flag(self, REISER4_SDLEN_KNOWN); + } + return result; +} + +struct reiser4_plugin_type_data plugins[REISER4_PLUGIN_TYPES] = { + /* C90 initializers */ + [REISER4_FILE_PLUGIN_TYPE] = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .label = "file", + .desc = "Object plugins", + .builtin_num = sizeof_array(file_plugins), + .builtin = file_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(file_plugin) + }, + [REISER4_DIR_PLUGIN_TYPE] = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .label = "dir", + .desc = "Directory plugins", + .builtin_num = sizeof_array(dir_plugins), + .builtin = dir_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(dir_plugin) + }, + [REISER4_HASH_PLUGIN_TYPE] = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .label = "hash", + .desc = "Directory hashes", + .builtin_num = sizeof_array(hash_plugins), + .builtin = hash_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(hash_plugin) + }, + [REISER4_FIBRATION_PLUGIN_TYPE] = { + .type_id = + REISER4_FIBRATION_PLUGIN_TYPE, + .label = "fibration", + .desc = "Directory fibrations", + .builtin_num = sizeof_array(fibration_plugins), + .builtin = fibration_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(fibration_plugin) + }, + [REISER4_CIPHER_PLUGIN_TYPE] = { + .type_id = REISER4_CIPHER_PLUGIN_TYPE, + .label = "cipher", + .desc = "Cipher plugins", + .builtin_num = sizeof_array(cipher_plugins), + .builtin = cipher_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(cipher_plugin) + }, + [REISER4_DIGEST_PLUGIN_TYPE] = { + .type_id = REISER4_DIGEST_PLUGIN_TYPE, + .label = "digest", + .desc = "Digest 
plugins", + .builtin_num = sizeof_array(digest_plugins), + .builtin = digest_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(digest_plugin) + }, + [REISER4_COMPRESSION_PLUGIN_TYPE] = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .label = "compression", + .desc = "Compression plugins", + .builtin_num = sizeof_array(compression_plugins), + .builtin = compression_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(compression_plugin) + }, + [REISER4_FORMATTING_PLUGIN_TYPE] = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .label = "formatting", + .desc = "Tail inlining policies", + .builtin_num = sizeof_array(formatting_plugins), + .builtin = formatting_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(formatting_plugin) + }, + [REISER4_PERM_PLUGIN_TYPE] = { + .type_id = REISER4_PERM_PLUGIN_TYPE, + .label = "perm", + .desc = "Permission checks", + .builtin_num = sizeof_array(perm_plugins), + .builtin = perm_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(perm_plugin) + }, + [REISER4_ITEM_PLUGIN_TYPE] = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .label = "item", + .desc = "Item handlers", + .builtin_num = sizeof_array(item_plugins), + .builtin = item_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(item_plugin) + }, + [REISER4_NODE_PLUGIN_TYPE] = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .label = "node", + .desc = "node layout handlers", + .builtin_num = sizeof_array(node_plugins), + .builtin = node_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(node_plugin) + }, + [REISER4_SD_EXT_PLUGIN_TYPE] = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .label = "sd_ext", + .desc = "Parts of stat-data", + .builtin_num = sizeof_array(sd_ext_plugins), + .builtin = sd_ext_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(sd_ext_plugin) + }, + [REISER4_FORMAT_PLUGIN_TYPE] = { + .type_id = REISER4_FORMAT_PLUGIN_TYPE, + .label = "disk_layout", + .desc = "defines filesystem on disk layout", + .builtin_num = 
sizeof_array(format_plugins), + .builtin = format_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(disk_format_plugin) + }, + [REISER4_JNODE_PLUGIN_TYPE] = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .label = "jnode", + .desc = "defines kind of jnode", + .builtin_num = sizeof_array(jnode_plugins), + .builtin = jnode_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(jnode_plugin) + }, + [REISER4_COMPRESSION_MODE_PLUGIN_TYPE] = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .label = "compression_mode", + .desc = "Defines compression mode", + .builtin_num = sizeof_array(compression_mode_plugins), + .builtin = compression_mode_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(compression_mode_plugin) + }, + [REISER4_CLUSTER_PLUGIN_TYPE] = { + .type_id = REISER4_CLUSTER_PLUGIN_TYPE, + .label = "cluster", + .desc = "Defines cluster size", + .builtin_num = sizeof_array(cluster_plugins), + .builtin = cluster_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(cluster_plugin) + }, + [REISER4_TXMOD_PLUGIN_TYPE] = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .label = "txmod", + .desc = "Defines transaction model", + .builtin_num = sizeof_array(txmod_plugins), + .builtin = txmod_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(txmod_plugin) + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/plugin/plugin.h b/fs/reiser4/plugin/plugin.h new file mode 100644 index 000000000000..c7d75d50de4c --- /dev/null +++ b/fs/reiser4/plugin/plugin.h @@ -0,0 +1,999 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Basic plugin data-types. 
+ see fs/reiser4/plugin/plugin.c for details */ + +#if !defined(__FS_REISER4_PLUGIN_TYPES_H__) +#define __FS_REISER4_PLUGIN_TYPES_H__ + +#include "../forward.h" +#include "../debug.h" +#include "../dformat.h" +#include "../key.h" +#include "compress/compress.h" +#include "crypto/cipher.h" +#include "plugin_header.h" +#include "item/static_stat.h" +#include "item/internal.h" +#include "item/sde.h" +#include "item/cde.h" +#include "item/item.h" +#include "node/node.h" +#include "node/node41.h" +#include "security/perm.h" +#include "fibration.h" + +#include "space/bitmap.h" +#include "space/space_allocator.h" + +#include "disk_format/disk_format40.h" +#include "disk_format/disk_format.h" + +#include /* for struct super_block, address_space */ +#include /* for struct page */ +#include /* for struct buffer_head */ +#include /* for struct dentry */ +#include +#include + +typedef struct reiser4_object_on_wire reiser4_object_on_wire; + +/* + * File plugin. Defines the set of methods that file plugins implement, some + * of which are optional. + * + * A file plugin offers to the caller an interface for IO ( writing to and/or + * reading from) to what the caller sees as one sequence of bytes. An IO to it + * may affect more than one physical sequence of bytes, or no physical sequence + * of bytes, it may affect sequences of bytes offered by other file plugins to + * the semantic layer, and the file plugin may invoke other plugins and + * delegate work to them, but its interface is structured for offering the + * caller the ability to read and/or write what the caller sees as being a + * single sequence of bytes. 
+ * + * The file plugin must present a sequence of bytes to the caller, but it does + * not necessarily have to store a sequence of bytes, it does not necessarily + * have to support efficient tree traversal to any offset in the sequence of + * bytes (tail and extent items, whose keys contain offsets, do however provide + * efficient non-sequential lookup of any offset in the sequence of bytes). + * + * Directory plugins provide methods for selecting file plugins by resolving a + * name for them. + * + * The functionality other filesystems call an attribute, and rigidly tie + * together, we decompose into orthogonal selectable features of files. Using + * the terminology we will define next, an attribute is a perhaps constrained, + * perhaps static length, file whose parent has a uni-count-intra-link to it, + * which might be grandparent-major-packed, and whose parent has a deletion + * method that deletes it. + * + * File plugins can implement constraints. + * + * Files can be of variable length (e.g. regular unix files), or of static + * length (e.g. static sized attributes). + * + * An object may have many sequences of bytes, and many file plugins, but, it + * has exactly one objectid. It is usually desirable that an object has a + * deletion method which deletes every item with that objectid. Items cannot + * in general be found by just their objectids. This means that an object must + * have either a method built into its deletion plugin method for knowing what + * items need to be deleted, or links stored with the object that provide the + * plugin with a method for finding those items. Deleting a file within an + * object may or may not have the effect of deleting the entire object, + * depending on the file plugin's deletion method. + * + * LINK TAXONOMY: + * + * Many objects have a reference count, and when the reference count reaches 0 + * the object's deletion method is invoked. 
Some links embody a reference + * count increase ("countlinks"), and others do not ("nocountlinks"). + * + * Some links are bi-directional links ("bilinks"), and some are + * uni-directional("unilinks"). + * + * Some links are between parts of the same object ("intralinks"), and some are + * between different objects ("interlinks"). + * + * PACKING TAXONOMY: + * + * Some items of an object are stored with a major packing locality based on + * their object's objectid (e.g. unix directory items in plan A), and these are + * called "self-major-packed". + * + * Some items of an object are stored with a major packing locality based on + * their semantic parent object's objectid (e.g. unix file bodies in plan A), + * and these are called "parent-major-packed". + * + * Some items of an object are stored with a major packing locality based on + * their semantic grandparent, and these are called "grandparent-major-packed". + * Now carefully notice that we run into trouble with key length if we have to + * store a 8 byte major+minor grandparent based packing locality, an 8 byte + * parent objectid, an 8 byte attribute objectid, and an 8 byte offset, all in + * a 24 byte key. One of these fields must be sacrificed if an item is to be + * grandparent-major-packed, and which to sacrifice is left to the item author + * choosing to make the item grandparent-major-packed. You cannot make tail + * items and extent items grandparent-major-packed, though you could make them + * self-major-packed (usually they are parent-major-packed). + * + * In the case of ACLs (which are composed of fixed length ACEs which consist + * of {subject-type, subject, and permission bitmask} triples), it makes sense + * to not have an offset field in the ACE item key, and to allow duplicate keys + * for ACEs. 
Thus, the set of ACEs for a given file is found by looking for a + * key consisting of the objectid of the grandparent (thus grouping all ACLs in + * a directory together), the minor packing locality of ACE, the objectid of + * the file, and 0. + * + * IO involves moving data from one location to another, which means that two + * locations must be specified, source and destination. + * + * This source and destination can be in the filesystem, or they can be a + * pointer in the user process address space plus a byte count. + * + * If both source and destination are in the filesystem, then at least one of + * them must be representable as a pure stream of bytes (which we call a flow, + * and define as a struct containing a key, a data pointer, and a length). + * This may mean converting one of them into a flow. We provide a generic + * cast_into_flow() method, which will work for any plugin supporting + * read_flow(), though it is inefficiently implemented in that it temporarily + * stores the flow in a buffer (Question: what to do with huge flows that + * cannot fit into memory? Answer: we must not convert them all at once. ) + * + * Performing a write requires resolving the write request into a flow defining + * the source, and a method that performs the write, and a key that defines + * where in the tree the write is to go. + * + * Performing a read requires resolving the read request into a flow defining + * the target, and a method that performs the read, and a key that defines + * where in the tree the read is to come from. + * + * There will exist file plugins which have no pluginid stored on the disk for + * them, and which are only invoked by other plugins. + */ + +/* + * This should be incremented in every release which adds one + * or more new plugins. + * NOTE: Make sure that respective macro is also incremented in + * the new release of reiser4progs. 
+ */ +#define PLUGIN_LIBRARY_VERSION 2 + + /* enumeration of fields within plugin_set */ +typedef enum { + PSET_FILE, + PSET_DIR, /* PSET_FILE and PSET_DIR should be first + * elements: inode.c:read_inode() depends on + * this. */ + PSET_PERM, + PSET_FORMATTING, + PSET_HASH, + PSET_FIBRATION, + PSET_SD, + PSET_DIR_ITEM, + PSET_CIPHER, + PSET_DIGEST, + PSET_COMPRESSION, + PSET_COMPRESSION_MODE, + PSET_CLUSTER, + PSET_CREATE, + PSET_LAST +} pset_member; + +/* builtin file-plugins */ +typedef enum { + /* regular file */ + UNIX_FILE_PLUGIN_ID, + /* directory */ + DIRECTORY_FILE_PLUGIN_ID, + /* symlink */ + SYMLINK_FILE_PLUGIN_ID, + /* for objects completely handled by the VFS: fifos, devices, + sockets */ + SPECIAL_FILE_PLUGIN_ID, + /* regular cryptcompress file */ + CRYPTCOMPRESS_FILE_PLUGIN_ID, + /* number of file plugins. Used as size of arrays to hold + file plugins. */ + LAST_FILE_PLUGIN_ID +} reiser4_file_id; + +typedef struct file_plugin { + + /* generic fields */ + plugin_header h; + + /* VFS methods */ + struct inode_operations * inode_ops; + struct file_operations * file_ops; + struct address_space_operations * as_ops; + /** + * Private methods. These are optional. If used they will allow you + * to minimize the amount of code needed to implement a deviation + * from some other method that also uses them. + */ + /* + * private inode_ops + */ + int (*setattr)(struct dentry *, struct iattr *); + /* + * private file_ops + */ + /* do whatever is necessary to do when object is opened */ + int (*open) (struct inode *inode, struct file *file); + ssize_t (*read) (struct file *, char __user *buf, size_t read_amount, + loff_t *off); + /* write as much as possible bytes from nominated @write_amount + * before plugin scheduling is occurred. 
Save scheduling state + * in @cont */ + ssize_t (*write) (struct file *, const char __user *buf, + size_t write_amount, loff_t * off, + struct dispatch_context * cont); + int (*ioctl) (struct file *filp, unsigned int cmd, unsigned long arg); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*release) (struct inode *, struct file *); + /* + * private a_ops + */ + int (*readpage) (struct file *file, struct page *page); + int (*readpages)(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + int (*writepages)(struct address_space *mapping, + struct writeback_control *wbc); + int (*write_begin)(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata); + int (*write_end)(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); + sector_t (*bmap) (struct address_space * mapping, sector_t lblock); + /* other private methods */ + /* save inode cached stat-data onto disk. It was called + reiserfs_update_sd() in 3.x */ + int (*write_sd_by_inode) (struct inode *); + /* + * Construct flow into @flow according to user-supplied data. + * + * This is used by read/write methods to construct a flow to + * write/read. ->flow_by_inode() is plugin method, rather than single + * global implementation, because key in a flow used by plugin may + * depend on data in a @buf. + * + * NIKITA-FIXME-HANS: please create statistics on what functions are + * dereferenced how often for the mongo benchmark. You can supervise + * Elena doing this for you if that helps. Email me the list of the + * top 10, with their counts, and an estimate of the total number of + * CPU cycles spent dereferencing as a percentage of CPU cycles spent + * processing (non-idle processing). If the total percent is, say, + * less than 1%, it will make our coding discussions much easier, and + * keep me from questioning whether functions like the below are too + * frequently called to be dereferenced. 
If the total percent is more + * than 1%, perhaps private methods should be listed in a "required" + * comment at the top of each plugin (with stern language about how if + * the comment is missing it will not be accepted by the maintainer), + * and implemented using macros not dereferenced functions. How about + * replacing this whole private methods part of the struct with a + * thorough documentation of what the standard helper functions are for + * use in constructing plugins? I think users have been asking for + * that, though not in so many words. + */ + int (*flow_by_inode) (struct inode *, const char __user *buf, + int user, loff_t size, + loff_t off, rw_op op, flow_t *); + /* + * Return the key used to retrieve an offset of a file. It is used by + * default implementation of ->flow_by_inode() method + * (common_build_flow()) and, among other things, to get to the extent + * from jnode of unformatted node. + */ + int (*key_by_inode) (struct inode *, loff_t off, reiser4_key *); + + /* NIKITA-FIXME-HANS: this comment is not as clear to others as you + * think.... */ + /* + * set the plugin for a file. Called during file creation in creat() + * but not reiser4() unless an inode already exists for the file. + */ + int (*set_plug_in_inode) (struct inode *inode, struct inode *parent, + reiser4_object_create_data *); + + /* NIKITA-FIXME-HANS: comment and name seem to say different things, + * are you setting up the object itself also or just adjusting the + * parent?.... */ + /* set up plugins for new @object created in @parent. @root is root + directory. */ + int (*adjust_to_parent) (struct inode *object, struct inode *parent, + struct inode *root); + /* + * this does whatever is necessary to do when object is created. For + * instance, for unix files stat data is inserted. It is supposed to be + * called by create of struct inode_operations. 
+ */ + int (*create_object) (struct inode *object, struct inode *parent, + reiser4_object_create_data *); + /* + * this method should check REISER4_NO_SD and set REISER4_NO_SD on + * success. Deletion of an object usually includes removal of items + * building file body (for directories this is removal of "." and "..") + * and removal of stat-data item. + */ + int (*delete_object) (struct inode *); + + /* add link from @parent to @object */ + int (*add_link) (struct inode *object, struct inode *parent); + + /* remove link from @parent to @object */ + int (*rem_link) (struct inode *object, struct inode *parent); + + /* + * return true if item addressed by @coord belongs to @inode. This is + * used by read/write to properly slice flow into items in presence of + * multiple key assignment policies, because items of a file are not + * necessarily contiguous in a key space, for example, in a plan-b. + */ + int (*owns_item) (const struct inode *, const coord_t *); + + /* checks whether yet another hard links to this object can be + added */ + int (*can_add_link) (const struct inode *); + + /* checks whether hard links to this object can be removed */ + int (*can_rem_link) (const struct inode *); + + /* not empty for DIRECTORY_FILE_PLUGIN_ID only currently. It calls + detach of directory plugin to remove ".." */ + int (*detach) (struct inode *child, struct inode *parent); + + /* called when @child was just looked up in the @parent. 
It is not + empty for DIRECTORY_FILE_PLUGIN_ID only where it calls attach of + directory plugin */ + int (*bind) (struct inode *child, struct inode *parent); + + /* process safe-link during mount */ + int (*safelink) (struct inode *object, reiser4_safe_link_t link, + __u64 value); + + /* The couple of estimate methods for all file operations */ + struct { + reiser4_block_nr(*create) (const struct inode *); + reiser4_block_nr(*update) (const struct inode *); + reiser4_block_nr(*unlink) (const struct inode *, + const struct inode *); + } estimate; + + /* + * reiser4 specific part of inode has a union of structures which are + * specific to a plugin. This method is called when inode is read + * (read_inode) and when file is created (common_create_child) so that + * file plugin could initialize its inode data + */ + void (*init_inode_data) (struct inode *, reiser4_object_create_data * , + int); + + /* + * This method performs progressive deletion of items and whole nodes + * from right to left. + * + * @tap: the point deletion process begins from, + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that + * long cut_tree operation was interrupted for allowing atom commit . + */ + int (*cut_tree_worker) (tap_t *, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, struct inode *, + int, int *); + + /* called from ->destroy_inode() */ + void (*destroy_inode) (struct inode *); + + /* + * methods to serialize object identify. This is used, for example, by + * reiser4_{en,de}code_fh(). 
+ */ + struct { + /* store object's identity at @area */ + char *(*write) (struct inode *inode, char *area); + /* parse object from wire to the @obj */ + char *(*read) (char *area, reiser4_object_on_wire * obj); + /* given object identity in @obj, find or create its dentry */ + struct dentry *(*get) (struct super_block *s, + reiser4_object_on_wire * obj); + /* how many bytes ->wire.write() consumes */ + int (*size) (struct inode *inode); + /* finish with object identify */ + void (*done) (reiser4_object_on_wire * obj); + } wire; +} file_plugin; + +extern file_plugin file_plugins[LAST_FILE_PLUGIN_ID]; + +struct reiser4_object_on_wire { + file_plugin *plugin; + union { + struct { + obj_key_id key_id; + } std; + void *generic; + } u; +}; + +/* builtin dir-plugins */ +typedef enum { + HASHED_DIR_PLUGIN_ID, + SEEKABLE_HASHED_DIR_PLUGIN_ID, + LAST_DIR_ID +} reiser4_dir_id; + +typedef struct dir_plugin { + /* generic fields */ + plugin_header h; + + struct inode_operations * inode_ops; + struct file_operations * file_ops; + struct address_space_operations * as_ops; + + /* + * private methods: These are optional. If used they will allow you to + * minimize the amount of code needed to implement a deviation from + * some other method that uses them. You could logically argue that + * they should be a separate type of plugin. + */ + + struct dentry *(*get_parent) (struct inode *childdir); + + /* + * check whether "name" is acceptable name to be inserted into this + * object. Optionally implemented by directory-like objects. 
Can check + * for maximal length, reserved symbols etc + */ + int (*is_name_acceptable) (const struct inode *inode, const char *name, + int len); + + void (*build_entry_key) (const struct inode *dir /* directory where + * entry is (or will + * be) in.*/ , + const struct qstr *name /* name of file + * referenced by this + * entry */ , + reiser4_key * result /* resulting key of + * directory entry */ ); + int (*build_readdir_key) (struct file *dir, reiser4_key * result); + int (*add_entry) (struct inode *object, struct dentry *where, + reiser4_object_create_data * data, + reiser4_dir_entry_desc * entry); + int (*rem_entry) (struct inode *object, struct dentry *where, + reiser4_dir_entry_desc * entry); + + /* + * initialize directory structure for newly created object. For normal + * unix directories, insert dot and dotdot. + */ + int (*init) (struct inode *object, struct inode *parent, + reiser4_object_create_data * data); + + /* destroy directory */ + int (*done) (struct inode *child); + + /* called when @subdir was just looked up in the @dir */ + int (*attach) (struct inode *subdir, struct inode *dir); + int (*detach) (struct inode *subdir, struct inode *dir); + + struct { + reiser4_block_nr(*add_entry) (const struct inode *); + reiser4_block_nr(*rem_entry) (const struct inode *); + reiser4_block_nr(*unlink) (const struct inode *, + const struct inode *); + } estimate; +} dir_plugin; + +extern dir_plugin dir_plugins[LAST_DIR_ID]; + +typedef struct formatting_plugin { + /* generic fields */ + plugin_header h; + /* returns non-zero iff file's tail has to be stored + in a direct item. */ + int (*have_tail) (const struct inode *inode, loff_t size); +} formatting_plugin; + +/** + * Plugins of this interface implement different transaction models. + * Transaction model is a high-level block allocator, which assigns block + * numbers to dirty nodes, and, thereby, decides, how individual dirty + * nodes of an atom will be committed. 
+ */ +typedef struct txmod_plugin { + /* generic fields */ + plugin_header h; + /** + * allocate blocks in the FORWARD PARENT-FIRST context + * for formatted nodes + */ + int (*forward_alloc_formatted)(znode *node, const coord_t *parent_coord, + flush_pos_t *pos); //was allocate_znode_loaded + /** + * allocate blocks in the REVERSE PARENT-FIRST context + * for formatted nodes + */ + int (*reverse_alloc_formatted)(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos); // was reverse_relocate_test + /** + * allocate blocks in the FORWARD PARENT-FIRST context + * for unformatted nodes. + * + * This is called by handle_pos_on_twig to proceed extent unit + * flush_pos->coord is set to. It is to prepare for flushing + * sequence of not flushprepped nodes (slum). It supposes that + * slum starts at flush_pos->pos_in_unit position within the extent + */ + int (*forward_alloc_unformatted)(flush_pos_t *flush_pos); //was reiser4_alloc_extent + /** + * allocale blocks for unformatted nodes in squeeze_right_twig(). + * @coord is set to extent unit + */ + squeeze_result (*squeeze_alloc_unformatted)(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key); // was_squalloc_extent +} txmod_plugin; + +typedef struct hash_plugin { + /* generic fields */ + plugin_header h; + /* computes hash of the given name */ + __u64(*hash) (const unsigned char *name, int len); +} hash_plugin; + +typedef struct cipher_plugin { + /* generic fields */ + plugin_header h; + struct crypto_blkcipher * (*alloc) (void); + void (*free) (struct crypto_blkcipher *tfm); + /* Offset translator. For each offset this returns (k * offset), where + k (k >= 1) is an expansion factor of the cipher algorithm. + For all symmetric algorithms k == 1. For asymmetric algorithms (which + inflate data) offset translation guarantees that all disk cluster's + units will have keys smaller then next cluster's one. 
+ */ + loff_t(*scale) (struct inode *inode, size_t blocksize, loff_t src); + /* Cipher algorithms can accept data only by chunks of cipher block + size. This method is to align any flow up to cipher block size when + we pass it to cipher algorithm. To align means to append padding of + special format specific to the cipher algorithm */ + int (*align_stream) (__u8 *tail, int clust_size, int blocksize); + /* low-level key manager (check, install, etc..) */ + int (*setkey) (struct crypto_tfm *tfm, const __u8 *key, + unsigned int keylen); + /* main text processing procedures */ + void (*encrypt) (__u32 *expkey, __u8 *dst, const __u8 *src); + void (*decrypt) (__u32 *expkey, __u8 *dst, const __u8 *src); +} cipher_plugin; + +typedef struct digest_plugin { + /* generic fields */ + plugin_header h; + /* fingerprint size in bytes */ + int fipsize; + struct crypto_hash * (*alloc) (void); + void (*free) (struct crypto_hash *tfm); +} digest_plugin; + +typedef struct compression_plugin { + /* generic fields */ + plugin_header h; + int (*init) (void); + /* the maximum number of bytes the size of the "compressed" data can + * exceed the uncompressed data. 
*/ + int (*overrun) (unsigned src_len); + coa_t(*alloc) (tfm_action act); + void (*free) (coa_t coa, tfm_action act); + /* minimal size of the flow we still try to compress */ + int (*min_size_deflate) (void); + __u32(*checksum) (char *data, __u32 length); + /* main transform procedures */ + void (*compress) (coa_t coa, __u8 *src_first, size_t src_len, + __u8 *dst_first, size_t *dst_len); + void (*decompress) (coa_t coa, __u8 *src_first, size_t src_len, + __u8 *dst_first, size_t *dst_len); +} compression_plugin; + +typedef struct compression_mode_plugin { + /* generic fields */ + plugin_header h; + /* this is called when estimating compressibility + of a logical cluster by its content */ + int (*should_deflate) (struct inode *inode, cloff_t index); + /* this is called when results of compression should be saved */ + int (*accept_hook) (struct inode *inode, cloff_t index); + /* this is called when results of compression should be discarded */ + int (*discard_hook) (struct inode *inode, cloff_t index); +} compression_mode_plugin; + +typedef struct cluster_plugin { + /* generic fields */ + plugin_header h; + int shift; +} cluster_plugin; + +typedef struct sd_ext_plugin { + /* generic fields */ + plugin_header h; + int (*present) (struct inode *inode, char **area, int *len); + int (*absent) (struct inode *inode); + int (*save_len) (struct inode *inode); + int (*save) (struct inode *inode, char **area); + /* alignment requirement for this stat-data part */ + int alignment; +} sd_ext_plugin; + +/* this plugin contains methods to allocate objectid for newly created files, + to deallocate objectid when file gets removed, to report number of used and + free objectids */ +typedef struct oid_allocator_plugin { + /* generic fields */ + plugin_header h; + int (*init_oid_allocator) (reiser4_oid_allocator * map, __u64 nr_files, + __u64 oids); + /* used to report statfs->f_files */ + __u64(*oids_used) (reiser4_oid_allocator * map); + /* get next oid to use */ + __u64(*next_oid) 
(reiser4_oid_allocator * map); + /* used to report statfs->f_ffree */ + __u64(*oids_free) (reiser4_oid_allocator * map); + /* allocate new objectid */ + int (*allocate_oid) (reiser4_oid_allocator * map, oid_t *); + /* release objectid */ + int (*release_oid) (reiser4_oid_allocator * map, oid_t); + /* how many pages to reserve in transaction for allocation of new + objectid */ + int (*oid_reserve_allocate) (reiser4_oid_allocator * map); + /* how many pages to reserve in transaction for freeing of an + objectid */ + int (*oid_reserve_release) (reiser4_oid_allocator * map); + void (*print_info) (const char *, reiser4_oid_allocator *); +} oid_allocator_plugin; + +/* disk layout plugin: this specifies super block, journal, bitmap (if there + are any) locations, etc */ +typedef struct disk_format_plugin { + /* generic fields */ + plugin_header h; + /* replay journal, initialize super_info_data, etc */ + int (*init_format) (struct super_block *, void *data); + + /* key of root directory stat data */ + const reiser4_key * (*root_dir_key) (const struct super_block *); + + int (*release) (struct super_block *); + jnode * (*log_super) (struct super_block *); + int (*check_open) (const struct inode *object); + int (*version_update) (struct super_block *); +} disk_format_plugin; + +struct jnode_plugin { + /* generic fields */ + plugin_header h; + int (*init) (jnode * node); + int (*parse) (jnode * node); + struct address_space *(*mapping) (const jnode * node); + unsigned long (*index) (const jnode * node); + jnode * (*clone) (jnode * node); +}; + +/* plugin instance. */ +/* */ +/* This is "wrapper" union for all types of plugins. Most of the code uses */ +/* plugins of particular type (file_plugin, dir_plugin, etc.) rather than */ +/* operates with pointers to reiser4_plugin. This union is only used in */ +/* some generic code in plugin/plugin.c that operates on all */ +/* plugins. 
Technically speaking purpose of this union is to add type */ +/* safety to said generic code: each plugin type (file_plugin, for */ +/* example), contains plugin_header as its first memeber. This first member */ +/* is located at the same place in memory as .h member of */ +/* reiser4_plugin. Generic code, obtains pointer to reiser4_plugin and */ +/* looks in the .h which is header of plugin type located in union. This */ +/* allows to avoid type-casts. */ +union reiser4_plugin { + /* generic fields */ + plugin_header h; + /* file plugin */ + file_plugin file; + /* directory plugin */ + dir_plugin dir; + /* hash plugin, used by directory plugin */ + hash_plugin hash; + /* fibration plugin used by directory plugin */ + fibration_plugin fibration; + /* cipher transform plugin, used by file plugin */ + cipher_plugin cipher; + /* digest transform plugin, used by file plugin */ + digest_plugin digest; + /* compression transform plugin, used by file plugin */ + compression_plugin compression; + /* tail plugin, used by file plugin */ + formatting_plugin formatting; + /* permission plugin */ + perm_plugin perm; + /* node plugin */ + node_plugin node; + /* item plugin */ + item_plugin item; + /* stat-data extension plugin */ + sd_ext_plugin sd_ext; + /* disk layout plugin */ + disk_format_plugin format; + /* object id allocator plugin */ + oid_allocator_plugin oid_allocator; + /* plugin for different jnode types */ + jnode_plugin jnode; + /* compression mode plugin, used by object plugin */ + compression_mode_plugin compression_mode; + /* cluster plugin, used by object plugin */ + cluster_plugin clust; + /* transaction mode plugin */ + txmod_plugin txmod; + /* place-holder for new plugin types that can be registered + dynamically, and used by other dynamically loaded plugins. 
*/ + void *generic; +}; + +struct reiser4_plugin_ops { + /* called when plugin is initialized */ + int (*init) (reiser4_plugin * plugin); + /* called when plugin is unloaded */ + int (*done) (reiser4_plugin * plugin); + /* load given plugin from disk */ + int (*load) (struct inode *inode, + reiser4_plugin * plugin, char **area, int *len); + /* how many space is required to store this plugin's state + in stat-data */ + int (*save_len) (struct inode *inode, reiser4_plugin * plugin); + /* save persistent plugin-data to disk */ + int (*save) (struct inode *inode, reiser4_plugin * plugin, + char **area); + /* alignment requirement for on-disk state of this plugin + in number of bytes */ + int alignment; + /* install itself into given inode. This can return error + (e.g., you cannot change hash of non-empty directory). */ + int (*change) (struct inode *inode, reiser4_plugin * plugin, + pset_member memb); + /* install itself into given inode. This can return error + (e.g., you cannot change hash of non-empty directory). 
*/ + int (*inherit) (struct inode *inode, struct inode *parent, + reiser4_plugin * plugin); +}; + +/* functions implemented in fs/reiser4/plugin/plugin.c */ + +/* stores plugin reference in reiser4-specific part of inode */ +extern int set_object_plugin(struct inode *inode, reiser4_plugin_id id); +extern int init_plugins(void); + +/* builtin plugins */ + +/* builtin hash-plugins */ + +typedef enum { + RUPASOV_HASH_ID, + R5_HASH_ID, + TEA_HASH_ID, + FNV1_HASH_ID, + DEGENERATE_HASH_ID, + LAST_HASH_ID +} reiser4_hash_id; + +/* builtin cipher plugins */ + +typedef enum { + NONE_CIPHER_ID, + LAST_CIPHER_ID +} reiser4_cipher_id; + +/* builtin digest plugins */ + +typedef enum { + SHA256_32_DIGEST_ID, + LAST_DIGEST_ID +} reiser4_digest_id; + +/* builtin compression mode plugins */ +typedef enum { + NONE_COMPRESSION_MODE_ID, + LATTD_COMPRESSION_MODE_ID, + ULTIM_COMPRESSION_MODE_ID, + FORCE_COMPRESSION_MODE_ID, + CONVX_COMPRESSION_MODE_ID, + LAST_COMPRESSION_MODE_ID +} reiser4_compression_mode_id; + +/* builtin cluster plugins */ +typedef enum { + CLUSTER_64K_ID, + CLUSTER_32K_ID, + CLUSTER_16K_ID, + CLUSTER_8K_ID, + CLUSTER_4K_ID, + LAST_CLUSTER_ID +} reiser4_cluster_id; + +/* builtin tail packing policies */ +typedef enum { + NEVER_TAILS_FORMATTING_ID, + ALWAYS_TAILS_FORMATTING_ID, + SMALL_FILE_FORMATTING_ID, + LAST_TAIL_FORMATTING_ID +} reiser4_formatting_id; + +/* builtin transaction models */ +typedef enum { + HYBRID_TXMOD_ID, + JOURNAL_TXMOD_ID, + WA_TXMOD_ID, + LAST_TXMOD_ID +} reiser4_txmod_id; + + +/* data type used to pack parameters that we pass to vfs object creation + function create_object() */ +struct reiser4_object_create_data { + /* plugin to control created object */ + reiser4_file_id id; + /* mode of regular file, directory or special file */ +/* what happens if some other sort of perm plugin is in use? 
*/ + umode_t mode; + /* rdev of special file */ + dev_t rdev; + /* symlink target */ + const char *name; + /* add here something for non-standard objects you invent, like + query for interpolation file etc. */ + + struct reiser4_crypto_info *crypto; + + struct inode *parent; + struct dentry *dentry; +}; + +/* description of directory entry being created/destroyed/sought for + + It is passed down to the directory plugin and farther to the + directory item plugin methods. Creation of new directory is done in + several stages: first we search for an entry with the same name, then + create new one. reiser4_dir_entry_desc is used to store some information + collected at some stage of this process and required later: key of + item that we want to insert/delete and pointer to an object that will + be bound by the new directory entry. Probably some more fields will + be added there. + +*/ +struct reiser4_dir_entry_desc { + /* key of directory entry */ + reiser4_key key; + /* object bound by this entry. */ + struct inode *obj; +}; + +#define MAX_PLUGIN_TYPE_LABEL_LEN 32 +#define MAX_PLUGIN_PLUG_LABEL_LEN 32 + +#define PLUGIN_BY_ID(TYPE, ID, FIELD) \ +static inline TYPE *TYPE ## _by_id(reiser4_plugin_id id) \ +{ \ + reiser4_plugin *plugin = plugin_by_id(ID, id); \ + return plugin ? &plugin->FIELD : NULL; \ +} \ +static inline TYPE *TYPE ## _by_disk_id(reiser4_tree * tree, d16 *id) \ +{ \ + reiser4_plugin *plugin = plugin_by_disk_id(tree, ID, id); \ + return plugin ? &plugin->FIELD : NULL; \ +} \ +static inline TYPE *TYPE ## _by_unsafe_id(reiser4_plugin_id id) \ +{ \ + reiser4_plugin *plugin = plugin_by_unsafe_id(ID, id); \ + return plugin ? 
&plugin->FIELD : NULL; \ +} \ +static inline reiser4_plugin* TYPE ## _to_plugin(TYPE* plugin) \ +{ \ + return (reiser4_plugin *) plugin; \ +} \ +static inline reiser4_plugin_id TYPE ## _id(TYPE* plugin) \ +{ \ + return TYPE ## _to_plugin(plugin)->h.id; \ +} \ +typedef struct { int foo; } TYPE ## _plugin_dummy + +static inline int get_release_number_major(void) +{ + return LAST_FORMAT_ID - 1; +} + +static inline int get_release_number_minor(void) +{ + return PLUGIN_LIBRARY_VERSION; +} + +PLUGIN_BY_ID(item_plugin, REISER4_ITEM_PLUGIN_TYPE, item); +PLUGIN_BY_ID(file_plugin, REISER4_FILE_PLUGIN_TYPE, file); +PLUGIN_BY_ID(dir_plugin, REISER4_DIR_PLUGIN_TYPE, dir); +PLUGIN_BY_ID(node_plugin, REISER4_NODE_PLUGIN_TYPE, node); +PLUGIN_BY_ID(sd_ext_plugin, REISER4_SD_EXT_PLUGIN_TYPE, sd_ext); +PLUGIN_BY_ID(perm_plugin, REISER4_PERM_PLUGIN_TYPE, perm); +PLUGIN_BY_ID(hash_plugin, REISER4_HASH_PLUGIN_TYPE, hash); +PLUGIN_BY_ID(fibration_plugin, REISER4_FIBRATION_PLUGIN_TYPE, fibration); +PLUGIN_BY_ID(cipher_plugin, REISER4_CIPHER_PLUGIN_TYPE, cipher); +PLUGIN_BY_ID(digest_plugin, REISER4_DIGEST_PLUGIN_TYPE, digest); +PLUGIN_BY_ID(compression_plugin, REISER4_COMPRESSION_PLUGIN_TYPE, compression); +PLUGIN_BY_ID(formatting_plugin, REISER4_FORMATTING_PLUGIN_TYPE, formatting); +PLUGIN_BY_ID(disk_format_plugin, REISER4_FORMAT_PLUGIN_TYPE, format); +PLUGIN_BY_ID(jnode_plugin, REISER4_JNODE_PLUGIN_TYPE, jnode); +PLUGIN_BY_ID(compression_mode_plugin, REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + compression_mode); +PLUGIN_BY_ID(cluster_plugin, REISER4_CLUSTER_PLUGIN_TYPE, clust); +PLUGIN_BY_ID(txmod_plugin, REISER4_TXMOD_PLUGIN_TYPE, txmod); + +extern int save_plugin_id(reiser4_plugin * plugin, d16 * area); + +extern struct list_head *get_plugin_list(reiser4_plugin_type type_id); + +#define for_all_plugins(ptype, plugin) \ +for (plugin = list_entry(get_plugin_list(ptype)->next, reiser4_plugin, h.linkage); \ + get_plugin_list(ptype) != &plugin->h.linkage; \ + plugin = 
list_entry(plugin->h.linkage.next, reiser4_plugin, h.linkage)) + + +extern int grab_plugin_pset(struct inode *self, struct inode *ancestor, + pset_member memb); +extern int force_plugin_pset(struct inode *self, pset_member memb, + reiser4_plugin *plug); +extern int finish_pset(struct inode *inode); + +/* defined in fs/reiser4/plugin/object.c */ +extern file_plugin file_plugins[LAST_FILE_PLUGIN_ID]; +/* defined in fs/reiser4/plugin/object.c */ +extern dir_plugin dir_plugins[LAST_DIR_ID]; +/* defined in fs/reiser4/plugin/item/static_stat.c */ +extern sd_ext_plugin sd_ext_plugins[LAST_SD_EXTENSION]; +/* defined in fs/reiser4/plugin/hash.c */ +extern hash_plugin hash_plugins[LAST_HASH_ID]; +/* defined in fs/reiser4/plugin/fibration.c */ +extern fibration_plugin fibration_plugins[LAST_FIBRATION_ID]; +/* defined in fs/reiser4/plugin/txmod.c */ +extern txmod_plugin txmod_plugins[LAST_TXMOD_ID]; +/* defined in fs/reiser4/plugin/crypt.c */ +extern cipher_plugin cipher_plugins[LAST_CIPHER_ID]; +/* defined in fs/reiser4/plugin/digest.c */ +extern digest_plugin digest_plugins[LAST_DIGEST_ID]; +/* defined in fs/reiser4/plugin/compress/compress.c */ +extern compression_plugin compression_plugins[LAST_COMPRESSION_ID]; +/* defined in fs/reiser4/plugin/compress/compression_mode.c */ +extern compression_mode_plugin +compression_mode_plugins[LAST_COMPRESSION_MODE_ID]; +/* defined in fs/reiser4/plugin/cluster.c */ +extern cluster_plugin cluster_plugins[LAST_CLUSTER_ID]; +/* defined in fs/reiser4/plugin/tail.c */ +extern formatting_plugin formatting_plugins[LAST_TAIL_FORMATTING_ID]; +/* defined in fs/reiser4/plugin/security/security.c */ +extern perm_plugin perm_plugins[LAST_PERM_ID]; +/* defined in fs/reiser4/plugin/item/item.c */ +extern item_plugin item_plugins[LAST_ITEM_ID]; +/* defined in fs/reiser4/plugin/node/node.c */ +extern node_plugin node_plugins[LAST_NODE_ID]; +/* defined in fs/reiser4/plugin/disk_format/disk_format.c */ +extern disk_format_plugin 
format_plugins[LAST_FORMAT_ID]; + +/* __FS_REISER4_PLUGIN_TYPES_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/plugin_header.h b/fs/reiser4/plugin/plugin_header.h new file mode 100644 index 000000000000..5ee74af48bff --- /dev/null +++ b/fs/reiser4/plugin/plugin_header.h @@ -0,0 +1,150 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* plugin header. Data structures required by all plugin types. */ + +#if !defined(__PLUGIN_HEADER_H__) +#define __PLUGIN_HEADER_H__ + +/* plugin data-types and constants */ + +#include "../debug.h" +#include "../dformat.h" + +/* The list of Reiser4 interfaces */ +typedef enum { + REISER4_FILE_PLUGIN_TYPE, /* manage VFS objects */ + REISER4_DIR_PLUGIN_TYPE, /* manage directories */ + REISER4_ITEM_PLUGIN_TYPE, /* manage items */ + REISER4_NODE_PLUGIN_TYPE, /* manage formatted nodes */ + REISER4_HASH_PLUGIN_TYPE, /* hash methods */ + REISER4_FIBRATION_PLUGIN_TYPE, /* directory fibrations */ + REISER4_FORMATTING_PLUGIN_TYPE, /* dispatching policy */ + REISER4_PERM_PLUGIN_TYPE, /* stub (vacancy) */ + REISER4_SD_EXT_PLUGIN_TYPE, /* manage stat-data extensions */ + REISER4_FORMAT_PLUGIN_TYPE, /* disk format specifications */ + REISER4_JNODE_PLUGIN_TYPE, /* manage in-memory headers */ + REISER4_CIPHER_PLUGIN_TYPE, /* cipher transform methods */ + REISER4_DIGEST_PLUGIN_TYPE, /* digest transform methods */ + REISER4_COMPRESSION_PLUGIN_TYPE, /* compression methods */ + REISER4_COMPRESSION_MODE_PLUGIN_TYPE, /* dispatching policies */ + REISER4_CLUSTER_PLUGIN_TYPE, /* manage logical clusters */ + REISER4_TXMOD_PLUGIN_TYPE, /* transaction models */ + REISER4_PLUGIN_TYPES +} reiser4_plugin_type; + +/* Supported plugin groups */ +typedef enum { + REISER4_DIRECTORY_FILE, + REISER4_REGULAR_FILE, + REISER4_SYMLINK_FILE, + REISER4_SPECIAL_FILE, +} file_plugin_group; 
+ +struct reiser4_plugin_ops; +/* generic plugin operations, supported by each + plugin type. */ +typedef struct reiser4_plugin_ops reiser4_plugin_ops; + +/* the common part of all plugin instances. */ +typedef struct plugin_header { + /* plugin type */ + reiser4_plugin_type type_id; + /* id of this plugin */ + reiser4_plugin_id id; + /* bitmask of groups the plugin belongs to. */ + reiser4_plugin_groups groups; + /* plugin operations */ + reiser4_plugin_ops *pops; +/* NIKITA-FIXME-HANS: usage of and access to label and desc is not commented and + * defined. */ + /* short label of this plugin */ + const char *label; + /* descriptive string.. */ + const char *desc; + /* list linkage */ + struct list_head linkage; +} plugin_header; + +#define plugin_of_group(plug, group) (plug->h.groups & (1 << group)) + +/* PRIVATE INTERFACES */ +/* NIKITA-FIXME-HANS: what is this for and why does it duplicate what is in + * plugin_header? */ +/* plugin type representation. */ +struct reiser4_plugin_type_data { + /* internal plugin type identifier. Should coincide with + index of this item in plugins[] array. */ + reiser4_plugin_type type_id; + /* short symbolic label of this plugin type. Should be no longer + than MAX_PLUGIN_TYPE_LABEL_LEN characters including '\0'. 
*/ + const char *label; + /* plugin type description longer than .label */ + const char *desc; + +/* NIKITA-FIXME-HANS: define built-in */ + /* number of built-in plugin instances of this type */ + int builtin_num; + /* array of built-in plugins */ + void *builtin; + struct list_head plugins_list; + size_t size; +}; + +extern struct reiser4_plugin_type_data plugins[REISER4_PLUGIN_TYPES]; + +int is_plugin_type_valid(reiser4_plugin_type type); +int is_plugin_id_valid(reiser4_plugin_type type, reiser4_plugin_id id); + +static inline reiser4_plugin *plugin_at(struct reiser4_plugin_type_data *ptype, + int i) +{ + char *builtin; + + builtin = ptype->builtin; + return (reiser4_plugin *) (builtin + i * ptype->size); +} + +/* return plugin by its @type_id and @id */ +static inline reiser4_plugin *plugin_by_id(reiser4_plugin_type type, + reiser4_plugin_id id) +{ + assert("nikita-1651", is_plugin_type_valid(type)); + assert("nikita-1652", is_plugin_id_valid(type, id)); + return plugin_at(&plugins[type], id); +} + +extern reiser4_plugin *plugin_by_unsafe_id(reiser4_plugin_type type_id, + reiser4_plugin_id id); + +/** + * plugin_by_disk_id - get reiser4_plugin + * @type_id: plugin type id + * @did: plugin id in disk format + * + * Returns reiser4_plugin by plugin type id an dplugin_id. + */ +static inline reiser4_plugin *plugin_by_disk_id(reiser4_tree * tree UNUSED_ARG, + reiser4_plugin_type type_id, + __le16 *plugin_id) +{ + /* + * what we should do properly is to maintain within each file-system a + * dictionary that maps on-disk plugin ids to "universal" ids. This + * dictionary will be resolved on mount time, so that this function + * will perform just one additional array lookup. 
+ */ + return plugin_by_unsafe_id(type_id, le16_to_cpu(*plugin_id)); +} + +/* __PLUGIN_HEADER_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/plugin_set.c b/fs/reiser4/plugin/plugin_set.c new file mode 100644 index 000000000000..cae7a295515d --- /dev/null +++ b/fs/reiser4/plugin/plugin_set.c @@ -0,0 +1,387 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* This file contains Reiser4 plugin set operations */ + +/* plugin sets + * + * Each file in reiser4 is controlled by a whole set of plugins (file plugin, + * directory plugin, hash plugin, tail policy plugin, security plugin, etc.) + * assigned (inherited, deduced from mode bits, etc.) at creation time. This + * set of plugins (so called pset) is described by structure plugin_set (see + * plugin/plugin_set.h), which contains pointers to all required plugins. + * + * Children can inherit some pset members from their parent, however sometimes + * it is useful to specify members different from parent ones. Since object's + * pset can not be easily changed without fatal consequences, we use for this + * purpose another special plugin table (so called hset, or heir set) described + * by the same structure. + * + * Inode only stores a pointers to pset and hset. Different inodes with the + * same set of pset (hset) members point to the same pset (hset). This is + * archived by storing psets and hsets in global hash table. Races are avoided + * by simple (and efficient so far) solution of never recycling psets, even + * when last inode pointing to it is destroyed. 
+ */ + +#include "../debug.h" +#include "../super.h" +#include "plugin_set.h" + +#include +#include + +/* slab for plugin sets */ +static struct kmem_cache *plugin_set_slab; + +static spinlock_t plugin_set_lock[8] __cacheline_aligned_in_smp = { + __SPIN_LOCK_UNLOCKED(plugin_set_lock[0]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[1]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[2]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[3]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[4]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[5]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[6]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[7]) +}; + +/* hash table support */ + +#define PS_TABLE_SIZE (32) + +static inline plugin_set *cast_to(const unsigned long *a) +{ + return container_of(a, plugin_set, hashval); +} + +static inline int pseq(const unsigned long *a1, const unsigned long *a2) +{ + plugin_set *set1; + plugin_set *set2; + + /* make sure fields are not missed in the code below */ + cassert(sizeof *set1 == + sizeof set1->hashval + + sizeof set1->link + + sizeof set1->file + + sizeof set1->dir + + sizeof set1->perm + + sizeof set1->formatting + + sizeof set1->hash + + sizeof set1->fibration + + sizeof set1->sd + + sizeof set1->dir_item + + sizeof set1->cipher + + sizeof set1->digest + + sizeof set1->compression + + sizeof set1->compression_mode + + sizeof set1->cluster + + sizeof set1->create); + + set1 = cast_to(a1); + set2 = cast_to(a2); + return + set1->hashval == set2->hashval && + set1->file == set2->file && + set1->dir == set2->dir && + set1->perm == set2->perm && + set1->formatting == set2->formatting && + set1->hash == set2->hash && + set1->fibration == set2->fibration && + set1->sd == set2->sd && + set1->dir_item == set2->dir_item && + set1->cipher == set2->cipher && + set1->digest == set2->digest && + set1->compression == set2->compression && + set1->compression_mode == set2->compression_mode && + set1->cluster == set2->cluster && + set1->create == set2->create; +} + +#define HASH_FIELD(hash, set, 
field) \ +({ \ + (hash) += (unsigned long)(set)->field >> 2; \ +}) + +static inline unsigned long calculate_hash(const plugin_set * set) +{ + unsigned long result; + + result = 0; + HASH_FIELD(result, set, file); + HASH_FIELD(result, set, dir); + HASH_FIELD(result, set, perm); + HASH_FIELD(result, set, formatting); + HASH_FIELD(result, set, hash); + HASH_FIELD(result, set, fibration); + HASH_FIELD(result, set, sd); + HASH_FIELD(result, set, dir_item); + HASH_FIELD(result, set, cipher); + HASH_FIELD(result, set, digest); + HASH_FIELD(result, set, compression); + HASH_FIELD(result, set, compression_mode); + HASH_FIELD(result, set, cluster); + HASH_FIELD(result, set, create); + return result & (PS_TABLE_SIZE - 1); +} + +static inline unsigned long +pshash(ps_hash_table * table, const unsigned long *a) +{ + return *a; +} + +/* The hash table definition */ +#define KMALLOC(size) kmalloc((size), reiser4_ctx_gfp_mask_get()) +#define KFREE(ptr, size) kfree(ptr) +TYPE_SAFE_HASH_DEFINE(ps, plugin_set, unsigned long, hashval, link, pshash, + pseq); +#undef KFREE +#undef KMALLOC + +static ps_hash_table ps_table; +static plugin_set empty_set = { + .hashval = 0, + .file = NULL, + .dir = NULL, + .perm = NULL, + .formatting = NULL, + .hash = NULL, + .fibration = NULL, + .sd = NULL, + .dir_item = NULL, + .cipher = NULL, + .digest = NULL, + .compression = NULL, + .compression_mode = NULL, + .cluster = NULL, + .create = NULL, + .link = {NULL} +}; + +plugin_set *plugin_set_get_empty(void) +{ + return &empty_set; +} + +void plugin_set_put(plugin_set * set) +{ +} + +static inline unsigned long *pset_field(plugin_set * set, int offset) +{ + return (unsigned long *)(((char *)set) + offset); +} + +static int plugin_set_field(plugin_set ** set, const unsigned long val, + const int offset) +{ + unsigned long *spot; + spinlock_t *lock; + plugin_set replica; + plugin_set *twin; + plugin_set *psal; + plugin_set *orig; + + assert("nikita-2902", set != NULL); + assert("nikita-2904", *set != 
NULL); + + spot = pset_field(*set, offset); + if (unlikely(*spot == val)) + return 0; + + replica = *(orig = *set); + *pset_field(&replica, offset) = val; + replica.hashval = calculate_hash(&replica); + rcu_read_lock(); + twin = ps_hash_find(&ps_table, &replica.hashval); + if (unlikely(twin == NULL)) { + rcu_read_unlock(); + psal = kmem_cache_alloc(plugin_set_slab, + reiser4_ctx_gfp_mask_get()); + if (psal == NULL) + return RETERR(-ENOMEM); + *psal = replica; + lock = &plugin_set_lock[replica.hashval & 7]; + spin_lock(lock); + twin = ps_hash_find(&ps_table, &replica.hashval); + if (likely(twin == NULL)) { + *set = psal; + ps_hash_insert_rcu(&ps_table, psal); + } else { + *set = twin; + kmem_cache_free(plugin_set_slab, psal); + } + spin_unlock(lock); + } else { + rcu_read_unlock(); + *set = twin; + } + return 0; +} + +static struct { + int offset; + reiser4_plugin_groups groups; + reiser4_plugin_type type; +} pset_descr[PSET_LAST] = { + [PSET_FILE] = { + .offset = offsetof(plugin_set, file), + .type = REISER4_FILE_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_DIR] = { + .offset = offsetof(plugin_set, dir), + .type = REISER4_DIR_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_PERM] = { + .offset = offsetof(plugin_set, perm), + .type = REISER4_PERM_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_FORMATTING] = { + .offset = offsetof(plugin_set, formatting), + .type = REISER4_FORMATTING_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_HASH] = { + .offset = offsetof(plugin_set, hash), + .type = REISER4_HASH_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_FIBRATION] = { + .offset = offsetof(plugin_set, fibration), + .type = REISER4_FIBRATION_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_SD] = { + .offset = offsetof(plugin_set, sd), + .type = REISER4_ITEM_PLUGIN_TYPE, + .groups = (1 << STAT_DATA_ITEM_TYPE) + }, + [PSET_DIR_ITEM] = { + .offset = offsetof(plugin_set, dir_item), + .type = REISER4_ITEM_PLUGIN_TYPE, + .groups = (1 << DIR_ENTRY_ITEM_TYPE) + }, + [PSET_CIPHER] = { + .offset = offsetof(plugin_set, cipher), + 
.type = REISER4_CIPHER_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_DIGEST] = { + .offset = offsetof(plugin_set, digest), + .type = REISER4_DIGEST_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_COMPRESSION] = { + .offset = offsetof(plugin_set, compression), + .type = REISER4_COMPRESSION_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_COMPRESSION_MODE] = { + .offset = offsetof(plugin_set, compression_mode), + .type = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_CLUSTER] = { + .offset = offsetof(plugin_set, cluster), + .type = REISER4_CLUSTER_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_CREATE] = { + .offset = offsetof(plugin_set, create), + .type = REISER4_FILE_PLUGIN_TYPE, + .groups = (1 << REISER4_REGULAR_FILE) + } +}; + +#define DEFINE_PSET_OPS(PREFIX) \ + reiser4_plugin_type PREFIX##_member_to_type_unsafe(pset_member memb) \ +{ \ + if (memb > PSET_LAST) \ + return REISER4_PLUGIN_TYPES; \ + return pset_descr[memb].type; \ +} \ + \ +int PREFIX##_set_unsafe(plugin_set ** set, pset_member memb, \ + reiser4_plugin * plugin) \ +{ \ + assert("nikita-3492", set != NULL); \ + assert("nikita-3493", *set != NULL); \ + assert("nikita-3494", plugin != NULL); \ + assert("nikita-3495", 0 <= memb && memb < PSET_LAST); \ + assert("nikita-3496", plugin->h.type_id == pset_descr[memb].type); \ + \ + if (pset_descr[memb].groups) \ + if (!(pset_descr[memb].groups & plugin->h.groups)) \ + return -EINVAL; \ + \ + return plugin_set_field(set, \ + (unsigned long)plugin, pset_descr[memb].offset); \ +} \ + \ +reiser4_plugin *PREFIX##_get(plugin_set * set, pset_member memb) \ +{ \ + assert("nikita-3497", set != NULL); \ + assert("nikita-3498", 0 <= memb && memb < PSET_LAST); \ + \ + return *(reiser4_plugin **) (((char *)set) + pset_descr[memb].offset); \ +} + +DEFINE_PSET_OPS(aset); + +int set_plugin(plugin_set ** set, pset_member memb, reiser4_plugin * plugin) +{ + return plugin_set_field(set, + (unsigned long)plugin, pset_descr[memb].offset); +} + +/** + * init_plugin_set - create plugin set 
cache and hash table + * + * Initializes slab cache of plugin_set-s and their hash table. It is part of + * reiser4 module initialization. + */ +int init_plugin_set(void) +{ + int result; + + result = ps_hash_init(&ps_table, PS_TABLE_SIZE); + if (result == 0) { + plugin_set_slab = kmem_cache_create("plugin_set", + sizeof(plugin_set), 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (plugin_set_slab == NULL) + result = RETERR(-ENOMEM); + } + return result; +} + +/** + * done_plugin_set - delete plugin_set cache and plugin_set hash table + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_plugin_set(void) +{ + plugin_set *cur, *next; + + for_all_in_htable(&ps_table, ps, cur, next) { + ps_hash_remove(&ps_table, cur); + kmem_cache_free(plugin_set_slab, cur); + } + destroy_reiser4_cache(&plugin_set_slab); + ps_hash_done(&ps_table); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/plugin/plugin_set.h b/fs/reiser4/plugin/plugin_set.h new file mode 100644 index 000000000000..5afb61a3d9ab --- /dev/null +++ b/fs/reiser4/plugin/plugin_set.h @@ -0,0 +1,78 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 plugin set definition. + See fs/reiser4/plugin/plugin_set.c for details */ + +#if !defined(__PLUGIN_SET_H__) +#define __PLUGIN_SET_H__ + +#include "../type_safe_hash.h" +#include "plugin.h" + +#include + +struct plugin_set; +typedef struct plugin_set plugin_set; + +TYPE_SAFE_HASH_DECLARE(ps, plugin_set); + +struct plugin_set { + unsigned long hashval; + /* plugin of file */ + file_plugin *file; + /* plugin of dir */ + dir_plugin *dir; + /* perm plugin for this file */ + perm_plugin *perm; + /* tail policy plugin. Only meaningful for regular files */ + formatting_plugin *formatting; + /* hash plugin. Only meaningful for directories. 
*/ + hash_plugin *hash; + /* fibration plugin. Only meaningful for directories. */ + fibration_plugin *fibration; + /* plugin of stat-data */ + item_plugin *sd; + /* plugin of items a directory is built of */ + item_plugin *dir_item; + /* cipher plugin */ + cipher_plugin *cipher; + /* digest plugin */ + digest_plugin *digest; + /* compression plugin */ + compression_plugin *compression; + /* compression mode plugin */ + compression_mode_plugin *compression_mode; + /* cluster plugin */ + cluster_plugin *cluster; + /* this specifies file plugin of regular children. + only meaningful for directories */ + file_plugin *create; + ps_hash_link link; +}; + +extern plugin_set *plugin_set_get_empty(void); +extern void plugin_set_put(plugin_set * set); + +extern int init_plugin_set(void); +extern void done_plugin_set(void); + +extern reiser4_plugin *aset_get(plugin_set * set, pset_member memb); +extern int set_plugin(plugin_set ** set, pset_member memb, + reiser4_plugin * plugin); +extern int aset_set_unsafe(plugin_set ** set, pset_member memb, + reiser4_plugin * plugin); +extern reiser4_plugin_type aset_member_to_type_unsafe(pset_member memb); + +/* __PLUGIN_SET_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/regular.c b/fs/reiser4/plugin/regular.c new file mode 100644 index 000000000000..9918e9563e3f --- /dev/null +++ b/fs/reiser4/plugin/regular.c @@ -0,0 +1,44 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Contains Reiser4 regular plugins which: + . specify a set of reiser4 regular object plugins, + . 
used by directory plugin to create entries powered by specified + regular plugins */ + +#include "plugin.h" + +regular_plugin regular_plugins[LAST_REGULAR_ID] = { + [UF_REGULAR_ID] = { + .h = { + .type_id = REISER4_REGULAR_PLUGIN_TYPE, + .id = UF_REGULAR_ID, + .pops = NULL, + .label = "unixfile", + .desc = "Unix file regular plugin", + .linkage = {NULL, NULL} + }, + .id = UNIX_FILE_PLUGIN_ID + }, + [CRC_REGULAR_ID] = { + .h = { + .type_id = REISER4_REGULAR_PLUGIN_TYPE, + .id = CRC_REGULAR_ID, + .pops = NULL, + .label = "cryptcompress", + .desc = "Cryptcompress regular plugin", + .linkage = {NULL, NULL} + }, + .id = CRC_FILE_PLUGIN_ID + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/security/Makefile b/fs/reiser4/plugin/security/Makefile new file mode 100644 index 000000000000..645dbb550d84 --- /dev/null +++ b/fs/reiser4/plugin/security/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_REISER4_FS) += security_plugins.o + +security_plugins-objs := \ + perm.o diff --git a/fs/reiser4/plugin/security/perm.c b/fs/reiser4/plugin/security/perm.c new file mode 100644 index 000000000000..64c285611273 --- /dev/null +++ b/fs/reiser4/plugin/security/perm.c @@ -0,0 +1,33 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* + * This file contains implementation of permission plugins. 
+ * See the comments in perm.h + */ + +#include "../plugin.h" +#include "../plugin_header.h" +#include "../../debug.h" + +perm_plugin perm_plugins[LAST_PERM_ID] = { + [NULL_PERM_ID] = { + .h = { + .type_id = REISER4_PERM_PLUGIN_TYPE, + .id = NULL_PERM_ID, + .pops = NULL, + .label = "null", + .desc = "stub permission plugin", + .linkage = {NULL, NULL} + } + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/security/perm.h b/fs/reiser4/plugin/security/perm.h new file mode 100644 index 000000000000..caa27498dd69 --- /dev/null +++ b/fs/reiser4/plugin/security/perm.h @@ -0,0 +1,38 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Perm (short for "permissions") plugins common stuff. */ + +#if !defined( __REISER4_PERM_H__ ) +#define __REISER4_PERM_H__ + +#include "../../forward.h" +#include "../plugin_header.h" + +#include + +/* Definition of permission plugin */ +/* NIKITA-FIXME-HANS: define what this is targeted for. + It does not seem to be intended for use with sys_reiser4. Explain. */ + +/* NOTE-EDWARD: This seems to be intended for deprecated sys_reiser4. + Consider it like a temporary "seam" and reserved pset member. + If you have something usefull to add, then rename this plugin and add here */ +typedef struct perm_plugin { + /* generic plugin fields */ + plugin_header h; +} perm_plugin; + +typedef enum { NULL_PERM_ID, LAST_PERM_ID } reiser4_perm_id; + +/* __REISER4_PERM_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/space/Makefile b/fs/reiser4/plugin/space/Makefile new file mode 100644 index 000000000000..5a0c94fbef74 --- /dev/null +++ b/fs/reiser4/plugin/space/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_REISER4_FS) += space_plugins.o + +space_plugins-objs := \ + bitmap.o diff --git a/fs/reiser4/plugin/space/bitmap.c b/fs/reiser4/plugin/space/bitmap.c new file mode 100644 index 000000000000..898c3920cc3a --- /dev/null +++ b/fs/reiser4/plugin/space/bitmap.c @@ -0,0 +1,1609 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../dformat.h" +#include "../../txnmgr.h" +#include "../../jnode.h" +#include "../../block_alloc.h" +#include "../../tree.h" +#include "../../super.h" +#include "../plugin.h" +#include "space_allocator.h" +#include "bitmap.h" + +#include +#include /* for struct super_block */ +#include +#include + +/* Proposed (but discarded) optimization: dynamic loading/unloading of bitmap + * blocks + + A useful optimization of reiser4 bitmap handling would be dynamic bitmap + blocks loading/unloading which is different from v3.x where all bitmap + blocks are loaded at mount time. + + To implement bitmap blocks unloading we need to count bitmap block usage + and detect currently unused blocks allowing them to be unloaded. It is not + a simple task since we allow several threads to modify one bitmap block + simultaneously. + + Briefly speaking, the following schema is proposed: we count in special + variable associated with each bitmap block. That is for counting of block + alloc/dealloc operations on that bitmap block. With a deferred block + deallocation feature of reiser4 all those operation will be represented in + atom dirty/deleted lists as jnodes for freshly allocated or deleted + nodes. 
+ + So, we increment usage counter for each new node allocated or deleted, and + decrement it at atom commit one time for each node from the dirty/deleted + atom's list. Of course, freshly allocated node deletion and node reusing + from atom deleted (if we do so) list should decrement bitmap usage counter + also. + + This schema seems to be working but that reference counting is + not easy to debug. I think we should agree with Hans and do not implement + it in v4.0. Current code implements "on-demand" bitmap blocks loading only. + + For simplicity all bitmap nodes (both commit and working bitmap blocks) are + loaded into memory on fs mount time or each bitmap nodes are loaded at the + first access to it, the "dont_load_bitmap" mount option controls whether + bimtap nodes should be loaded at mount time. Dynamic unloading of bitmap + nodes currently is not supported. */ + +#define CHECKSUM_SIZE 4 + +#define BYTES_PER_LONG (sizeof(long)) + +#if BITS_PER_LONG == 64 +# define LONG_INT_SHIFT (6) +#else +# define LONG_INT_SHIFT (5) +#endif + +#define LONG_INT_MASK (BITS_PER_LONG - 1UL) + +typedef unsigned long ulong_t; + +#define bmap_size(blocksize) ((blocksize) - CHECKSUM_SIZE) +#define bmap_bit_count(blocksize) (bmap_size(blocksize) << 3) + +/* Block allocation/deallocation are done through special bitmap objects which + are allocated in an array at fs mount. */ +struct bitmap_node { + struct mutex mutex; /* long term lock object */ + + jnode *wjnode; /* j-nodes for WORKING ... */ + jnode *cjnode; /* ... 
and COMMIT bitmap blocks */ + + bmap_off_t first_zero_bit; /* for skip_busy option implementation */ + + atomic_t loaded; /* a flag which shows that bnode is loaded + * already */ +}; + +static inline char *bnode_working_data(struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->wjnode); + assert("zam-429", data != NULL); + + return data + CHECKSUM_SIZE; +} + +static inline char *bnode_commit_data(const struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("zam-430", data != NULL); + + return data + CHECKSUM_SIZE; +} + +static inline __u32 bnode_commit_crc(const struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("vpf-261", data != NULL); + + return le32_to_cpu(get_unaligned((d32 *)data)); +} + +static inline void bnode_set_commit_crc(struct bitmap_node *bnode, __u32 crc) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("vpf-261", data != NULL); + + put_unaligned(cpu_to_le32(crc), (d32 *)data); +} + +/* ZAM-FIXME-HANS: is the idea that this might be a union someday? having + * written the code, does this added abstraction still have */ +/* ANSWER(Zam): No, the abstractions is in the level above (exact place is the + * reiser4_space_allocator structure) */ +/* ZAM-FIXME-HANS: I don't understand your english in comment above. */ +/* FIXME-HANS(Zam): I don't understand the questions like "might be a union + * someday?". What they about? If there is a reason to have a union, it should + * be a union, if not, it should not be a union. "..might be someday" means no + * reason. 
*/ +struct bitmap_allocator_data { + /* an array for bitmap blocks direct access */ + struct bitmap_node *bitmap; +}; + +#define get_barray(super) \ +(((struct bitmap_allocator_data *)(get_super_private(super)->space_allocator.u.generic)) -> bitmap) + +#define get_bnode(super, i) (get_barray(super) + i) + +/* allocate and initialize jnode with JNODE_BITMAP type */ +static jnode *bnew(void) +{ + jnode *jal = jalloc(); + + if (jal) + jnode_init(jal, current_tree, JNODE_BITMAP); + + return jal; +} + +/* this file contains: + - bitmap based implementation of space allocation plugin + - all the helper functions like set bit, find_first_zero_bit, etc */ + +/* Audited by: green(2002.06.12) */ +static int find_next_zero_bit_in_word(ulong_t word, int start_bit) +{ + ulong_t mask = 1UL << start_bit; + int i = start_bit; + + while ((word & mask) != 0) { + mask <<= 1; + if (++i >= BITS_PER_LONG) + break; + } + + return i; +} + +#include + +#if BITS_PER_LONG == 64 + +#define OFF(addr) (((ulong_t)(addr) & (BYTES_PER_LONG - 1)) << 3) +#define BASE(addr) ((ulong_t*) ((ulong_t)(addr) & ~(BYTES_PER_LONG - 1))) + +static inline void reiser4_set_bit(int nr, void *addr) +{ + __test_and_set_bit_le(nr + OFF(addr), BASE(addr)); +} + +static inline void reiser4_clear_bit(int nr, void *addr) +{ + __test_and_clear_bit_le(nr + OFF(addr), BASE(addr)); +} + +static inline int reiser4_test_bit(int nr, void *addr) +{ + return test_bit_le(nr + OFF(addr), BASE(addr)); +} +static inline int reiser4_find_next_zero_bit(void *addr, int maxoffset, + int offset) +{ + int off = OFF(addr); + + return find_next_zero_bit_le(BASE(addr), maxoffset + off, + offset + off) - off; +} + +#else + +#define reiser4_set_bit(nr, addr) __test_and_set_bit_le(nr, addr) +#define reiser4_clear_bit(nr, addr) __test_and_clear_bit_le(nr, addr) +#define reiser4_test_bit(nr, addr) test_bit_le(nr, addr) + +#define reiser4_find_next_zero_bit(addr, maxoffset, offset) \ +find_next_zero_bit_le(addr, maxoffset, offset) +#endif + +/* 
Search for a set bit in the bit array [@start_offset, @max_offset[, offsets + * are counted from @addr, return the offset of the first bit if it is found, + * @maxoffset otherwise. */ +static bmap_off_t __reiser4_find_next_set_bit(void *addr, bmap_off_t max_offset, + bmap_off_t start_offset) +{ + ulong_t *base = addr; + /* start_offset is in bits, convert it to byte offset within bitmap. */ + int word_nr = start_offset >> LONG_INT_SHIFT; + /* bit number within the byte. */ + int bit_nr = start_offset & LONG_INT_MASK; + int max_word_nr = (max_offset - 1) >> LONG_INT_SHIFT; + + assert("zam-387", max_offset != 0); + + /* Unaligned @start_offset case. */ + if (bit_nr != 0) { + bmap_nr_t nr; + + nr = find_next_zero_bit_in_word(~(base[word_nr]), bit_nr); + + if (nr < BITS_PER_LONG) + return (word_nr << LONG_INT_SHIFT) + nr; + + ++word_nr; + } + + /* Fast scan trough aligned words. */ + while (word_nr <= max_word_nr) { + if (base[word_nr] != 0) { + return (word_nr << LONG_INT_SHIFT) + + find_next_zero_bit_in_word(~(base[word_nr]), 0); + } + + ++word_nr; + } + + return max_offset; +} + +#if BITS_PER_LONG == 64 + +static bmap_off_t reiser4_find_next_set_bit(void *addr, bmap_off_t max_offset, + bmap_off_t start_offset) +{ + bmap_off_t off = OFF(addr); + + return __reiser4_find_next_set_bit(BASE(addr), max_offset + off, + start_offset + off) - off; +} + +#else +#define reiser4_find_next_set_bit(addr, max_offset, start_offset) \ + __reiser4_find_next_set_bit(addr, max_offset, start_offset) +#endif + +/* search for the first set bit in single word. 
*/ +static int find_last_set_bit_in_word(ulong_t word, int start_bit) +{ + ulong_t bit_mask; + int nr = start_bit; + + assert("zam-965", start_bit < BITS_PER_LONG); + assert("zam-966", start_bit >= 0); + + bit_mask = (1UL << nr); + + while (bit_mask != 0) { + if (bit_mask & word) + return nr; + bit_mask >>= 1; + nr--; + } + return BITS_PER_LONG; +} + +/* Search bitmap for a set bit in backward direction from the end to the + * beginning of given region + * + * @result: result offset of the last set bit + * @addr: base memory address, + * @low_off: low end of the search region, edge bit included into the region, + * @high_off: high end of the search region, edge bit included into the region, + * + * @return: 0 - set bit was found, -1 otherwise. + */ +static int +reiser4_find_last_set_bit(bmap_off_t * result, void *addr, bmap_off_t low_off, + bmap_off_t high_off) +{ + ulong_t *base = addr; + int last_word; + int first_word; + int last_bit; + int nr; + + assert("zam-962", high_off >= low_off); + + last_word = high_off >> LONG_INT_SHIFT; + last_bit = high_off & LONG_INT_MASK; + first_word = low_off >> LONG_INT_SHIFT; + + if (last_bit < BITS_PER_LONG) { + nr = find_last_set_bit_in_word(base[last_word], last_bit); + if (nr < BITS_PER_LONG) { + *result = (last_word << LONG_INT_SHIFT) + nr; + return 0; + } + --last_word; + } + while (last_word >= first_word) { + if (base[last_word] != 0x0) { + last_bit = + find_last_set_bit_in_word(base[last_word], + BITS_PER_LONG - 1); + assert("zam-972", last_bit < BITS_PER_LONG); + *result = (last_word << LONG_INT_SHIFT) + last_bit; + return 0; + } + --last_word; + } + + return -1; /* set bit not found */ +} + +/* Search bitmap for a clear bit in backward direction from the end to the + * beginning of given region */ +static int +reiser4_find_last_zero_bit(bmap_off_t * result, void *addr, bmap_off_t low_off, + bmap_off_t high_off) +{ + ulong_t *base = addr; + int last_word; + int first_word; + int last_bit; + int nr; + + last_word = 
high_off >> LONG_INT_SHIFT; + last_bit = high_off & LONG_INT_MASK; + first_word = low_off >> LONG_INT_SHIFT; + + if (last_bit < BITS_PER_LONG) { + nr = find_last_set_bit_in_word(~base[last_word], last_bit); + if (nr < BITS_PER_LONG) { + *result = (last_word << LONG_INT_SHIFT) + nr; + return 0; + } + --last_word; + } + while (last_word >= first_word) { + if (base[last_word] != (ulong_t) (-1)) { + *result = (last_word << LONG_INT_SHIFT) + + find_last_set_bit_in_word(~base[last_word], + BITS_PER_LONG - 1); + return 0; + } + --last_word; + } + + return -1; /* zero bit not found */ +} + +/* Audited by: green(2002.06.12) */ +static void reiser4_clear_bits(char *addr, bmap_off_t start, bmap_off_t end) +{ + int first_byte; + int last_byte; + + unsigned char first_byte_mask = 0xFF; + unsigned char last_byte_mask = 0xFF; + + assert("zam-410", start < end); + + first_byte = start >> 3; + last_byte = (end - 1) >> 3; + + if (last_byte > first_byte + 1) + memset(addr + first_byte + 1, 0, + (size_t) (last_byte - first_byte - 1)); + + first_byte_mask >>= 8 - (start & 0x7); + last_byte_mask <<= ((end - 1) & 0x7) + 1; + + if (first_byte == last_byte) { + addr[first_byte] &= (first_byte_mask | last_byte_mask); + } else { + addr[first_byte] &= first_byte_mask; + addr[last_byte] &= last_byte_mask; + } +} + +/* Audited by: green(2002.06.12) */ +/* ZAM-FIXME-HANS: comment this */ +static void reiser4_set_bits(char *addr, bmap_off_t start, bmap_off_t end) +{ + int first_byte; + int last_byte; + + unsigned char first_byte_mask = 0xFF; + unsigned char last_byte_mask = 0xFF; + + assert("zam-386", start < end); + + first_byte = start >> 3; + last_byte = (end - 1) >> 3; + + if (last_byte > first_byte + 1) + memset(addr + first_byte + 1, 0xFF, + (size_t) (last_byte - first_byte - 1)); + + first_byte_mask <<= start & 0x7; + last_byte_mask >>= 7 - ((end - 1) & 0x7); + + if (first_byte == last_byte) { + addr[first_byte] |= (first_byte_mask & last_byte_mask); + } else { + addr[first_byte] |= 
first_byte_mask; + addr[last_byte] |= last_byte_mask; + } +} + +#define ADLER_BASE 65521 +#define ADLER_NMAX 5552 + +/* Calculates the adler32 checksum for the data pointed by `data` of the + length `len`. This function was originally taken from zlib, version 1.1.3, + July 9th, 1998. + + Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + The above comment applies only to the reiser4_adler32 function. +*/ + +__u32 reiser4_adler32(char *data, __u32 len) +{ + unsigned char *t = data; + __u32 s1 = 1; + __u32 s2 = 0; + int k; + + while (len > 0) { + k = len < ADLER_NMAX ? 
len : ADLER_NMAX; + len -= k; + + while (k--) { + s1 += *t++; + s2 += s1; + } + + s1 %= ADLER_BASE; + s2 %= ADLER_BASE; + } + return (s2 << 16) | s1; +} + +#define sb_by_bnode(bnode) \ + ((struct super_block *)jnode_get_tree(bnode->wjnode)->super) + +static __u32 bnode_calc_crc(const struct bitmap_node *bnode, unsigned long size) +{ + return reiser4_adler32(bnode_commit_data(bnode), bmap_size(size)); +} + +static int +bnode_check_adler32(const struct bitmap_node *bnode, unsigned long size) +{ + if (bnode_calc_crc(bnode, size) != bnode_commit_crc(bnode)) { + bmap_nr_t bmap; + + bmap = bnode - get_bnode(sb_by_bnode(bnode), 0); + + warning("vpf-263", + "Checksum for the bitmap block %llu is incorrect", + bmap); + + return RETERR(-EIO); + } + + return 0; +} + +#define REISER4_CHECK_BMAP_CRC (0) + +#if REISER4_CHECK_BMAP_CRC +static int bnode_check_crc(const struct bitmap_node *bnode) +{ + return bnode_check_adler32(bnode, + bmap_size(sb_by_bnode(bnode)->s_blocksize)); +} + +/* REISER4_CHECK_BMAP_CRC */ +#else + +#define bnode_check_crc(bnode) (0) + +/* REISER4_CHECK_BMAP_CRC */ +#endif + +/* Recalculates the adler32 checksum for only 1 byte change. + adler - previous adler checksum + old_data, data - old, new byte values. + tail == (chunk - offset) : length, checksum was calculated for, - offset of + the changed byte within this chunk. + This function can be used for checksum calculation optimisation. +*/ + +static __u32 +adler32_recalc(__u32 adler, unsigned char old_data, unsigned char data, + __u32 tail) +{ + __u32 delta = data - old_data + 2 * ADLER_BASE; + __u32 s1 = adler & 0xffff; + __u32 s2 = (adler >> 16) & 0xffff; + + s1 = (delta + s1) % ADLER_BASE; + s2 = (delta * tail + s2) % ADLER_BASE; + + return (s2 << 16) | s1; +} + +#define LIMIT(val, boundary) ((val) > (boundary) ? 
(boundary) : (val)) + +/** + * get_nr_bitmap - calculate number of bitmap blocks + * @super: super block with initialized blocksize and block count + * + * Calculates number of bitmap blocks of a filesystem which uses bitmaps to + * maintain free disk space. It assumes that each bitmap addresses the same + * number of blocks which is calculated by bmap_block_count macro defined in + * above. Number of blocks in the filesystem has to be initialized in reiser4 + * private data of super block already so that it can be obtained via + * reiser4_block_count(). Unfortunately, number of blocks addressed by a bitmap + * is not power of 2 because 4 bytes are used for checksum. Therefore, we have + * to use special function to divide and modulo 64bits filesystem block + * counters. + * + * Example: suppose filesystem have 32768 blocks. Blocksize is 4096. Each bitmap + * block addresses (4096 - 4) * 8 = 32736 blocks. Number of bitmaps to address + * all 32768 blocks is calculated as (32768 - 1) / 32736 + 1 = 2. + */ +static bmap_nr_t get_nr_bmap(const struct super_block *super) +{ + u64 quotient; + + assert("zam-393", reiser4_block_count(super) != 0); + + quotient = reiser4_block_count(super) - 1; + do_div(quotient, bmap_bit_count(super->s_blocksize)); + return quotient + 1; +} + +/** + * parse_blocknr - calculate bitmap number and offset in it by block number + * @block: pointer to block number to calculate location in bitmap of + * @bmap: pointer where to store bitmap block number + * @offset: pointer where to store offset within bitmap block + * + * Calculates location of bit which is responsible for allocation/freeing of + * block @*block. That location is represented by bitmap block number and offset + * within that bitmap block. 
+ */ +static void +parse_blocknr(const reiser4_block_nr *block, bmap_nr_t *bmap, + bmap_off_t *offset) +{ + struct super_block *super = get_current_context()->super; + u64 quotient = *block; + + *offset = do_div(quotient, bmap_bit_count(super->s_blocksize)); + *bmap = quotient; + + assert("zam-433", *bmap < get_nr_bmap(super)); + assert("", *offset < bmap_bit_count(super->s_blocksize)); +} + +#if REISER4_DEBUG +/* Audited by: green(2002.06.12) */ +static void +check_block_range(const reiser4_block_nr * start, const reiser4_block_nr * len) +{ + struct super_block *sb = reiser4_get_current_sb(); + + assert("zam-436", sb != NULL); + + assert("zam-455", start != NULL); + assert("zam-437", *start != 0); + assert("zam-541", !reiser4_blocknr_is_fake(start)); + assert("zam-441", *start < reiser4_block_count(sb)); + + if (len != NULL) { + assert("zam-438", *len != 0); + assert("zam-442", *start + *len <= reiser4_block_count(sb)); + } +} + +static void check_bnode_loaded(const struct bitmap_node *bnode) +{ + assert("zam-485", bnode != NULL); + assert("zam-483", jnode_page(bnode->wjnode) != NULL); + assert("zam-484", jnode_page(bnode->cjnode) != NULL); + assert("nikita-2820", jnode_is_loaded(bnode->wjnode)); + assert("nikita-2821", jnode_is_loaded(bnode->cjnode)); +} + +#else + +# define check_block_range(start, len) do { /* nothing */} while(0) +# define check_bnode_loaded(bnode) do { /* nothing */} while(0) + +#endif + +/* modify bnode->first_zero_bit (if we free bits before); bnode should be + spin-locked */ +static inline void +adjust_first_zero_bit(struct bitmap_node *bnode, bmap_off_t offset) +{ + if (offset < bnode->first_zero_bit) + bnode->first_zero_bit = offset; +} + +/* return a physical disk address for logical bitmap number @bmap */ +/* FIXME-VS: this is somehow related to disk layout? */ +/* ZAM-FIXME-HANS: your answer is? Use not more than one function dereference + * per block allocation so that performance is not affected. 
Probably this + * whole file should be considered part of the disk layout plugin, and other + * disk layouts can use other defines and efficiency will not be significantly + * affected. */ + +#define REISER4_FIRST_BITMAP_BLOCK \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 2) + +/* Audited by: green(2002.06.12) */ +static void +get_bitmap_blocknr(struct super_block *super, bmap_nr_t bmap, + reiser4_block_nr * bnr) +{ + + assert("zam-390", bmap < get_nr_bmap(super)); + +#ifdef CONFIG_REISER4_BADBLOCKS +#define BITMAP_PLUGIN_DISKMAP_ID ((0xc0e1<<16) | (0xe0ff)) + /* Check if the diskmap have this already, first. */ + if (reiser4_get_diskmap_value(BITMAP_PLUGIN_DISKMAP_ID, bmap, bnr) == 0) + return; /* Found it in diskmap */ +#endif + /* FIXME_ZAM: before discussing of disk layouts and disk format + plugins I implement bitmap location scheme which is close to scheme + used in reiser 3.6 */ + if (bmap == 0) { + *bnr = REISER4_FIRST_BITMAP_BLOCK; + } else { + *bnr = bmap * bmap_bit_count(super->s_blocksize); + } +} + +/* construct a fake block number for shadow bitmap (WORKING BITMAP) block */ +/* Audited by: green(2002.06.12) */ +static void get_working_bitmap_blocknr(bmap_nr_t bmap, reiser4_block_nr * bnr) +{ + *bnr = + (reiser4_block_nr) ((bmap & ~REISER4_BLOCKNR_STATUS_BIT_MASK) | + REISER4_BITMAP_BLOCKS_STATUS_VALUE); +} + +/* bnode structure initialization */ +static void +init_bnode(struct bitmap_node *bnode, + struct super_block *super UNUSED_ARG, bmap_nr_t bmap UNUSED_ARG) +{ + memset(bnode, 0, sizeof(struct bitmap_node)); + + mutex_init(&bnode->mutex); + atomic_set(&bnode->loaded, 0); +} + +static void release(jnode * node) +{ + jrelse(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + jput(node); +} + +/* This function is for internal bitmap.c use because it assumes that jnode is + in under full control of this thread */ +static void done_bnode(struct bitmap_node *bnode) +{ + if (bnode) { + atomic_set(&bnode->loaded, 0); + if (bnode->wjnode != NULL) + 
release(bnode->wjnode); + if (bnode->cjnode != NULL) + release(bnode->cjnode); + bnode->wjnode = bnode->cjnode = NULL; + } +} + +/* ZAM-FIXME-HANS: comment this. Called only by load_and_lock_bnode()*/ +static int prepare_bnode(struct bitmap_node *bnode, jnode **cjnode_ret, + jnode **wjnode_ret) +{ + struct super_block *super; + jnode *cjnode; + jnode *wjnode; + bmap_nr_t bmap; + int ret; + + super = reiser4_get_current_sb(); + + *wjnode_ret = wjnode = bnew(); + if (wjnode == NULL) { + *cjnode_ret = NULL; + return RETERR(-ENOMEM); + } + + *cjnode_ret = cjnode = bnew(); + if (cjnode == NULL) + return RETERR(-ENOMEM); + + bmap = bnode - get_bnode(super, 0); + + get_working_bitmap_blocknr(bmap, &wjnode->blocknr); + get_bitmap_blocknr(super, bmap, &cjnode->blocknr); + + jref(cjnode); + jref(wjnode); + + /* load commit bitmap */ + ret = jload_gfp(cjnode, GFP_NOFS, 1); + + if (ret) + goto error; + + /* allocate memory for working bitmap block. Note that for + * bitmaps jinit_new() doesn't actually modifies node content, + * so parallel calls to this are ok. */ + ret = jinit_new(wjnode, GFP_NOFS); + + if (ret != 0) { + jrelse(cjnode); + goto error; + } + + return 0; + + error: + jput(cjnode); + jput(wjnode); + *wjnode_ret = *cjnode_ret = NULL; + return ret; + +} + +/* Check the bnode data on read. */ +static int check_struct_bnode(struct bitmap_node *bnode, __u32 blksize) +{ + void *data; + int ret; + + /* Check CRC */ + ret = bnode_check_adler32(bnode, blksize); + + if (ret) { + return ret; + } + + data = jdata(bnode->cjnode) + CHECKSUM_SIZE; + + /* Check the very first bit -- it must be busy. 
*/ + if (!reiser4_test_bit(0, data)) { + warning("vpf-1362", "The allocator block %llu is not marked " + "as used.", (unsigned long long)bnode->cjnode->blocknr); + + return -EINVAL; + } + + return 0; +} + +/* load bitmap blocks "on-demand" */ +static int load_and_lock_bnode(struct bitmap_node *bnode) +{ + int ret; + + jnode *cjnode; + jnode *wjnode; + + assert("nikita-3040", reiser4_schedulable()); + +/* ZAM-FIXME-HANS: since bitmaps are never unloaded, this does not + * need to be atomic, right? Just leave a comment that if bitmaps were + * unloadable, this would need to be atomic. */ + if (atomic_read(&bnode->loaded)) { + /* bitmap is already loaded, nothing to do */ + check_bnode_loaded(bnode); + mutex_lock(&bnode->mutex); + assert("nikita-2827", atomic_read(&bnode->loaded)); + return 0; + } + + ret = prepare_bnode(bnode, &cjnode, &wjnode); + if (ret) + return ret; + + mutex_lock(&bnode->mutex); + + if (!atomic_read(&bnode->loaded)) { + assert("nikita-2822", cjnode != NULL); + assert("nikita-2823", wjnode != NULL); + assert("nikita-2824", jnode_is_loaded(cjnode)); + assert("nikita-2825", jnode_is_loaded(wjnode)); + + bnode->wjnode = wjnode; + bnode->cjnode = cjnode; + + ret = check_struct_bnode(bnode, current_blocksize); + if (unlikely(ret != 0)) + goto error; + + atomic_set(&bnode->loaded, 1); + /* working bitmap is initialized by on-disk + * commit bitmap. This should be performed + * under mutex. */ + memcpy(bnode_working_data(bnode), + bnode_commit_data(bnode), + bmap_size(current_blocksize)); + } else + /* race: someone already loaded bitmap + * while we were busy initializing data. 
*/ + check_bnode_loaded(bnode); + return 0; + + error: + release(wjnode); + release(cjnode); + bnode->wjnode = NULL; + bnode->cjnode = NULL; + mutex_unlock(&bnode->mutex); + return ret; +} + +static void release_and_unlock_bnode(struct bitmap_node *bnode) +{ + check_bnode_loaded(bnode); + mutex_unlock(&bnode->mutex); +} + +/* This function does all block allocation work but only for one bitmap + block.*/ +/* FIXME_ZAM: It does not allow us to allocate block ranges across bitmap + block responsibility zone boundaries. This had no sense in v3.6 but may + have it in v4.x */ +/* ZAM-FIXME-HANS: do you mean search one bitmap block forward? */ +static int +search_one_bitmap_forward(bmap_nr_t bmap, bmap_off_t * offset, + bmap_off_t max_offset, int min_len, int max_len) +{ + struct super_block *super = get_current_context()->super; + struct bitmap_node *bnode = get_bnode(super, bmap); + + char *data; + + bmap_off_t search_end; + bmap_off_t start; + bmap_off_t end; + + int set_first_zero_bit = 0; + + int ret; + + assert("zam-364", min_len > 0); + assert("zam-365", max_len >= min_len); + assert("zam-366", *offset <= max_offset); + + ret = load_and_lock_bnode(bnode); + + if (ret) + return ret; + + data = bnode_working_data(bnode); + + start = *offset; + + if (bnode->first_zero_bit >= start) { + start = bnode->first_zero_bit; + set_first_zero_bit = 1; + } + + while (start + min_len < max_offset) { + + start = + reiser4_find_next_zero_bit((long *)data, max_offset, start); + if (set_first_zero_bit) { + bnode->first_zero_bit = start; + set_first_zero_bit = 0; + } + if (start >= max_offset) + break; + + search_end = LIMIT(start + max_len, max_offset); + end = + reiser4_find_next_set_bit((long *)data, search_end, start); + if (end >= start + min_len) { + /* we can't trust find_next_set_bit result if set bit + was not fount, result may be bigger than + max_offset */ + if (end > search_end) + end = search_end; + + ret = end - start; + *offset = start; + + reiser4_set_bits(data, 
start, end); + + /* FIXME: we may advance first_zero_bit if [start, + end] region overlaps the first_zero_bit point */ + + break; + } + + start = end + 1; + } + + release_and_unlock_bnode(bnode); + + return ret; +} + +static int +search_one_bitmap_backward(bmap_nr_t bmap, bmap_off_t * start_offset, + bmap_off_t end_offset, int min_len, int max_len) +{ + struct super_block *super = get_current_context()->super; + struct bitmap_node *bnode = get_bnode(super, bmap); + char *data; + bmap_off_t start; + int ret; + + assert("zam-958", min_len > 0); + assert("zam-959", max_len >= min_len); + assert("zam-960", *start_offset >= end_offset); + + ret = load_and_lock_bnode(bnode); + if (ret) + return ret; + + data = bnode_working_data(bnode); + start = *start_offset; + + while (1) { + bmap_off_t end, search_end; + + /* Find the beginning of the zero filled region */ + if (reiser4_find_last_zero_bit(&start, data, end_offset, start)) + break; + /* Is there more than `min_len' bits from `start' to + * `end_offset'? */ + if (start < end_offset + min_len - 1) + break; + + /* Do not search to `end_offset' if we need to find less than + * `max_len' zero bits. */ + if (end_offset + max_len - 1 < start) + search_end = start - max_len + 1; + else + search_end = end_offset; + + if (reiser4_find_last_set_bit(&end, data, search_end, start)) + end = search_end; + else + end++; + + if (end + min_len <= start + 1) { + if (end < search_end) + end = search_end; + ret = start - end + 1; + *start_offset = end; /* `end' is lowest offset */ + assert("zam-987", + reiser4_find_next_set_bit(data, start + 1, + end) >= start + 1); + reiser4_set_bits(data, end, start + 1); + break; + } + + if (end <= end_offset) + /* left search boundary reached. 
*/ + break; + start = end - 1; + } + + release_and_unlock_bnode(bnode); + return ret; +} + +/* allocate contiguous range of blocks in bitmap */ +static int bitmap_alloc_forward(reiser4_block_nr * start, + const reiser4_block_nr * end, int min_len, + int max_len) +{ + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + int len; + + reiser4_block_nr tmp; + + struct super_block *super = get_current_context()->super; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + parse_blocknr(start, &bmap, &offset); + + tmp = *end - 1; + parse_blocknr(&tmp, &end_bmap, &end_offset); + ++end_offset; + + assert("zam-358", end_bmap >= bmap); + assert("zam-359", ergo(end_bmap == bmap, end_offset >= offset)); + + for (; bmap < end_bmap; bmap++, offset = 0) { + len = + search_one_bitmap_forward(bmap, &offset, max_offset, + min_len, max_len); + if (len != 0) + goto out; + } + + len = + search_one_bitmap_forward(bmap, &offset, end_offset, min_len, + max_len); + out: + *start = bmap * max_offset + offset; + return len; +} + +/* allocate contiguous range of blocks in bitmap (from @start to @end in + * backward direction) */ +static int bitmap_alloc_backward(reiser4_block_nr * start, + const reiser4_block_nr * end, int min_len, + int max_len) +{ + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + int len; + struct super_block *super = get_current_context()->super; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + parse_blocknr(start, &bmap, &offset); + parse_blocknr(end, &end_bmap, &end_offset); + + assert("zam-961", end_bmap <= bmap); + assert("zam-962", ergo(end_bmap == bmap, end_offset <= offset)); + + for (; bmap > end_bmap; bmap--, offset = max_offset - 1) { + len = + search_one_bitmap_backward(bmap, &offset, 0, min_len, + max_len); + if (len != 0) + goto out; + } + + len = + search_one_bitmap_backward(bmap, &offset, end_offset, min_len, + max_len); + out: + *start = bmap * max_offset + offset; + return len; +} + +/* 
plugin->u.space_allocator.alloc_blocks() */ +static int alloc_blocks_forward(reiser4_blocknr_hint *hint, int needed, + reiser4_block_nr *start, reiser4_block_nr *len) +{ + struct super_block *super = get_current_context()->super; + int actual_len; + + reiser4_block_nr search_start; + reiser4_block_nr search_end; + + assert("zam-398", super != NULL); + assert("zam-412", hint != NULL); + assert("zam-397", hint->blk <= reiser4_block_count(super)); + + if (hint->max_dist == 0) + search_end = reiser4_block_count(super); + else + search_end = + LIMIT(hint->blk + hint->max_dist, + reiser4_block_count(super)); + + /* We use @hint -> blk as a search start and search from it to the end + of the disk or in given region if @hint -> max_dist is not zero */ + search_start = hint->blk; + + actual_len = + bitmap_alloc_forward(&search_start, &search_end, 1, needed); + + /* There is only one bitmap search if max_dist was specified or first + pass was from the beginning of the bitmap. We also do one pass for + scanning bitmap in backward direction. 
*/ + if (!(actual_len != 0 || hint->max_dist != 0 || search_start == 0)) { + /* next step is a scanning from 0 to search_start */ + search_end = search_start; + search_start = 0; + actual_len = + bitmap_alloc_forward(&search_start, &search_end, 1, needed); + } + if (actual_len == 0) + return RETERR(-ENOSPC); + if (actual_len < 0) + return RETERR(actual_len); + *len = actual_len; + *start = search_start; + return 0; +} + +static int alloc_blocks_backward(reiser4_blocknr_hint * hint, int needed, + reiser4_block_nr * start, + reiser4_block_nr * len) +{ + reiser4_block_nr search_start; + reiser4_block_nr search_end; + int actual_len; + + ON_DEBUG(struct super_block *super = reiser4_get_current_sb()); + + assert("zam-969", super != NULL); + assert("zam-970", hint != NULL); + assert("zam-971", hint->blk <= reiser4_block_count(super)); + + search_start = hint->blk; + if (hint->max_dist == 0 || search_start <= hint->max_dist) + search_end = 0; + else + search_end = search_start - hint->max_dist; + + actual_len = + bitmap_alloc_backward(&search_start, &search_end, 1, needed); + if (actual_len == 0) + return RETERR(-ENOSPC); + if (actual_len < 0) + return RETERR(actual_len); + *len = actual_len; + *start = search_start; + return 0; +} + +/* plugin->u.space_allocator.alloc_blocks() */ +int reiser4_alloc_blocks_bitmap(reiser4_space_allocator * allocator, + reiser4_blocknr_hint * hint, int needed, + reiser4_block_nr * start, reiser4_block_nr * len) +{ + if (hint->backward) + return alloc_blocks_backward(hint, needed, start, len); + return alloc_blocks_forward(hint, needed, start, len); +} + +/* plugin->u.space_allocator.dealloc_blocks(). */ +/* It just frees blocks in WORKING BITMAP. Usually formatted an unformatted + nodes deletion is deferred until transaction commit. 
However, deallocation + of temporary objects like wandered blocks and transaction commit records + requires immediate node deletion from WORKING BITMAP.*/ +void reiser4_dealloc_blocks_bitmap(reiser4_space_allocator * allocator, + reiser4_block_nr start, reiser4_block_nr len) +{ + struct super_block *super = reiser4_get_current_sb(); + + bmap_nr_t bmap; + bmap_off_t offset; + + struct bitmap_node *bnode; + int ret; + + assert("zam-468", len != 0); + check_block_range(&start, &len); + + parse_blocknr(&start, &bmap, &offset); + + assert("zam-469", offset + len <= bmap_bit_count(super->s_blocksize)); + + bnode = get_bnode(super, bmap); + + assert("zam-470", bnode != NULL); + + ret = load_and_lock_bnode(bnode); + assert("zam-481", ret == 0); + + reiser4_clear_bits(bnode_working_data(bnode), offset, + (bmap_off_t) (offset + len)); + + adjust_first_zero_bit(bnode, offset); + + release_and_unlock_bnode(bnode); +} + +static int check_blocks_one_bitmap(bmap_nr_t bmap, bmap_off_t start_offset, + bmap_off_t end_offset, int desired) +{ + struct super_block *super = reiser4_get_current_sb(); + struct bitmap_node *bnode = get_bnode(super, bmap); + int ret; + + assert("nikita-2215", bnode != NULL); + + ret = load_and_lock_bnode(bnode); + assert("zam-626", ret == 0); + + assert("nikita-2216", jnode_is_loaded(bnode->wjnode)); + + if (desired) { + ret = reiser4_find_next_zero_bit(bnode_working_data(bnode), + end_offset, start_offset) + >= end_offset; + } else { + ret = reiser4_find_next_set_bit(bnode_working_data(bnode), + end_offset, start_offset) + >= end_offset; + } + + release_and_unlock_bnode(bnode); + + return ret; +} + +/* plugin->u.space_allocator.check_blocks(). 
*/ +int reiser4_check_blocks_bitmap(const reiser4_block_nr * start, + const reiser4_block_nr * len, int desired) +{ + struct super_block *super = reiser4_get_current_sb(); + + reiser4_block_nr end; + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + assert("intelfx-9", start != NULL); + assert("intelfx-10", ergo(len != NULL, *len > 0)); + + if (len != NULL) { + check_block_range(start, len); + end = *start + *len - 1; + } else { + /* on next line, end is used as temporary len for check_block_range() */ + end = 1; check_block_range(start, &end); + end = *start; + } + + parse_blocknr(start, &bmap, &offset); + + if (end == *start) { + end_bmap = bmap; + end_offset = offset; + } else { + parse_blocknr(&end, &end_bmap, &end_offset); + } + ++end_offset; + + assert("intelfx-4", end_bmap >= bmap); + assert("intelfx-5", ergo(end_bmap == bmap, end_offset >= offset)); + + for (; bmap < end_bmap; bmap++, offset = 0) { + if (!check_blocks_one_bitmap(bmap, offset, max_offset, desired)) { + return 0; + } + } + return check_blocks_one_bitmap(bmap, offset, end_offset, desired); +} + +/* conditional insertion of @node into atom's overwrite set if it was not there */ +static void cond_add_to_overwrite_set(txn_atom * atom, jnode * node) +{ + assert("zam-546", atom != NULL); + assert("zam-547", atom->stage == ASTAGE_PRE_COMMIT); + assert("zam-548", node != NULL); + + spin_lock_atom(atom); + spin_lock_jnode(node); + + if (node->atom == NULL) { + JF_SET(node, JNODE_OVRWR); + insert_into_atom_ovrwr_list(atom, node); + } else { + assert("zam-549", node->atom == atom); + } + + spin_unlock_jnode(node); + spin_unlock_atom(atom); +} + +/* an actor which applies delete set to COMMIT bitmap pages and link modified + pages in a single-linked list */ +static int +apply_dset_to_commit_bmap(txn_atom * atom, const reiser4_block_nr * start, + const reiser4_block_nr * len, void *data) +{ + + bmap_nr_t bmap; + bmap_off_t 
offset; + int ret; + + long long *blocks_freed_p = data; + + struct bitmap_node *bnode; + + struct super_block *sb = reiser4_get_current_sb(); + + check_block_range(start, len); + + parse_blocknr(start, &bmap, &offset); + + /* FIXME-ZAM: we assume that all block ranges are allocated by this + bitmap-based allocator and each block range can't go over a zone of + responsibility of one bitmap block; same assumption is used in + other journal hooks in bitmap code. */ + bnode = get_bnode(sb, bmap); + assert("zam-448", bnode != NULL); + + /* it is safe to unlock atom with is in ASTAGE_PRE_COMMIT */ + assert("zam-767", atom->stage == ASTAGE_PRE_COMMIT); + ret = load_and_lock_bnode(bnode); + if (ret) + return ret; + + /* put bnode into atom's overwrite set */ + cond_add_to_overwrite_set(atom, bnode->cjnode); + + data = bnode_commit_data(bnode); + + ret = bnode_check_crc(bnode); + if (ret != 0) + return ret; + + if (len != NULL) { + /* FIXME-ZAM: a check that all bits are set should be there */ + assert("zam-443", + offset + *len <= bmap_bit_count(sb->s_blocksize)); + reiser4_clear_bits(data, offset, (bmap_off_t) (offset + *len)); + + (*blocks_freed_p) += *len; + } else { + reiser4_clear_bit(offset, data); + (*blocks_freed_p)++; + } + + bnode_set_commit_crc(bnode, bnode_calc_crc(bnode, sb->s_blocksize)); + + release_and_unlock_bnode(bnode); + + return 0; +} + +/* plugin->u.space_allocator.pre_commit_hook(). */ +/* It just applies transaction changes to fs-wide COMMIT BITMAP, hoping the + rest is done by transaction manager (allocate wandered locations for COMMIT + BITMAP blocks, copy COMMIT BITMAP blocks data). 
*/ +/* Only one instance of this function can be running at one given time, because + only one transaction can be committed a time, therefore it is safe to access + some global variables without any locking */ + +int reiser4_pre_commit_hook_bitmap(void) +{ + struct super_block *super = reiser4_get_current_sb(); + txn_atom *atom; + + long long blocks_freed = 0; + + atom = get_current_atom_locked(); + assert("zam-876", atom->stage == ASTAGE_PRE_COMMIT); + spin_unlock_atom(atom); + + { /* scan atom's captured list and find all freshly allocated nodes, + * mark corresponded bits in COMMIT BITMAP as used */ + struct list_head *head = ATOM_CLEAN_LIST(atom); + jnode *node = list_entry(head->next, jnode, capture_link); + + while (head != &node->capture_link) { + /* we detect freshly allocated jnodes */ + if (JF_ISSET(node, JNODE_RELOC)) { + int ret; + bmap_nr_t bmap; + + bmap_off_t offset; + bmap_off_t index; + struct bitmap_node *bn; + __u32 size = bmap_size(super->s_blocksize); + __u32 crc; + char byte; + + assert("zam-559", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-460", + !reiser4_blocknr_is_fake(&node->blocknr)); + + parse_blocknr(&node->blocknr, &bmap, &offset); + bn = get_bnode(super, bmap); + + index = offset >> 3; + assert("vpf-276", index < size); + + ret = bnode_check_crc(bnode); + if (ret != 0) + return ret; + + check_bnode_loaded(bn); + load_and_lock_bnode(bn); + + byte = *(bnode_commit_data(bn) + index); + reiser4_set_bit(offset, bnode_commit_data(bn)); + + crc = adler32_recalc(bnode_commit_crc(bn), byte, + *(bnode_commit_data(bn) + + index), + size - index), + bnode_set_commit_crc(bn, crc); + + release_and_unlock_bnode(bn); + + ret = bnode_check_crc(bn); + if (ret != 0) + return ret; + + /* working of this depends on how it inserts + new j-node into clean list, because we are + scanning the same list now. 
It is OK, if + insertion is done to the list front */ + cond_add_to_overwrite_set(atom, bn->cjnode); + } + + node = list_entry(node->capture_link.next, jnode, capture_link); + } + } + + atom_dset_deferred_apply(atom, apply_dset_to_commit_bmap, &blocks_freed, 0); + + blocks_freed -= atom->nr_blocks_allocated; + + { + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + sbinfo->blocks_free_committed += blocks_freed; + spin_unlock_reiser4_super(sbinfo); + } + + return 0; +} + +/* plugin->u.space_allocator.init_allocator + constructor of reiser4_space_allocator object. It is called on fs mount */ +int reiser4_init_allocator_bitmap(reiser4_space_allocator * allocator, + struct super_block *super, void *arg) +{ + struct bitmap_allocator_data *data = NULL; + bmap_nr_t bitmap_blocks_nr; + bmap_nr_t i; + + assert("nikita-3039", reiser4_schedulable()); + + /* getting memory for bitmap allocator private data holder */ + data = + kmalloc(sizeof(struct bitmap_allocator_data), + reiser4_ctx_gfp_mask_get()); + + if (data == NULL) + return RETERR(-ENOMEM); + + /* allocation and initialization for the array of bnodes */ + bitmap_blocks_nr = get_nr_bmap(super); + + /* FIXME-ZAM: it is not clear what to do with huge number of bitmaps + which is bigger than 2^32 (= 8 * 4096 * 4096 * 2^32 bytes = 5.76e+17, + may I never meet someone who still uses the ia32 architecture when + storage devices of that size enter the market, and wants to use ia32 + with that storage device, much less reiser4. ;-) -Hans). Kmalloc is not possible and, + probably, another dynamic data structure should replace a static + array of bnodes. 
*/ + /*data->bitmap = reiser4_kmalloc((size_t) (sizeof (struct bitmap_node) * bitmap_blocks_nr), GFP_KERNEL); */ + data->bitmap = reiser4_vmalloc(sizeof(struct bitmap_node) * bitmap_blocks_nr); + if (data->bitmap == NULL) { + kfree(data); + return RETERR(-ENOMEM); + } + + for (i = 0; i < bitmap_blocks_nr; i++) + init_bnode(data->bitmap + i, super, i); + + allocator->u.generic = data; + +#if REISER4_DEBUG + get_super_private(super)->min_blocks_used += bitmap_blocks_nr; +#endif + + /* Load all bitmap blocks at mount time. */ + if (!test_bit + (REISER4_DONT_LOAD_BITMAP, &get_super_private(super)->fs_flags)) { + __u64 start_time, elapsed_time; + struct bitmap_node *bnode; + int ret; + + if (REISER4_DEBUG) + printk(KERN_INFO "loading reiser4 bitmap..."); + start_time = jiffies; + + for (i = 0; i < bitmap_blocks_nr; i++) { + bnode = data->bitmap + i; + ret = load_and_lock_bnode(bnode); + if (ret) { + reiser4_destroy_allocator_bitmap(allocator, + super); + return ret; + } + release_and_unlock_bnode(bnode); + } + + elapsed_time = jiffies - start_time; + if (REISER4_DEBUG) + printk("...done (%llu jiffies)\n", + (unsigned long long)elapsed_time); + } + + return 0; +} + +/* plugin->u.space_allocator.destroy_allocator + destructor. 
It is called on fs unmount */ +int reiser4_destroy_allocator_bitmap(reiser4_space_allocator * allocator, + struct super_block *super) +{ + bmap_nr_t bitmap_blocks_nr; + bmap_nr_t i; + + struct bitmap_allocator_data *data = allocator->u.generic; + + assert("zam-414", data != NULL); + assert("zam-376", data->bitmap != NULL); + + bitmap_blocks_nr = get_nr_bmap(super); + + for (i = 0; i < bitmap_blocks_nr; i++) { + struct bitmap_node *bnode = data->bitmap + i; + + mutex_lock(&bnode->mutex); + +#if REISER4_DEBUG + if (atomic_read(&bnode->loaded)) { + jnode *wj = bnode->wjnode; + jnode *cj = bnode->cjnode; + + assert("zam-480", jnode_page(cj) != NULL); + assert("zam-633", jnode_page(wj) != NULL); + + assert("zam-634", + memcmp(jdata(wj), jdata(wj), + bmap_size(super->s_blocksize)) == 0); + + } +#endif + done_bnode(bnode); + mutex_unlock(&bnode->mutex); + } + + vfree(data->bitmap); + kfree(data); + + allocator->u.generic = NULL; + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/space/bitmap.h b/fs/reiser4/plugin/space/bitmap.h new file mode 100644 index 000000000000..4590498adb45 --- /dev/null +++ b/fs/reiser4/plugin/space/bitmap.h @@ -0,0 +1,47 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__REISER4_PLUGIN_SPACE_BITMAP_H__) +#define __REISER4_PLUGIN_SPACE_BITMAP_H__ + +#include "../../dformat.h" +#include "../../block_alloc.h" + +#include /* for __u?? */ +#include /* for struct super_block */ +/* EDWARD-FIXME-HANS: write something as informative as the below for every .h file lacking it. */ +/* declarations of functions implementing methods of space allocator plugin for + bitmap based allocator. 
The functions themselves are in bitmap.c */ +extern int reiser4_init_allocator_bitmap(reiser4_space_allocator *, + struct super_block *, void *); +extern int reiser4_destroy_allocator_bitmap(reiser4_space_allocator *, + struct super_block *); +extern int reiser4_alloc_blocks_bitmap(reiser4_space_allocator *, + reiser4_blocknr_hint *, int needed, + reiser4_block_nr * start, + reiser4_block_nr * len); +extern int reiser4_check_blocks_bitmap(const reiser4_block_nr *, + const reiser4_block_nr *, int); +extern void reiser4_dealloc_blocks_bitmap(reiser4_space_allocator *, + reiser4_block_nr, + reiser4_block_nr); +extern int reiser4_pre_commit_hook_bitmap(void); + +#define reiser4_post_commit_hook_bitmap() do{}while(0) +#define reiser4_post_write_back_hook_bitmap() do{}while(0) +#define reiser4_print_info_bitmap(pref, al) do{}while(0) + +typedef __u64 bmap_nr_t; +typedef __u32 bmap_off_t; + +#endif /* __REISER4_PLUGIN_SPACE_BITMAP_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/space/space_allocator.h b/fs/reiser4/plugin/space/space_allocator.h new file mode 100644 index 000000000000..71bfd11016d7 --- /dev/null +++ b/fs/reiser4/plugin/space/space_allocator.h @@ -0,0 +1,80 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#ifndef __SPACE_ALLOCATOR_H__ +#define __SPACE_ALLOCATOR_H__ + +#include "../../forward.h" +#include "bitmap.h" +/* NIKITA-FIXME-HANS: surely this could use a comment. Something about how bitmap is the only space allocator for now, + * but... 
*/ +#define DEF_SPACE_ALLOCATOR(allocator) \ + \ +static inline int sa_init_allocator (reiser4_space_allocator * al, struct super_block *s, void * opaque) \ +{ \ + return reiser4_init_allocator_##allocator (al, s, opaque); \ +} \ + \ +static inline void sa_destroy_allocator (reiser4_space_allocator *al, struct super_block *s) \ +{ \ + reiser4_destroy_allocator_##allocator (al, s); \ +} \ + \ +static inline int sa_alloc_blocks (reiser4_space_allocator *al, reiser4_blocknr_hint * hint, \ + int needed, reiser4_block_nr * start, reiser4_block_nr * len) \ +{ \ + return reiser4_alloc_blocks_##allocator (al, hint, needed, start, len); \ +} \ +static inline void sa_dealloc_blocks (reiser4_space_allocator * al, reiser4_block_nr start, reiser4_block_nr len) \ +{ \ + reiser4_dealloc_blocks_##allocator (al, start, len); \ +} \ + \ +static inline int sa_check_blocks (const reiser4_block_nr * start, const reiser4_block_nr * end, int desired) \ +{ \ + return reiser4_check_blocks_##allocator (start, end, desired); \ +} \ + \ +static inline void sa_pre_commit_hook (void) \ +{ \ + reiser4_pre_commit_hook_##allocator (); \ +} \ + \ +static inline void sa_post_commit_hook (void) \ +{ \ + reiser4_post_commit_hook_##allocator (); \ +} \ + \ +static inline void sa_post_write_back_hook (void) \ +{ \ + reiser4_post_write_back_hook_##allocator(); \ +} \ + \ +static inline void sa_print_info(const char * prefix, reiser4_space_allocator * al) \ +{ \ + reiser4_print_info_##allocator (prefix, al); \ +} + +DEF_SPACE_ALLOCATOR(bitmap) + +/* this object is part of reiser4 private in-core super block */ +struct reiser4_space_allocator { + union { + /* space allocators might use this pointer to reference their + * data. */ + void *generic; + } u; +}; + +/* __SPACE_ALLOCATOR_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/tail_policy.c b/fs/reiser4/plugin/tail_policy.c new file mode 100644 index 000000000000..1e0eb1d29e42 --- /dev/null +++ b/fs/reiser4/plugin/tail_policy.c @@ -0,0 +1,113 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Formatting policy plugins */ + +/* + * Formatting policy plugin is used by object plugin (of regular file) to + * convert file between two representations. + * + * Currently following policies are implemented: + * never store file in formatted nodes + * always store file in formatted nodes + * store file in formatted nodes if file is smaller than 4 blocks (default) + */ + +#include "../tree.h" +#include "../inode.h" +#include "../super.h" +#include "object.h" +#include "plugin.h" +#include "node/node.h" +#include "plugin_header.h" + +#include +#include /* For struct inode */ + +/** + * have_formatting_never - + * @inode: + * @size: + * + * + */ +/* Never store file's tail as direct item */ +/* Audited by: green(2002.06.12) */ +static int have_formatting_never(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size UNUSED_ARG/* new object size */) +{ + return 0; +} + +/* Always store file's tail as direct item */ +/* Audited by: green(2002.06.12) */ +static int +have_formatting_always(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size UNUSED_ARG/* new object size */) +{ + return 1; +} + +/* This function makes test if we should store file denoted @inode as tails only + or as extents only. 
*/ +static int +have_formatting_default(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size/* new object size */) +{ + assert("umka-1253", inode != NULL); + + if (size > inode->i_sb->s_blocksize * 4) + return 0; + + return 1; +} + +/* tail plugins */ +formatting_plugin formatting_plugins[LAST_TAIL_FORMATTING_ID] = { + [NEVER_TAILS_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = NEVER_TAILS_FORMATTING_ID, + .pops = NULL, + .label = "never", + .desc = "Never store file's tail", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_never + }, + [ALWAYS_TAILS_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = ALWAYS_TAILS_FORMATTING_ID, + .pops = NULL, + .label = "always", + .desc = "Always store file's tail", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_always + }, + [SMALL_FILE_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = SMALL_FILE_FORMATTING_ID, + .pops = NULL, + .label = "4blocks", + .desc = "store files shorter than 4 blocks in tail items", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_default + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/txmod.c b/fs/reiser4/plugin/txmod.c new file mode 100644 index 000000000000..8e489af23caf --- /dev/null +++ b/fs/reiser4/plugin/txmod.c @@ -0,0 +1,1238 @@ +#include "../forward.h" +#include "../debug.h" +#include "../coord.h" +#include "../plugin/plugin.h" +#include "../jnode.h" +#include "../znode.h" +#include "../block_alloc.h" +#include "../reiser4.h" +#include "../flush.h" + +/* + * This file contains implementation of different transaction models. + * + * Transaction model is a high-level block allocator, which assigns block + * numbers to dirty nodes, and, thereby, decides, how those nodes will be + * committed. 
+ * + * Every dirty node of reiser4 atom can be committed by either of the + * following two ways: + * 1) via journal; + * 2) using "write-anywhere" technique. + * + * If the allocator doesn't change on-disk location of a node, then + * this node will be committed using journalling technique (overwrite). + * Otherwise, it will be comitted via write-anywhere technique (relocate): + * + * relocate <---- allocate --- > overwrite + * + * So, in our interpretation the 2 traditional "classic" strategies in + * committing transactions (journalling and "write-anywhere") are just two + * boundary cases: 1) when all nodes are overwritten, and 2) when all nodes + * are relocated. + * + * Besides those 2 boundary cases we can implement in reiser4 the infinite + * set of their various combinations, so that user can choose what is really + * suitable for his needs. + */ + +/* jnode_make_wander_nolock <- find_flush_start_jnode (special case for znode-above-root) + <- jnode_make_wander */ +void jnode_make_wander_nolock(jnode * node); + +/* jnode_make_wander <- txmod.forward_alloc_formatted */ +void jnode_make_wander(jnode * node); + +/* jnode_make_reloc_nolock <- znode_make_reloc + <- unformatted_make_reloc */ +static void jnode_make_reloc_nolock(flush_queue_t * fq, jnode * node); + + + + /* Handle formatted nodes in forward context */ + + +/** + * txmod.forward_alloc_formatted <- allocate_znode <- alloc_pos_and_ancestors <- jnode_flush + * <- alloc_one_ancestor <- alloc_pos_and_ancestors <- jnode_flush + * <- alloc_one_ancestor (recursive) + * <- lock_parent_and_allocate_znode <- squalloc_upper_levels <- check_parents_and_squalloc_upper_levels <- squalloc_upper_levels (recursive) + * <- handle_pos_on_formatted + * <- handle_pos_on_formatted + * <- handle_pos_end_of_twig + * <- handle_pos_to_leaf + */ +void znode_make_reloc(znode * z, flush_queue_t * fq); + + + /* Handle unformatted nodes */ + + +/* unformatted_make_reloc <- assign_real_blocknrs <- txmod.forward_alloc_unformatted 
+ <- txmod.squeeze_alloc_unformatted +*/ +void unformatted_make_reloc(jnode *node, flush_queue_t *fq); + +static void forward_overwrite_unformatted(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr width); + +/* mark_jnode_overwrite <- forward_overwrite_unformatted <- txmod.forward_alloc_unformatted + squeeze_overwrite_unformatted <- txmod.squeeze_alloc_unformatted +*/ +static void mark_jnode_overwrite(struct list_head *jnodes, jnode *node); + +int split_allocated_extent(coord_t *coord, reiser4_block_nr pos_in_unit); +int allocated_extent_slum_size(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, unsigned long count); +void allocate_blocks_unformatted(reiser4_blocknr_hint *preceder, + reiser4_block_nr wanted_count, + reiser4_block_nr *first_allocated, + reiser4_block_nr *allocated, + block_stage_t block_stage); +void assign_real_blocknrs(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr count, + reiser4_block_nr first); +int convert_extent(coord_t *coord, reiser4_extent *replace); +int put_unit_to_end(znode *node, + const reiser4_key *key, reiser4_extent *copy_ext); + +/* + * txmod.forward_alloc_unformatted <- handle_pos_on_twig + * txmod.squeeze_alloc_unformatted <- squeeze_right_twig + */ + +/* Common functions */ + +/** + * Mark node JNODE_OVRWR and put it on atom->overwrite_nodes list. + * Atom lock and jnode lock should be taken before calling this + * function. 
+ */ +void jnode_make_wander_nolock(jnode * node) +{ + txn_atom *atom; + + assert("nikita-2432", !JF_ISSET(node, JNODE_RELOC)); + assert("nikita-3153", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-897", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("nikita-3367", !reiser4_blocknr_is_fake(jnode_get_block(node))); + + atom = node->atom; + + assert("zam-895", atom != NULL); + assert("zam-894", atom_is_protected(atom)); + + JF_SET(node, JNODE_OVRWR); + /* move node to atom's overwrite list */ + list_move_tail(&node->capture_link, ATOM_OVRWR_LIST(atom)); + ON_DEBUG(count_jnode(atom, node, DIRTY_LIST, OVRWR_LIST, 1)); +} + +/* + * Same as jnode_make_wander_nolock, but all necessary locks + * are taken inside this function. + */ +void jnode_make_wander(jnode * node) +{ + txn_atom *atom; + + spin_lock_jnode(node); + atom = jnode_get_atom(node); + assert("zam-913", atom != NULL); + assert("zam-914", !JF_ISSET(node, JNODE_RELOC)); + + jnode_make_wander_nolock(node); + spin_unlock_atom(atom); + spin_unlock_jnode(node); +} + +/* this just sets RELOC bit */ +static void jnode_make_reloc_nolock(flush_queue_t * fq, jnode * node) +{ + assert_spin_locked(&(node->guard)); + assert("zam-916", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-917", !JF_ISSET(node, JNODE_RELOC)); + assert("zam-918", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-920", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("nikita-3367", !reiser4_blocknr_is_fake(jnode_get_block(node))); + jnode_set_reloc(node); +} + +/* + * Mark znode RELOC and put it on flush queue + */ +void znode_make_reloc(znode * z, flush_queue_t * fq) +{ + jnode *node; + txn_atom *atom; + + node = ZJNODE(z); + spin_lock_jnode(node); + + atom = jnode_get_atom(node); + assert("zam-919", atom != NULL); + + jnode_make_reloc_nolock(fq, node); + queue_jnode(fq, node); + + spin_unlock_atom(atom); + spin_unlock_jnode(node); +} + +/* Mark unformatted node RELOC and put it on flush queue */ +void unformatted_make_reloc(jnode *node, flush_queue_t *fq) +{ + 
assert("vs-1479", jnode_is_unformatted(node)); + + jnode_make_reloc_nolock(fq, node); + queue_jnode(fq, node); +} + +/** + * mark_jnode_overwrite - assign node to overwrite set + * @jnodes: overwrite set list head + * @node: jnode to belong to overwrite set + * + * Sets OVRWR jnode state bit and puts @node to the end of list head @jnodes + * which is an accumulator for nodes before they get to overwrite set list of + * atom. + */ +static void mark_jnode_overwrite(struct list_head *jnodes, jnode *node) +{ + spin_lock_jnode(node); + + assert("zam-917", !JF_ISSET(node, JNODE_RELOC)); + assert("zam-918", !JF_ISSET(node, JNODE_OVRWR)); + + JF_SET(node, JNODE_OVRWR); + list_move_tail(&node->capture_link, jnodes); + ON_DEBUG(count_jnode(node->atom, node, DIRTY_LIST, OVRWR_LIST, 0)); + + spin_unlock_jnode(node); +} + +static int forward_relocate_unformatted(flush_pos_t *flush_pos, + reiser4_extent *ext, + extent_state state, + oid_t oid, __u64 index, + __u64 width, int *exit) +{ + int result; + coord_t *coord; + reiser4_extent replace_ext; + reiser4_block_nr protected; + reiser4_block_nr start; + reiser4_block_nr first_allocated; + __u64 allocated; + block_stage_t block_stage; + + *exit = 0; + coord = &flush_pos->coord; + start = extent_get_start(ext); + + if (flush_pos->pos_in_unit) { + /* + * split extent unit into two ones + */ + result = split_allocated_extent(coord, + flush_pos->pos_in_unit); + flush_pos->pos_in_unit = 0; + *exit = 1; + return result; + } + /* + * limit number of nodes to allocate + */ + if (flush_pos->nr_to_write < width) + width = flush_pos->nr_to_write; + + if (state == ALLOCATED_EXTENT) { + /* + * all protected nodes are not flushprepped, therefore + * they are counted as flush_reserved + */ + block_stage = BLOCK_FLUSH_RESERVED; + protected = allocated_extent_slum_size(flush_pos, oid, + index, width); + if (protected == 0) { + flush_pos->state = POS_INVALID; + flush_pos->pos_in_unit = 0; + *exit = 1; + return 0; + } + } else { + block_stage = 
BLOCK_UNALLOCATED; + protected = width; + } + /* + * look at previous unit if possible. If it is allocated, make + * preceder more precise + */ + if (coord->unit_pos && + (state_of_extent(ext - 1) == ALLOCATED_EXTENT)) + reiser4_pos_hint(flush_pos)->blk = + extent_get_start(ext - 1) + + extent_get_width(ext - 1); + /* + * allocate new block numbers for protected nodes + */ + allocate_blocks_unformatted(reiser4_pos_hint(flush_pos), + protected, + &first_allocated, &allocated, + block_stage); + + if (state == ALLOCATED_EXTENT) + /* + * on relocating - free nodes which are going to be + * relocated + */ + reiser4_dealloc_blocks(&start, &allocated, 0, BA_DEFER); + + /* assign new block numbers to protected nodes */ + assign_real_blocknrs(flush_pos, oid, index, allocated, first_allocated); + + /* prepare extent which will replace current one */ + reiser4_set_extent(&replace_ext, first_allocated, allocated); + + /* adjust extent item */ + result = convert_extent(coord, &replace_ext); + if (result != 0 && result != -ENOMEM) { + warning("vs-1461", + "Failed to allocate extent. 
Should not happen\n"); + *exit = 1; + return result; + } + /* + * break flush: we prepared for flushing as many blocks as we + * were asked for + */ + if (flush_pos->nr_to_write == allocated) + flush_pos->state = POS_INVALID; + return 0; +} + +static squeeze_result squeeze_relocate_unformatted(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *key, + reiser4_key *stop_key) +{ + int result; + reiser4_extent *ext; + __u64 index; + __u64 width; + reiser4_block_nr start; + extent_state state; + oid_t oid; + reiser4_block_nr first_allocated; + __u64 allocated; + __u64 protected; + reiser4_extent copy_extent; + block_stage_t block_stage; + + assert("edward-1610", flush_pos->pos_in_unit == 0); + assert("edward-1611", coord_is_leftmost_unit(coord)); + assert("edward-1612", item_is_extent(coord)); + + ext = extent_by_coord(coord); + index = extent_unit_index(coord); + start = extent_get_start(ext); + width = extent_get_width(ext); + state = state_of_extent(ext); + unit_key_by_coord(coord, key); + oid = get_key_objectid(key); + + assert("edward-1613", state != HOLE_EXTENT); + + if (state == ALLOCATED_EXTENT) { + /* + * all protected nodes are not flushprepped, + * therefore they are counted as flush_reserved + */ + block_stage = BLOCK_FLUSH_RESERVED; + protected = allocated_extent_slum_size(flush_pos, oid, + index, width); + if (protected == 0) { + flush_pos->state = POS_INVALID; + flush_pos->pos_in_unit = 0; + return 0; + } + } else { + block_stage = BLOCK_UNALLOCATED; + protected = width; + } + /* + * look at previous unit if possible. 
If it is allocated, make + * preceder more precise + */ + if (coord->unit_pos && + (state_of_extent(ext - 1) == ALLOCATED_EXTENT)) + reiser4_pos_hint(flush_pos)->blk = + extent_get_start(ext - 1) + + extent_get_width(ext - 1); + /* + * allocate new block numbers for protected nodes + */ + allocate_blocks_unformatted(reiser4_pos_hint(flush_pos), + protected, + &first_allocated, &allocated, + block_stage); + /* + * prepare extent which will be copied to left + */ + reiser4_set_extent(©_extent, first_allocated, allocated); + result = put_unit_to_end(left, key, ©_extent); + + if (result == -E_NODE_FULL) { + /* + * free blocks which were just allocated + */ + reiser4_dealloc_blocks(&first_allocated, &allocated, + (state == ALLOCATED_EXTENT) + ? BLOCK_FLUSH_RESERVED + : BLOCK_UNALLOCATED, + BA_PERMANENT); + /* + * rewind the preceder + */ + flush_pos->preceder.blk = first_allocated; + check_preceder(flush_pos->preceder.blk); + return SQUEEZE_TARGET_FULL; + } + if (state == ALLOCATED_EXTENT) { + /* + * free nodes which were relocated + */ + reiser4_dealloc_blocks(&start, &allocated, 0, BA_DEFER); + } + /* + * assign new block numbers to protected nodes + */ + assign_real_blocknrs(flush_pos, oid, index, allocated, + first_allocated); + set_key_offset(key, + get_key_offset(key) + + (allocated << current_blocksize_bits)); + return SQUEEZE_CONTINUE; +} + +/** + * forward_overwrite_unformatted - put bunch of jnodes to overwrite set + * @flush_pos: flush position + * @oid: objectid of file jnodes belong to + * @index: starting index + * @width: extent width + * + * Puts nodes of one extent (file objectid @oid, extent width @width) to atom's + * overwrite set. Starting from the one with index @index. If end of slum is + * detected (node is not found or flushprepped) - stop iterating and set flush + * position's state to POS_INVALID. 
+ */ +static void forward_overwrite_unformatted(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, + reiser4_block_nr width) +{ + unsigned long i; + reiser4_tree *tree; + jnode *node; + txn_atom *atom; + LIST_HEAD(jnodes); + + tree = current_tree; + + atom = atom_locked_by_fq(reiser4_pos_fq(flush_pos)); + assert("vs-1478", atom); + + for (i = flush_pos->pos_in_unit; i < width; i++, index++) { + node = jlookup(tree, oid, index); + if (!node) { + flush_pos->state = POS_INVALID; + break; + } + if (jnode_check_flushprepped(node)) { + flush_pos->state = POS_INVALID; + atomic_dec(&node->x_count); + break; + } + if (node->atom != atom) { + flush_pos->state = POS_INVALID; + atomic_dec(&node->x_count); + break; + } + mark_jnode_overwrite(&jnodes, node); + atomic_dec(&node->x_count); + } + + list_splice_init(&jnodes, ATOM_OVRWR_LIST(atom)->prev); + spin_unlock_atom(atom); +} + +static squeeze_result squeeze_overwrite_unformatted(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *key, + reiser4_key *stop_key) +{ + int result; + reiser4_extent *ext; + __u64 index; + __u64 width; + reiser4_block_nr start; + extent_state state; + oid_t oid; + reiser4_extent copy_extent; + + assert("vs-1457", flush_pos->pos_in_unit == 0); + assert("vs-1467", coord_is_leftmost_unit(coord)); + assert("vs-1467", item_is_extent(coord)); + + ext = extent_by_coord(coord); + index = extent_unit_index(coord); + start = extent_get_start(ext); + width = extent_get_width(ext); + state = state_of_extent(ext); + unit_key_by_coord(coord, key); + oid = get_key_objectid(key); + /* + * try to copy unit as it is to left neighbor + * and make all first not flushprepped nodes + * overwrite nodes + */ + reiser4_set_extent(©_extent, start, width); + + result = put_unit_to_end(left, key, ©_extent); + if (result == -E_NODE_FULL) + return SQUEEZE_TARGET_FULL; + + if (state != HOLE_EXTENT) + forward_overwrite_unformatted(flush_pos, oid, index, width); + + set_key_offset(key, + 
get_key_offset(key) + (width << current_blocksize_bits)); + return SQUEEZE_CONTINUE; +} + +/************************ HYBRID TRANSACTION MODEL ****************************/ + +/** + * This is the default transaction model suggested by Josh MacDonald and + * Hans Reiser. This was the single hardcoded transaction mode till Feb 2014 + * when Edward introduced pure Journalling and pure Write-Anywhere. + * + * In this mode all relocate-overwrite decisions are result of attempts to + * defragment atom's locality. + */ + +/* REVERSE PARENT-FIRST RELOCATION POLICIES */ + +/* This implements the is-it-close-enough-to-its-preceder? test for relocation + in the reverse parent-first relocate context. Here all we know is the + preceder and the block number. Since we are going in reverse, the preceder + may still be relocated as well, so we can't ask the block allocator "is there + a closer block available to relocate?" here. In the _forward_ parent-first + relocate context (not here) we actually call the block allocator to try and + find a closer location. +*/ +static int reverse_try_defragment_if_close(const reiser4_block_nr * pblk, + const reiser4_block_nr * nblk) +{ + reiser4_block_nr dist; + + assert("jmacd-7710", *pblk != 0 && *nblk != 0); + assert("jmacd-7711", !reiser4_blocknr_is_fake(pblk)); + assert("jmacd-7712", !reiser4_blocknr_is_fake(nblk)); + + /* Distance is the absolute value. */ + dist = (*pblk > *nblk) ? (*pblk - *nblk) : (*nblk - *pblk); + + /* If the block is less than FLUSH_RELOCATE_DISTANCE blocks away from + its preceder block, do not relocate. */ + if (dist <= get_current_super_private()->flush.relocate_distance) + return 0; + + return 1; +} + +/** + * This function is a predicate that tests for relocation. Always called in the + * reverse-parent-first context, when we are asking whether the current node + * should be relocated in order to expand the flush by dirtying the parent level + * (and thus proceeding to flush that level). 
When traversing in the forward + * parent-first direction (not here), relocation decisions are handled in two + * places: allocate_znode() and extent_needs_allocation(). + */ +static int reverse_alloc_formatted_hybrid(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + reiser4_block_nr pblk = 0; + reiser4_block_nr nblk = 0; + + assert("jmacd-8989", !jnode_is_root(node)); + /* + * This function is called only from the + * reverse_relocate_check_dirty_parent() and only if the parent + * node is clean. This implies that the parent has the real (i.e., not + * fake) block number, and, so does the child, because otherwise the + * parent would be dirty. + */ + + /* New nodes are treated as if they are being relocated. */ + if (JF_ISSET(node, JNODE_CREATED) || + (pos->leaf_relocate && jnode_get_level(node) == LEAF_LEVEL)) + return 1; + + /* Find the preceder. FIXME(B): When the child is an unformatted, + previously existing node, the coord may be leftmost even though the + child is not the parent-first preceder of the parent. If the first + dirty node appears somewhere in the middle of the first extent unit, + this preceder calculation is wrong. + Needs more logic in here. */ + if (coord_is_leftmost_unit(parent_coord)) { + pblk = *znode_get_block(parent_coord->node); + } else { + pblk = pos->preceder.blk; + } + check_preceder(pblk); + + /* If (pblk == 0) then the preceder isn't allocated or isn't known: + relocate. */ + if (pblk == 0) + return 1; + + nblk = *jnode_get_block(node); + + if (reiser4_blocknr_is_fake(&nblk)) + /* child is unallocated, mark parent dirty */ + return 1; + + return reverse_try_defragment_if_close(&pblk, &nblk); +} + +/** + * A subroutine of forward_alloc_formatted_hybrid(), this is called first to see + * if there is a close position to relocate to. It may return ENOSPC if there is + * no close position. If there is no close position it may not relocate. 
This + * takes care of updating the parent node with the relocated block address. + * + * was allocate_znode_update() + */ +static int forward_try_defragment_locality(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + reiser4_block_nr blk; + lock_handle uber_lock; + int flush_reserved_used = 0; + int grabbed; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + init_lh(&uber_lock); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + grabbed = ctx->grabbed_blocks; + + ret = zload(node); + if (ret) + return ret; + + if (ZF_ISSET(node, JNODE_CREATED)) { + assert("zam-816", reiser4_blocknr_is_fake(znode_get_block(node))); + pos->preceder.block_stage = BLOCK_UNALLOCATED; + } else { + pos->preceder.block_stage = BLOCK_GRABBED; + + /* The disk space for relocating the @node is already reserved + * in "flush reserved" counter if @node is leaf, otherwise we + * grab space using BA_RESERVED (means grab space from whole + * disk not from only 95%). */ + if (znode_get_level(node) == LEAF_LEVEL) { + /* + * earlier (during do_jnode_make_dirty()) we decided + * that @node can possibly go into overwrite set and + * reserved block for its wandering location. + */ + txn_atom *atom = get_current_atom_locked(); + assert("nikita-3449", + ZF_ISSET(node, JNODE_FLUSH_RESERVED)); + flush_reserved2grabbed(atom, (__u64) 1); + spin_unlock_atom(atom); + /* + * we are trying to move node into relocate + * set. Allocation of relocated position "uses" + * reserved block. + */ + ZF_CLR(node, JNODE_FLUSH_RESERVED); + flush_reserved_used = 1; + } else { + ret = reiser4_grab_space_force((__u64) 1, BA_RESERVED); + if (ret != 0) + goto exit; + } + } + + /* We may do not use 5% of reserved disk space here and flush will not + pack tightly. 
*/ + ret = reiser4_alloc_block(&pos->preceder, &blk, + BA_FORMATTED | BA_PERMANENT); + if (ret) + goto exit; + + if (!ZF_ISSET(node, JNODE_CREATED) && + (ret = reiser4_dealloc_block(znode_get_block(node), 0, + BA_DEFER | BA_FORMATTED))) + goto exit; + + if (likely(!znode_is_root(node))) { + item_plugin *iplug; + + iplug = item_plugin_by_coord(parent_coord); + assert("nikita-2954", iplug->f.update != NULL); + iplug->f.update(parent_coord, &blk); + + znode_make_dirty(parent_coord->node); + + } else { + reiser4_tree *tree = znode_get_tree(node); + znode *uber; + + /* We take a longterm lock on the fake node in order to change + the root block number. This may cause atom fusion. */ + ret = get_uber_znode(tree, ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI, + &uber_lock); + /* The fake node cannot be deleted, and we must have priority + here, and may not be confused with ENOSPC. */ + assert("jmacd-74412", + ret != -EINVAL && ret != -E_DEADLOCK && ret != -ENOSPC); + + if (ret) + goto exit; + + uber = uber_lock.node; + + write_lock_tree(tree); + tree->root_block = blk; + write_unlock_tree(tree); + + znode_make_dirty(uber); + } + ret = znode_rehash(node, &blk); +exit: + if (ret) { + /* Get flush reserved block back if something fails, because + * callers assume that on error block wasn't relocated and its + * flush reserved block wasn't used. */ + if (flush_reserved_used) { + /* + * ok, we failed to move node into relocate + * set. Restore status quo. 
+ */ + grabbed2flush_reserved((__u64) 1); + ZF_SET(node, JNODE_FLUSH_RESERVED); + } + } + zrelse(node); + done_lh(&uber_lock); + grabbed2free_mark(grabbed); + return ret; +} + +/* + * Make the final relocate/wander decision during + * forward parent-first squalloc for a formatted node + */ +static int forward_alloc_formatted_hybrid(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + reiser4_super_info_data *sbinfo = get_current_super_private(); + /** + * FIXME(D): We have the node write-locked and should have checked for ! + * allocated() somewhere before reaching this point, but there can be a + * race, so this assertion is bogus. + */ + assert("edward-1614", znode_is_loaded(node)); + assert("jmacd-7987", !jnode_check_flushprepped(ZJNODE(node))); + assert("jmacd-7988", znode_is_write_locked(node)); + assert("jmacd-7989", coord_is_invalid(parent_coord) + || znode_is_write_locked(parent_coord->node)); + + if (ZF_ISSET(node, JNODE_REPACK) || ZF_ISSET(node, JNODE_CREATED) || + znode_is_root(node) || + /* + * We have enough nodes to relocate no matter what. + */ + (pos->leaf_relocate != 0 && znode_get_level(node) == LEAF_LEVEL)) { + /* + * No need to decide with new nodes, they are treated the same + * as relocate. If the root node is dirty, relocate. + */ + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. + */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + goto best_reloc; + + } else if (pos->preceder.blk == 0) { + /* If we don't know the preceder, leave it where it is. */ + jnode_make_wander(ZJNODE(node)); + } else { + /* Make a decision based on block distance. 
*/ + reiser4_block_nr dist; + reiser4_block_nr nblk = *znode_get_block(node); + + assert("jmacd-6172", !reiser4_blocknr_is_fake(&nblk)); + assert("jmacd-6173", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + assert("jmacd-6174", pos->preceder.blk != 0); + + if (pos->preceder.blk == nblk - 1) { + /* Ideal. */ + jnode_make_wander(ZJNODE(node)); + } else { + + dist = + (nblk < + pos->preceder.blk) ? (pos->preceder.blk - + nblk) : (nblk - + pos->preceder.blk); + + /* See if we can find a closer block + (forward direction only). */ + pos->preceder.max_dist = + min((reiser4_block_nr) sbinfo->flush. + relocate_distance, dist); + pos->preceder.level = znode_get_level(node); + + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + pos->preceder.max_dist = 0; + + if (ret && (ret != -ENOSPC)) + return ret; + + if (ret == 0) { + /* Got a better allocation. */ + znode_make_reloc(node, pos->fq); + } else if (dist < sbinfo->flush.relocate_distance) { + /* The present allocation is good enough. */ + jnode_make_wander(ZJNODE(node)); + } else { + /* + * Otherwise, try to relocate to the best + * position. 
+ */ + best_reloc: + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + if (ret != 0) + return ret; + /* + * set JNODE_RELOC bit _after_ node gets + * allocated + */ + znode_make_reloc(node, pos->fq); + } + } + } + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("jmacd-4277", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + + return 0; +} + +static int forward_alloc_unformatted_hybrid(flush_pos_t *flush_pos) +{ + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("vs-1468", flush_pos->state == POS_ON_EPOINT); + assert("vs-1469", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("vs-1457", width > flush_pos->pos_in_unit); + + if (flush_pos->leaf_relocate || state == UNALLOCATED_EXTENT) { + int exit; + int result; + result = forward_relocate_unformatted(flush_pos, ext, state, + oid, + index, width, &exit); + if (exit) + return result; + } else + forward_overwrite_unformatted(flush_pos, oid, index, width); + + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_hybrid(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if ((flush_pos->leaf_relocate && state == ALLOCATED_EXTENT) || + (state == UNALLOCATED_EXTENT)) + /* + * relocate + */ + ret = 
squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + else + /* + * (state == ALLOCATED_EXTENT && !flush_pos->leaf_relocate) || + * state == HOLE_EXTENT - overwrite + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/*********************** JOURNAL TRANSACTION MODEL ****************************/ + +static int forward_alloc_formatted_journal(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + + if (ZF_ISSET(node, JNODE_CREATED)) { + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. + */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + if (ret != 0) { + warning("edward-1615", + "forward defrag failed (%d)", ret); + return ret; + } + /* + * set JNODE_RELOC bit _after_ node gets + * allocated + */ + znode_make_reloc(node, pos->fq); + } + else + jnode_make_wander(ZJNODE(node)); + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("edward-1616", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + return 0; +} + +static int forward_alloc_unformatted_journal(flush_pos_t *flush_pos) +{ + + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("edward-1617", flush_pos->state == POS_ON_EPOINT); + assert("edward-1618", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + item_key_by_coord(coord, &key); 
+ oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("edward-1619", width > flush_pos->pos_in_unit); + + if (state == UNALLOCATED_EXTENT) { + int exit; + int result; + result = forward_relocate_unformatted(flush_pos, ext, state, + oid, + index, width, &exit); + if (exit) + return result; + } + else + /* + * state == ALLOCATED_EXTENT + * keep old allocation + */ + forward_overwrite_unformatted(flush_pos, oid, index, width); + + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_journal(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if (state == UNALLOCATED_EXTENT) + ret = squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + else + /* + * state == ALLOCATED_EXTENT || state == HOLE_EXTENT + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/********************** WA (Write-Anywhere) TRANSACTION MODEL ***************/ + +static int forward_alloc_formatted_wa(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + + assert("edward-1620", znode_is_loaded(node)); + assert("edward-1621", !jnode_check_flushprepped(ZJNODE(node))); + assert("edward-1622", znode_is_write_locked(node)); + assert("edward-1623", coord_is_invalid(parent_coord) + || znode_is_write_locked(parent_coord->node)); + + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. 
+ */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + ret = forward_try_defragment_locality(node, parent_coord, pos); + if (ret && (ret != -ENOSPC)) { + warning("edward-1624", + "forward defrag failed (%d)", ret); + return ret; + } + if (ret == 0) + znode_make_reloc(node, pos->fq); + else { + ret = forward_try_defragment_locality(node, parent_coord, pos); + if (ret) { + warning("edward-1625", + "forward defrag failed (%d)", ret); + return ret; + } + /* set JNODE_RELOC bit _after_ node gets allocated */ + znode_make_reloc(node, pos->fq); + } + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("edward-1626", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + return 0; +} + +static int forward_alloc_unformatted_wa(flush_pos_t *flush_pos) +{ + int exit; + int result; + + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("edward-1627", flush_pos->state == POS_ON_EPOINT); + assert("edward-1628", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("edward-1629", width > flush_pos->pos_in_unit); + assert("edward-1630", + state == ALLOCATED_EXTENT || state == UNALLOCATED_EXTENT); + /* + * always relocate + */ + result = forward_relocate_unformatted(flush_pos, ext, state, oid, + index, width, &exit); + if (exit) + return result; + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_wa(znode *left, + const coord_t *coord, + flush_pos_t 
*flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if (state == HOLE_EXTENT) + /* + * hole extents are handled in squeeze_overwrite + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + else + ret = squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/******************************************************************************/ + +txmod_plugin txmod_plugins[LAST_TXMOD_ID] = { + [HYBRID_TXMOD_ID] = { + .h = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .id = HYBRID_TXMOD_ID, + .pops = NULL, + .label = "hybrid", + .desc = "Hybrid Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_hybrid, + .reverse_alloc_formatted = reverse_alloc_formatted_hybrid, + .forward_alloc_unformatted = forward_alloc_unformatted_hybrid, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_hybrid + }, + [JOURNAL_TXMOD_ID] = { + .h = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .id = JOURNAL_TXMOD_ID, + .pops = NULL, + .label = "journal", + .desc = "Journalling Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_journal, + .reverse_alloc_formatted = NULL, + .forward_alloc_unformatted = forward_alloc_unformatted_journal, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_journal + }, + [WA_TXMOD_ID] = { + .h = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .id = WA_TXMOD_ID, + .pops = NULL, + .label = "wa", + .desc = "Write-Anywhere Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_wa, + .reverse_alloc_formatted = NULL, + .forward_alloc_unformatted = forward_alloc_unformatted_wa, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_wa + } +}; + +/* + * Local 
variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/pool.c b/fs/reiser4/pool.c new file mode 100644 index 000000000000..56636381eb2a --- /dev/null +++ b/fs/reiser4/pool.c @@ -0,0 +1,231 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Fast pool allocation. + + There are situations when some sub-system normally asks memory allocator + for only few objects, but under some circumstances could require much + more. Typical and actually motivating example is tree balancing. It needs + to keep track of nodes that were involved into it, and it is well-known + that in reasonable packed balanced tree most (92.938121%) percent of all + balancings end up after working with only few nodes (3.141592 on + average). But in rare cases balancing can involve much more nodes + (3*tree_height+1 in extremal situation). + + On the one hand, we don't want to resort to dynamic allocation (slab, + malloc(), etc.) to allocate data structures required to keep track of + nodes during balancing. On the other hand, we cannot statically allocate + required amount of space on the stack, because first: it is useless wastage + of precious resource, and second: this amount is unknown in advance (tree + height can change). + + Pools, implemented in this file are solution for this problem: + + - some configurable amount of objects is statically preallocated on the + stack + + - if this preallocated pool is exhausted and more objects is requested + they are allocated dynamically. + + Pools encapsulate distinction between statically and dynamically allocated + objects. Both allocation and recycling look exactly the same. + + To keep track of dynamically allocated objects, pool adds its own linkage + to each object. + + NOTE-NIKITA This linkage also contains some balancing-specific data. This + is not perfect. 
On the other hand, balancing is currently the only client + of pool code. + + NOTE-NIKITA Another desirable feature is to rewrite all pool manipulation + functions in the style of tslist/tshash, i.e., make them unreadable, but + type-safe. + +*/ + +#include "debug.h" +#include "pool.h" +#include "super.h" + +#include +#include + +/* initialize new pool object @h */ +static void reiser4_init_pool_obj(struct reiser4_pool_header *h) +{ + INIT_LIST_HEAD(&h->usage_linkage); + INIT_LIST_HEAD(&h->level_linkage); + INIT_LIST_HEAD(&h->extra_linkage); +} + +/* initialize new pool */ +void reiser4_init_pool(struct reiser4_pool *pool /* pool to initialize */ , + size_t obj_size /* size of objects in @pool */ , + int num_of_objs /* number of preallocated objects */ , + char *data/* area for preallocated objects */) +{ + struct reiser4_pool_header *h; + int i; + + assert("nikita-955", pool != NULL); + assert("nikita-1044", obj_size > 0); + assert("nikita-956", num_of_objs >= 0); + assert("nikita-957", data != NULL); + + memset(pool, 0, sizeof *pool); + pool->obj_size = obj_size; + pool->data = data; + INIT_LIST_HEAD(&pool->free); + INIT_LIST_HEAD(&pool->used); + INIT_LIST_HEAD(&pool->extra); + memset(data, 0, obj_size * num_of_objs); + for (i = 0; i < num_of_objs; ++i) { + h = (struct reiser4_pool_header *) (data + i * obj_size); + reiser4_init_pool_obj(h); + /* add pool header to the end of pool's free list */ + list_add_tail(&h->usage_linkage, &pool->free); + } +} + +/* release pool resources + + Release all resources acquired by this pool, specifically, dynamically + allocated objects. + +*/ +void reiser4_done_pool(struct reiser4_pool *pool UNUSED_ARG) +{ +} + +/* allocate carry object from @pool + + First, try to get preallocated object. If this fails, resort to dynamic + allocation. 
+ +*/ +static void *reiser4_pool_alloc(struct reiser4_pool *pool) +{ + struct reiser4_pool_header *result; + + assert("nikita-959", pool != NULL); + + if (!list_empty(&pool->free)) { + struct list_head *linkage; + + linkage = pool->free.next; + list_del(linkage); + INIT_LIST_HEAD(linkage); + result = list_entry(linkage, struct reiser4_pool_header, + usage_linkage); + BUG_ON(!list_empty(&result->level_linkage) || + !list_empty(&result->extra_linkage)); + } else { + /* pool is empty. Extra allocations don't deserve dedicated + slab to be served from, as they are expected to be rare. */ + result = kmalloc(pool->obj_size, reiser4_ctx_gfp_mask_get()); + if (result != 0) { + reiser4_init_pool_obj(result); + list_add(&result->extra_linkage, &pool->extra); + } else + return ERR_PTR(RETERR(-ENOMEM)); + BUG_ON(!list_empty(&result->usage_linkage) || + !list_empty(&result->level_linkage)); + } + ++pool->objs; + list_add(&result->usage_linkage, &pool->used); + memset(result + 1, 0, pool->obj_size - sizeof *result); + return result; +} + +/* return object back to the pool */ +void reiser4_pool_free(struct reiser4_pool *pool, + struct reiser4_pool_header *h) +{ + assert("nikita-961", h != NULL); + assert("nikita-962", pool != NULL); + + --pool->objs; + assert("nikita-963", pool->objs >= 0); + + list_del_init(&h->usage_linkage); + list_del_init(&h->level_linkage); + + if (list_empty(&h->extra_linkage)) + /* + * pool header is not an extra one. Push it onto free list + * using usage_linkage + */ + list_add(&h->usage_linkage, &pool->free); + else { + /* remove pool header from pool's extra list and kfree it */ + list_del(&h->extra_linkage); + kfree(h); + } +} + +/* add new object to the carry level list + + Carry level is FIFO most of the time, but not always. 
Complications arise + when make_space() function tries to go to the left neighbor and thus adds + carry node before existing nodes, and also, when updating delimiting keys + after moving data between two nodes, we want left node to be locked before + right node. + + Latter case is confusing at the first glance. Problem is that COP_UPDATE + opration that updates delimiting keys is sometimes called with two nodes + (when data are moved between two nodes) and sometimes with only one node + (when leftmost item is deleted in a node). In any case operation is + supplied with at least node whose left delimiting key is to be updated + (that is "right" node). + + @pool - from which to allocate new object; + @list - where to add object; + @reference - after (or before) which existing object to add +*/ +struct reiser4_pool_header *reiser4_add_obj(struct reiser4_pool *pool, + struct list_head *list, + pool_ordering order, + struct reiser4_pool_header *reference) +{ + struct reiser4_pool_header *result; + + assert("nikita-972", pool != NULL); + + result = reiser4_pool_alloc(pool); + if (IS_ERR(result)) + return result; + + assert("nikita-973", result != NULL); + + switch (order) { + case POOLO_BEFORE: + __list_add(&result->level_linkage, + reference->level_linkage.prev, + &reference->level_linkage); + break; + case POOLO_AFTER: + __list_add(&result->level_linkage, + &reference->level_linkage, + reference->level_linkage.next); + break; + case POOLO_LAST: + list_add_tail(&result->level_linkage, list); + break; + case POOLO_FIRST: + list_add(&result->level_linkage, list); + break; + default: + wrong_return_value("nikita-927", "order"); + } + return result; +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/pool.h b/fs/reiser4/pool.h new file mode 100644 index 000000000000..d0f91fe09ff5 --- /dev/null +++ b/fs/reiser4/pool.h @@ -0,0 +1,57 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Fast pool allocation */ + +#ifndef __REISER4_POOL_H__ +#define __REISER4_POOL_H__ + +#include + +struct reiser4_pool { + size_t obj_size; + int objs; + char *data; + struct list_head free; + struct list_head used; + struct list_head extra; +}; + +struct reiser4_pool_header { + /* object is either on free or "used" lists */ + struct list_head usage_linkage; + struct list_head level_linkage; + struct list_head extra_linkage; +}; + +typedef enum { + POOLO_BEFORE, + POOLO_AFTER, + POOLO_LAST, + POOLO_FIRST +} pool_ordering; + +/* pool manipulation functions */ + +extern void reiser4_init_pool(struct reiser4_pool *pool, size_t obj_size, + int num_of_objs, char *data); +extern void reiser4_done_pool(struct reiser4_pool *pool); +extern void reiser4_pool_free(struct reiser4_pool *pool, + struct reiser4_pool_header *h); +struct reiser4_pool_header *reiser4_add_obj(struct reiser4_pool *pool, + struct list_head *list, + pool_ordering order, + struct reiser4_pool_header *reference); + +/* __REISER4_POOL_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/readahead.c b/fs/reiser4/readahead.c new file mode 100644 index 000000000000..0be94b646640 --- /dev/null +++ b/fs/reiser4/readahead.c @@ -0,0 +1,140 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "forward.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "inode.h" +#include "key.h" +#include "znode.h" + +#include /* for totalram_pages */ + +void reiser4_init_ra_info(ra_info_t *rai) +{ + rai->key_to_stop = *reiser4_min_key(); +} + +/* global formatted node readahead parameter. It can be set by mount option + * -o readahead:NUM:1 */ +static inline int ra_adjacent_only(int flags) +{ + return flags & RA_ADJACENT_ONLY; +} + +/* this is used by formatted_readahead to decide whether read for right neighbor + * of node is to be issued. It returns 1 if right neighbor's first key is less + * or equal to readahead's stop key */ +static int should_readahead_neighbor(znode * node, ra_info_t *info) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyle(znode_get_rd_key(node), &info->key_to_stop); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +#define LOW_MEM_PERCENTAGE (5) + +static int low_on_memory(void) +{ + unsigned int freepages; + + freepages = nr_free_pages(); + return freepages < (totalram_pages * LOW_MEM_PERCENTAGE / 100); +} + +/* start read for @node and for a few of its right neighbors */ +void formatted_readahead(znode * node, ra_info_t *info) +{ + struct formatted_ra_params *ra_params; + znode *cur; + int i; + int grn_flags; + lock_handle next_lh; + + /* do nothing if node block number has not been assigned to node (which + * means it is still in cache). 
*/ + if (reiser4_blocknr_is_fake(znode_get_block(node))) + return; + + ra_params = get_current_super_ra_params(); + + if (znode_page(node) == NULL) + jstartio(ZJNODE(node)); + + if (znode_get_level(node) != LEAF_LEVEL) + return; + + /* don't waste memory for read-ahead when low on memory */ + if (low_on_memory()) + return; + + /* We can have locked nodes on upper tree levels, in this situation lock + priorities do not help to resolve deadlocks, we have to use TRY_LOCK + here. */ + grn_flags = (GN_CAN_USE_UPPER_LEVELS | GN_TRY_LOCK); + + i = 0; + cur = zref(node); + init_lh(&next_lh); + while (i < ra_params->max) { + const reiser4_block_nr * nextblk; + + if (!should_readahead_neighbor(cur, info)) + break; + + if (reiser4_get_right_neighbor + (&next_lh, cur, ZNODE_READ_LOCK, grn_flags)) + break; + + nextblk = znode_get_block(next_lh.node); + if (reiser4_blocknr_is_fake(nextblk) || + (ra_adjacent_only(ra_params->flags) + && *nextblk != *znode_get_block(cur) + 1)) + break; + + zput(cur); + cur = zref(next_lh.node); + done_lh(&next_lh); + if (znode_page(cur) == NULL) + jstartio(ZJNODE(cur)); + else + /* Do not scan read-ahead window if pages already + * allocated (and i/o already started). 
*/ + break; + + i++; + } + zput(cur); + done_lh(&next_lh); +} + +void reiser4_readdir_readahead_init(struct inode *dir, tap_t *tap) +{ + reiser4_key *stop_key; + + assert("nikita-3542", dir != NULL); + assert("nikita-3543", tap != NULL); + + stop_key = &tap->ra_info.key_to_stop; + /* initialize readdir readahead information: include into readahead + * stat data of all files of the directory */ + set_key_locality(stop_key, get_inode_oid(dir)); + set_key_type(stop_key, KEY_SD_MINOR); + set_key_ordering(stop_key, get_key_ordering(reiser4_max_key())); + set_key_objectid(stop_key, get_key_objectid(reiser4_max_key())); + set_key_offset(stop_key, get_key_offset(reiser4_max_key())); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/readahead.h b/fs/reiser4/readahead.h new file mode 100644 index 000000000000..de42234b4af4 --- /dev/null +++ b/fs/reiser4/readahead.h @@ -0,0 +1,42 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#ifndef __READAHEAD_H__ +#define __READAHEAD_H__ + +#include "key.h" + +typedef enum { + RA_ADJACENT_ONLY = 1, /* only requests nodes which are adjacent. + Default is NO (not only adjacent) */ +} ra_global_flags; + +/* reiser4 super block has a field of this type. + It controls readahead during tree traversals */ +struct formatted_ra_params { + unsigned long max; /* request not more than this amount of nodes. 
+ Default is totalram_pages / 4 */ + int flags; +}; + +typedef struct { + reiser4_key key_to_stop; +} ra_info_t; + +void formatted_readahead(znode * , ra_info_t *); +void reiser4_init_ra_info(ra_info_t *rai); + +extern void reiser4_readdir_readahead_init(struct inode *dir, tap_t *tap); + +/* __READAHEAD_H__ */ +#endif + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/reiser4.h b/fs/reiser4/reiser4.h new file mode 100644 index 000000000000..e244656398be --- /dev/null +++ b/fs/reiser4/reiser4.h @@ -0,0 +1,260 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + + +/* definitions of common constants used by reiser4 */ + +#if !defined( __REISER4_H__ ) +#define __REISER4_H__ + +#include /* for HZ */ +#include +#include +#include +#include +#include + +/* + * reiser4 compilation options. + */ + +#if defined(CONFIG_REISER4_DEBUG) +/* turn on assertion checks */ +#define REISER4_DEBUG (1) +#else +#define REISER4_DEBUG (0) +#endif + +#define REISER4_SHA256 (0) + +/* + * Turn on large keys mode. In his mode (which is default), reiser4 key has 4 + * 8-byte components. In the old "small key" mode, it's 3 8-byte + * components. Additional component, referred to as "ordering" is used to + * order items from which given object is composed of. As such, ordering is + * placed between locality and objectid. For directory item ordering contains + * initial prefix of the file name this item is for. This sorts all directory + * items within given directory lexicographically (but see + * fibration.[ch]). For file body and stat-data, ordering contains initial + * prefix of the name file was initially created with. In the common case + * (files with single name) this allows to order file bodies and stat-datas in + * the same order as their respective directory entries, thus speeding up + * readdir. 
+ * + * Note, that kernel can only mount file system with the same key size as one + * it is compiled for, so flipping this option may render your data + * inaccessible. + */ +#define REISER4_LARGE_KEY (1) +/*#define REISER4_LARGE_KEY (0)*/ + +/*#define GUESS_EXISTS 1*/ + +/* + * PLEASE update fs/reiser4/kattr.c:show_options() when adding new compilation + * option + */ + +#define REISER4_SUPER_MAGIC_STRING "ReIsEr4" +extern const int REISER4_MAGIC_OFFSET; /* offset to magic string from the + * beginning of device */ + +/* here go tunable parameters that are not worth special entry in kernel + configuration */ + +/* default number of slots in coord-by-key caches */ +#define CBK_CACHE_SLOTS (16) +/* how many elementary tree operation to carry on the next level */ +#define CARRIES_POOL_SIZE (5) +/* size of pool of preallocated nodes for carry process. */ +#define NODES_LOCKED_POOL_SIZE (5) + +#define REISER4_NEW_NODE_FLAGS (COPI_LOAD_LEFT | COPI_LOAD_RIGHT | COPI_GO_LEFT) +#define REISER4_NEW_EXTENT_FLAGS (COPI_LOAD_LEFT | COPI_LOAD_RIGHT | COPI_GO_LEFT) +#define REISER4_PASTE_FLAGS (COPI_GO_LEFT) +#define REISER4_INSERT_FLAGS (COPI_GO_LEFT) + +/* we are supporting reservation of disk space on uid basis */ +#define REISER4_SUPPORT_UID_SPACE_RESERVATION (0) +/* we are supporting reservation of disk space for groups */ +#define REISER4_SUPPORT_GID_SPACE_RESERVATION (0) +/* we are supporting reservation of disk space for root */ +#define REISER4_SUPPORT_ROOT_SPACE_RESERVATION (0) +/* we use rapid flush mode, see flush.c for comments. */ +#define REISER4_USE_RAPID_FLUSH (1) + +/* + * set this to 0 if you don't want to use wait-for-flush in ->writepage(). 
+ */ +#define REISER4_USE_ENTD (1) + +/* key allocation is Plan-A */ +#define REISER4_PLANA_KEY_ALLOCATION (1) +/* key allocation follows good old 3.x scheme */ +#define REISER4_3_5_KEY_ALLOCATION (0) + +/* size of hash-table for znodes */ +#define REISER4_ZNODE_HASH_TABLE_SIZE (1 << 13) + +/* number of buckets in lnode hash-table */ +#define LNODE_HTABLE_BUCKETS (1024) + +/* some ridiculously high maximal limit on height of znode tree. This + is used in declaration of various per level arrays and + to allocate stattistics gathering array for per-level stats. */ +#define REISER4_MAX_ZTREE_HEIGHT (8) + +#define REISER4_PANIC_MSG_BUFFER_SIZE (1024) + +/* If array contains less than REISER4_SEQ_SEARCH_BREAK elements then, + sequential search is on average faster than binary. This is because + of better optimization and because sequential search is more CPU + cache friendly. This number (25) was found by experiments on dual AMD + Athlon(tm), 1400MHz. + + NOTE: testing in kernel has shown that binary search is more effective than + implied by results of the user level benchmarking. Probably because in the + node keys are separated by other data. So value was adjusted after few + tests. More thorough tuning is needed. +*/ +#define REISER4_SEQ_SEARCH_BREAK (3) + +/* don't allow tree to be lower than this */ +#define REISER4_MIN_TREE_HEIGHT (TWIG_LEVEL) + +/* NOTE NIKITA this is no longer used: maximal atom size is auto-adjusted to + * available memory. */ +/* Default value of maximal atom size. Can be ovewritten by + tmgr.atom_max_size mount option. By default infinity. */ +#define REISER4_ATOM_MAX_SIZE ((unsigned)(~0)) + +/* Default value of maximal atom age (in jiffies). After reaching this age + atom will be forced to commit, either synchronously or asynchronously. Can + be overwritten by tmgr.atom_max_age mount option. 
*/ +#define REISER4_ATOM_MAX_AGE (600 * HZ) + +/* sleeping period for ktxnmrgd */ +#define REISER4_TXNMGR_TIMEOUT (5 * HZ) + +/* timeout to wait for ent thread in writepage. Default: 3 milliseconds. */ +#define REISER4_ENTD_TIMEOUT (3 * HZ / 1000) + +/* start complaining after that many restarts in coord_by_key(). + + This either means incredibly heavy contention for this part of a tree, or + some corruption or bug. +*/ +#define REISER4_CBK_ITERATIONS_LIMIT (100) + +/* return -EIO after that many iterations in coord_by_key(). + + I have witnessed more than 800 iterations (in 30 thread test) before cbk + finished. --nikita +*/ +#define REISER4_MAX_CBK_ITERATIONS 500000 + +/* put a per-inode limit on maximal number of directory entries with identical + keys in hashed directory. + + Disable this until inheritance interfaces stabilize: we need some way to + set per directory limit. +*/ +#define REISER4_USE_COLLISION_LIMIT (0) + +/* If flush finds more than FLUSH_RELOCATE_THRESHOLD adjacent dirty leaf-level + blocks it will force them to be relocated. */ +#define FLUSH_RELOCATE_THRESHOLD 64 +/* If flush finds can find a block allocation closer than at most + FLUSH_RELOCATE_DISTANCE from the preceder it will relocate to that position. + */ +#define FLUSH_RELOCATE_DISTANCE 64 + +/* If we have written this much or more blocks before encountering busy jnode + in flush list - abort flushing hoping that next time we get called + this jnode will be clean already, and we will save some seeks. */ +#define FLUSH_WRITTEN_THRESHOLD 50 + +/* The maximum number of nodes to scan left on a level during flush. */ +#define FLUSH_SCAN_MAXNODES 10000 + +/* per-atom limit of flushers */ +#define ATOM_MAX_FLUSHERS (1) + +/* default tracing buffer size */ +#define REISER4_TRACE_BUF_SIZE (1 << 15) + +/* what size units of IO we would like cp, etc., to use, in writing to + reiser4. In bytes. + + Can be overwritten by optimal_io_size mount option. 
+*/ +#define REISER4_OPTIMAL_IO_SIZE (64 * 1024) + +/* see comments in inode.c:oid_to_uino() */ +#define REISER4_UINO_SHIFT (1 << 30) + +/* Mark function argument as unused to avoid compiler warnings. */ +#define UNUSED_ARG __attribute__((unused)) + +#if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) +#define NONNULL __attribute__((nonnull)) +#else +#define NONNULL +#endif + +/* master super block offset in bytes.*/ +#define REISER4_MASTER_OFFSET 65536 + +/* size of VFS block */ +#define VFS_BLKSIZE 512 +/* number of bits in size of VFS block (512==2^9) */ +#define VFS_BLKSIZE_BITS 9 + +#define REISER4_I reiser4_inode_data + +/* implication */ +#define ergo(antecedent, consequent) (!(antecedent) || (consequent)) +/* logical equivalence */ +#define equi(p1, p2) (ergo((p1), (p2)) && ergo((p2), (p1))) + +#define sizeof_array(x) ((int) (sizeof(x) / sizeof(x[0]))) + +#define NOT_YET (0) + +/** Reiser4 specific error codes **/ + +#define REISER4_ERROR_CODE_BASE 10000 + +/* Neighbor is not available (side neighbor or parent) */ +#define E_NO_NEIGHBOR (REISER4_ERROR_CODE_BASE) + +/* Node was not found in cache */ +#define E_NOT_IN_CACHE (REISER4_ERROR_CODE_BASE + 1) + +/* node has no free space enough for completion of balancing operation */ +#define E_NODE_FULL (REISER4_ERROR_CODE_BASE + 2) + +/* repeat operation */ +#define E_REPEAT (REISER4_ERROR_CODE_BASE + 3) + +/* deadlock happens */ +#define E_DEADLOCK (REISER4_ERROR_CODE_BASE + 4) + +/* operation cannot be performed, because it would block and non-blocking mode + * was requested. */ +#define E_BLOCK (REISER4_ERROR_CODE_BASE + 5) + +/* wait some event (depends on context), then repeat */ +#define E_WAIT (REISER4_ERROR_CODE_BASE + 6) + +#endif /* __REISER4_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/safe_link.c b/fs/reiser4/safe_link.c new file mode 100644 index 000000000000..d59f6f0f129e --- /dev/null +++ b/fs/reiser4/safe_link.c @@ -0,0 +1,354 @@ +/* Copyright 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Safe-links. */ + +/* + * Safe-links are used to maintain file system consistency during operations + * that spawns multiple transactions. For example: + * + * 1. Unlink. UNIX supports "open-but-unlinked" files, that is files + * without user-visible names in the file system, but still opened by some + * active process. What happens here is that unlink proper (i.e., removal + * of the last file name) and file deletion (truncate of file body to zero + * and deletion of stat-data, that happens when last file descriptor is + * closed), may belong to different transactions T1 and T2. If a crash + * happens after T1 commit, but before T2 commit, on-disk file system has + * a file without name, that is, disk space leak. + * + * 2. Truncate. Truncate of large file may spawn multiple transactions. If + * system crashes while truncate was in-progress, file is left partially + * truncated, which violates "atomicity guarantees" of reiser4, viz. that + * every system is atomic. + * + * Safe-links address both above cases. Basically, safe-link is a way post + * some operation to be executed during commit of some other transaction than + * current one. (Another way to look at the safe-link is to interpret it as a + * logical logging.) + * + * Specifically, at the beginning of unlink safe-link in inserted in the + * tree. This safe-link is normally removed by file deletion code (during + * transaction T2 in the above terms). Truncate also inserts safe-link that is + * normally removed when truncate operation is finished. 
+ * + * This means, that in the case of "clean umount" there are no safe-links in + * the tree. If safe-links are observed during mount, it means that (a) system + * was terminated abnormally, and (b) safe-link correspond to the "pending" + * (i.e., not finished) operations that were in-progress during system + * termination. Each safe-link record enough information to complete + * corresponding operation, and mount simply "replays" them (hence, the + * analogy with the logical logging). + * + * Safe-links are implemented as blackbox items (see + * plugin/item/blackbox.[ch]). + * + * For the reference: ext3 also has similar mechanism, it's called "an orphan + * list" there. + */ + +#include "safe_link.h" +#include "debug.h" +#include "inode.h" + +#include "plugin/item/blackbox.h" + +#include + +/* + * On-disk format of safe-link. + */ +typedef struct safelink { + reiser4_key sdkey; /* key of stat-data for the file safe-link is + * for */ + d64 size; /* size to which file should be truncated */ +} safelink_t; + +/* + * locality where safe-link items are stored. Next to the objectid of root + * directory. + */ +static oid_t safe_link_locality(reiser4_tree * tree) +{ + return get_key_objectid(get_super_private(tree->super)->df_plug-> + root_dir_key(tree->super)) + 1; +} + +/* + Construct a key for the safe-link. Key has the following format: + +| 60 | 4 | 64 | 4 | 60 | 64 | ++---------------+---+------------------+---+---------------+------------------+ +| locality | 0 | 0 | 0 | objectid | link type | ++---------------+---+------------------+---+---------------+------------------+ +| | | | | +| 8 bytes | 8 bytes | 8 bytes | 8 bytes | + + This is in large keys format. In small keys format second 8 byte chunk is + out. Locality is a constant returned by safe_link_locality(). objectid is + an oid of a file on which operation protected by this safe-link is + performed. link-type is used to distinguish safe-links for different + operations. 
+ + */ +static reiser4_key *build_link_key(reiser4_tree * tree, oid_t oid, + reiser4_safe_link_t link, reiser4_key * key) +{ + reiser4_key_init(key); + set_key_locality(key, safe_link_locality(tree)); + set_key_objectid(key, oid); + set_key_offset(key, link); + return key; +} + +/* + * how much disk space is necessary to insert and remove (in the + * error-handling path) safe-link. + */ +static __u64 safe_link_tograb(reiser4_tree * tree) +{ + return + /* insert safe link */ + estimate_one_insert_item(tree) + + /* remove safe link */ + estimate_one_item_removal(tree) + + /* drill to the leaf level during insertion */ + 1 + estimate_one_insert_item(tree) + + /* + * possible update of existing safe-link. Actually, if + * safe-link existed already (we failed to remove it), then no + * insertion is necessary, so this term is already "covered", + * but for simplicity let's left it. + */ + 1; +} + +/* + * grab enough disk space to insert and remove (in the error-handling path) + * safe-link. + */ +int safe_link_grab(reiser4_tree * tree, reiser4_ba_flags_t flags) +{ + int result; + + grab_space_enable(); + /* The sbinfo->delete_mutex can be taken here. + * safe_link_release() should be called before leaving reiser4 + * context. */ + result = + reiser4_grab_reserved(tree->super, safe_link_tograb(tree), flags); + grab_space_enable(); + return result; +} + +/* + * release unused disk space reserved by safe_link_grab(). + */ +void safe_link_release(reiser4_tree * tree) +{ + reiser4_release_reserved(tree->super); +} + +/* + * insert into tree safe-link for operation @link on inode @inode. + */ +int safe_link_add(struct inode *inode, reiser4_safe_link_t link) +{ + reiser4_key key; + safelink_t sl; + int length; + int result; + reiser4_tree *tree; + + build_sd_key(inode, &sl.sdkey); + length = sizeof sl.sdkey; + + if (link == SAFE_TRUNCATE) { + /* + * for truncate we have to store final file length also, + * expand item. 
+ */ + length += sizeof(sl.size); + put_unaligned(cpu_to_le64(inode->i_size), &sl.size); + } + tree = reiser4_tree_by_inode(inode); + build_link_key(tree, get_inode_oid(inode), link, &key); + + result = store_black_box(tree, &key, &sl, length); + if (result == -EEXIST) + result = update_black_box(tree, &key, &sl, length); + return result; +} + +/* + * remove safe-link corresponding to the operation @link on inode @inode from + * the tree. + */ +int safe_link_del(reiser4_tree * tree, oid_t oid, reiser4_safe_link_t link) +{ + reiser4_key key; + + return kill_black_box(tree, build_link_key(tree, oid, link, &key)); +} + +/* + * in-memory structure to keep information extracted from safe-link. This is + * used to iterate over all safe-links. + */ +struct safe_link_context { + reiser4_tree *tree; /* internal tree */ + reiser4_key key; /* safe-link key */ + reiser4_key sdkey; /* key of object stat-data */ + reiser4_safe_link_t link; /* safe-link type */ + oid_t oid; /* object oid */ + __u64 size; /* final size for truncate */ +}; + +/* + * start iterating over all safe-links. + */ +static void safe_link_iter_begin(reiser4_tree * tree, + struct safe_link_context *ctx) +{ + ctx->tree = tree; + reiser4_key_init(&ctx->key); + set_key_locality(&ctx->key, safe_link_locality(tree)); + set_key_objectid(&ctx->key, get_key_objectid(reiser4_max_key())); + set_key_offset(&ctx->key, get_key_offset(reiser4_max_key())); +} + +/* + * return next safe-link. + */ +static int safe_link_iter_next(struct safe_link_context *ctx) +{ + int result; + safelink_t sl; + + result = load_black_box(ctx->tree, &ctx->key, &sl, sizeof sl, 0); + if (result == 0) { + ctx->oid = get_key_objectid(&ctx->key); + ctx->link = get_key_offset(&ctx->key); + ctx->sdkey = sl.sdkey; + if (ctx->link == SAFE_TRUNCATE) + ctx->size = le64_to_cpu(get_unaligned(&sl.size)); + } + return result; +} + +/* + * check are there any more safe-links left in the tree. 
+ */ +static int safe_link_iter_finished(struct safe_link_context *ctx) +{ + return get_key_locality(&ctx->key) != safe_link_locality(ctx->tree); +} + +/* + * finish safe-link iteration. + */ +static void safe_link_iter_end(struct safe_link_context *ctx) +{ + /* nothing special */ +} + +/* + * process single safe-link. + */ +static int process_safelink(struct super_block *super, reiser4_safe_link_t link, + reiser4_key * sdkey, oid_t oid, __u64 size) +{ + struct inode *inode; + int result; + + /* + * obtain object inode by reiser4_iget(), then call object plugin + * ->safelink() method to do actual work, then delete safe-link on + * success. + */ + inode = reiser4_iget(super, sdkey, 1); + if (!IS_ERR(inode)) { + file_plugin *fplug; + + fplug = inode_file_plugin(inode); + assert("nikita-3428", fplug != NULL); + assert("", oid == get_inode_oid(inode)); + if (fplug->safelink != NULL) { + /* reiser4_txn_restart_current is not necessary because + * mounting is signle thread. However, without it + * deadlock detection code will complain (see + * nikita-3361). */ + reiser4_txn_restart_current(); + result = fplug->safelink(inode, link, size); + } else { + warning("nikita-3430", + "Cannot handle safelink for %lli", + (unsigned long long)oid); + reiser4_print_key("key", sdkey); + result = 0; + } + if (result != 0) { + warning("nikita-3431", + "Error processing safelink for %lli: %i", + (unsigned long long)oid, result); + } + reiser4_iget_complete(inode); + iput(inode); + if (result == 0) { + result = safe_link_grab(reiser4_get_tree(super), + BA_CAN_COMMIT); + if (result == 0) + result = + safe_link_del(reiser4_get_tree(super), oid, + link); + safe_link_release(reiser4_get_tree(super)); + /* + * restart transaction: if there was large number of + * safe-links, their processing may fail to fit into + * single transaction. 
+ */ + if (result == 0) + reiser4_txn_restart_current(); + } + } else + result = PTR_ERR(inode); + return result; +} + +/* + * iterate over all safe-links in the file-system processing them one by one. + */ +int process_safelinks(struct super_block *super) +{ + struct safe_link_context ctx; + int result; + + if (rofs_super(super)) + /* do nothing on the read-only file system */ + return 0; + safe_link_iter_begin(&get_super_private(super)->tree, &ctx); + result = 0; + do { + result = safe_link_iter_next(&ctx); + if (safe_link_iter_finished(&ctx) || result == -ENOENT) { + result = 0; + break; + } + if (result == 0) + result = process_safelink(super, ctx.link, + &ctx.sdkey, ctx.oid, + ctx.size); + } while (result == 0); + safe_link_iter_end(&ctx); + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/safe_link.h b/fs/reiser4/safe_link.h new file mode 100644 index 000000000000..65252b624972 --- /dev/null +++ b/fs/reiser4/safe_link.h @@ -0,0 +1,29 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Safe-links. See safe_link.c for details. */ + +#if !defined(__FS_SAFE_LINK_H__) +#define __FS_SAFE_LINK_H__ + +#include "tree.h" + +int safe_link_grab(reiser4_tree * tree, reiser4_ba_flags_t flags); +void safe_link_release(reiser4_tree * tree); +int safe_link_add(struct inode *inode, reiser4_safe_link_t link); +int safe_link_del(reiser4_tree *, oid_t oid, reiser4_safe_link_t link); + +int process_safelinks(struct super_block *super); + +/* __FS_SAFE_LINK_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables:
+ c-indentation-style: "K&R"
+ mode-name: "LC"
+ c-basic-offset: 8
+ tab-width: 8
+ fill-column: 120
+ End:
+*/
diff --git a/fs/reiser4/seal.c b/fs/reiser4/seal.c
new file mode 100644
index 000000000000..daeef6f3f5ac
--- /dev/null
+++ b/fs/reiser4/seal.c
@@ -0,0 +1,219 @@
+/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
+/* Seals implementation. */
+/* Seals are "weak" tree pointers. They are analogous to tree coords in
+ allowing to bypass tree traversal. But normal usage of coords implies that
+ node pointed to by coord is locked, whereas seals don't keep a lock (or
+ even a reference) to znode. Instead, each znode contains a version number,
+ increased on each znode modification. This version number is copied into a
+ seal when seal is created. Later, one can "validate" seal by calling
+ reiser4_seal_validate(). If znode is in cache and its version number is
+ still the same, seal is "pristine" and coord associated with it can be
+ re-used immediately.
+
+ If, on the other hand, znode is out of cache, or it is obviously different
+ one from the znode seal was initially attached to (for example, it is on
+ the different level, or is being removed from the tree), seal is
+ irreparably invalid ("burned") and tree traversal has to be repeated.
+
+ Otherwise, there is some hope, that while znode was modified (and seal was
+ "broken" as a result), key attached to the seal is still in the node. This
+ is checked by first comparing this key with delimiting keys of node and, if
+ key is ok, doing intra-node lookup.
+
+ Znode version is maintained in the following way:
+
+ there is reiser4_tree.znode_epoch counter. Whenever new znode is created,
+ znode_epoch is incremented and its new value is stored in ->version field
+ of new znode. Whenever znode is dirtied (which means it was probably
+ modified), znode_epoch is also incremented and its new value is stored in
+ znode->version.
This is done so, because just incrementing znode->version + on each update is not enough: it may so happen, that znode get deleted, new + znode is allocated for the same disk block and gets the same version + counter, tricking seal code into false positive. +*/ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "seal.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "super.h" + +static znode *seal_node(const seal_t *seal); +static int seal_matches(const seal_t *seal, znode * node); + +/* initialise seal. This can be called several times on the same seal. @coord + and @key can be NULL. */ +void reiser4_seal_init(seal_t *seal /* seal to initialise */ , + const coord_t *coord /* coord @seal will be + * attached to */ , + const reiser4_key * key UNUSED_ARG /* key @seal will be + * attached to */ ) +{ + assert("nikita-1886", seal != NULL); + memset(seal, 0, sizeof *seal); + if (coord != NULL) { + znode *node; + + node = coord->node; + assert("nikita-1987", node != NULL); + spin_lock_znode(node); + seal->version = node->version; + assert("nikita-1988", seal->version != 0); + seal->block = *znode_get_block(node); +#if REISER4_DEBUG + seal->coord1 = *coord; + if (key != NULL) + seal->key = *key; +#endif + spin_unlock_znode(node); + } +} + +/* finish with seal */ +void reiser4_seal_done(seal_t *seal/* seal to clear */) +{ + assert("nikita-1887", seal != NULL); + seal->version = 0; +} + +/* true if seal was initialised */ +int reiser4_seal_is_set(const seal_t *seal/* seal to query */) +{ + assert("nikita-1890", seal != NULL); + return seal->version != 0; +} + +#if REISER4_DEBUG +/* helper function for reiser4_seal_validate(). It checks that item at @coord + * has expected key. This is to detect cases where node was modified but wasn't + * marked dirty. 
 */
+static inline int check_seal_match(const coord_t *coord /* coord to check */ ,
+ const reiser4_key *k__/* expected key */)
+{
+ reiser4_key ukey;
+
+ /* FIXME-VS: we only can compare keys for items whose units
+ represent exactly one key */
+ if (coord->between != AT_UNIT)
+ return 1;
+ if (!coord_is_existing_unit(coord))
+ return 0;
+ if (item_is_extent(coord))
+ return 1;
+ if (item_is_ctail(coord))
+ return keyge(k__, unit_key_by_coord(coord, &ukey));
+ return keyeq(k__, unit_key_by_coord(coord, &ukey));
+}
+#endif
+
+/* this is used by reiser4_seal_validate. It accepts return value of
+ * longterm_lock_znode and returns 1 if it can be interpreted as seal
+ * validation failure. For instance, when longterm_lock_znode returns -EINVAL,
+ * reiser4_seal_validate returns -E_REPEAT and caller will call tree search.
+ * We cannot do this in longterm_lock_znode(), because sometimes we want to
+ * distinguish between -EINVAL and -E_REPEAT. */
+static int should_repeat(int return_code)
+{
+ return return_code == -EINVAL;
+}
+
+/* (re-)validate seal.
+
+ Checks whether seal is pristine, and try to revalidate it if possible.
+
+ If seal was burned, or broken irreparably, return -E_REPEAT.
+
+ NOTE-NIKITA currently reiser4_seal_validate() returns -E_REPEAT if key we are
+ looking for is in range of keys covered by the sealed node, but item wasn't
+ found by node ->lookup() method. Alternative is to return -ENOENT in this
+ case, but this would complicate callers logic.
+ +*/ +int reiser4_seal_validate(seal_t *seal /* seal to validate */, + coord_t *coord /* coord to validate against */, + const reiser4_key * key /* key to validate against */, + lock_handle * lh /* resulting lock handle */, + znode_lock_mode mode /* lock node */, + znode_lock_request request/* locking priority */) +{ + znode *node; + int result; + + assert("nikita-1889", seal != NULL); + assert("nikita-1881", reiser4_seal_is_set(seal)); + assert("nikita-1882", key != NULL); + assert("nikita-1883", coord != NULL); + assert("nikita-1884", lh != NULL); + assert("nikita-1885", keyeq(&seal->key, key)); + assert("nikita-1989", coords_equal(&seal->coord1, coord)); + + /* obtain znode by block number */ + node = seal_node(seal); + if (!node) + /* znode wasn't in cache */ + return RETERR(-E_REPEAT); + /* znode was in cache, lock it */ + result = longterm_lock_znode(lh, node, mode, request); + zput(node); + if (result == 0) { + if (seal_matches(seal, node)) { + /* if seal version and znode version + coincide */ + ON_DEBUG(coord_update_v(coord)); + assert("nikita-1990", + node == seal->coord1.node); + assert("nikita-1898", + WITH_DATA_RET(coord->node, 1, + check_seal_match(coord, + key))); + } else + result = RETERR(-E_REPEAT); + } + if (result != 0) { + if (should_repeat(result)) + result = RETERR(-E_REPEAT); + /* unlock node on failure */ + done_lh(lh); + } + return result; +} + +/* helpers functions */ + +/* obtain reference to znode seal points to, if in cache */ +static znode *seal_node(const seal_t *seal/* seal to query */) +{ + assert("nikita-1891", seal != NULL); + return zlook(current_tree, &seal->block); +} + +/* true if @seal version and @node version coincide */ +static int seal_matches(const seal_t *seal /* seal to check */ , + znode * node/* node to check */) +{ + int result; + + assert("nikita-1991", seal != NULL); + assert("nikita-1993", node != NULL); + + spin_lock_znode(node); + result = (seal->version == node->version); + spin_unlock_znode(node); + return 
result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/seal.h b/fs/reiser4/seal.h new file mode 100644 index 000000000000..19d5d521f75c --- /dev/null +++ b/fs/reiser4/seal.h @@ -0,0 +1,49 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Declaration of seals: "weak" tree pointers. See seal.c for comments. */ + +#ifndef __SEAL_H__ +#define __SEAL_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" + +/* for __u?? types */ +/*#include */ + +/* seal. See comment at the top of seal.c */ +typedef struct seal_s { + /* version of znode recorder at the time of seal creation */ + __u64 version; + /* block number of znode attached to this seal */ + reiser4_block_nr block; +#if REISER4_DEBUG + /* coord this seal is attached to. For debugging. */ + coord_t coord1; + /* key this seal is attached to. For debugging. */ + reiser4_key key; +#endif +} seal_t; + +extern void reiser4_seal_init(seal_t *, const coord_t *, const reiser4_key *); +extern void reiser4_seal_done(seal_t *); +extern int reiser4_seal_is_set(const seal_t *); +extern int reiser4_seal_validate(seal_t *, coord_t *, + const reiser4_key *, lock_handle * , + znode_lock_mode mode, znode_lock_request request); + +/* __SEAL_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/search.c b/fs/reiser4/search.c new file mode 100644 index 000000000000..0fa6bdb3645e --- /dev/null +++ b/fs/reiser4/search.c @@ -0,0 +1,1612 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "seal.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree.h" +#include "reiser4.h" +#include "super.h" +#include "inode.h" + +#include + +static const char *bias_name(lookup_bias bias); + +/* tree searching algorithm, intranode searching algorithms are in + plugin/node/ */ + +/* tree lookup cache + * + * The coord by key cache consists of small list of recently accessed nodes + * maintained according to the LRU discipline. Before doing real top-to-down + * tree traversal this cache is scanned for nodes that can contain key + * requested. + * + * The efficiency of coord cache depends heavily on locality of reference for + * tree accesses. Our user level simulations show reasonably good hit ratios + * for coord cache under most loads so far. 
+ */ + +/* Initialise coord cache slot */ +static void cbk_cache_init_slot(cbk_cache_slot *slot) +{ + assert("nikita-345", slot != NULL); + + INIT_LIST_HEAD(&slot->lru); + slot->node = NULL; +} + +/* Initialize coord cache */ +int cbk_cache_init(cbk_cache * cache/* cache to init */) +{ + int i; + + assert("nikita-346", cache != NULL); + + cache->slot = + kmalloc(sizeof(cbk_cache_slot) * cache->nr_slots, + reiser4_ctx_gfp_mask_get()); + if (cache->slot == NULL) + return RETERR(-ENOMEM); + + INIT_LIST_HEAD(&cache->lru); + for (i = 0; i < cache->nr_slots; ++i) { + cbk_cache_init_slot(cache->slot + i); + list_add_tail(&((cache->slot + i)->lru), &cache->lru); + } + rwlock_init(&cache->guard); + return 0; +} + +/* free cbk cache data */ +void cbk_cache_done(cbk_cache * cache/* cache to release */) +{ + assert("nikita-2493", cache != NULL); + if (cache->slot != NULL) { + kfree(cache->slot); + cache->slot = NULL; + } +} + +/* macro to iterate over all cbk cache slots */ +#define for_all_slots(cache, slot) \ + for ((slot) = list_entry((cache)->lru.next, cbk_cache_slot, lru); \ + &(cache)->lru != &(slot)->lru; \ + (slot) = list_entry(slot->lru.next, cbk_cache_slot, lru)) + +#if REISER4_DEBUG +/* this function assures that [cbk-cache-invariant] invariant holds */ +static int cbk_cache_invariant(const cbk_cache * cache) +{ + cbk_cache_slot *slot; + int result; + int unused; + + if (cache->nr_slots == 0) + return 1; + + assert("nikita-2469", cache != NULL); + unused = 0; + result = 1; + read_lock(&((cbk_cache *)cache)->guard); + for_all_slots(cache, slot) { + /* in LRU first go all `used' slots followed by `unused' */ + if (unused && (slot->node != NULL)) + result = 0; + if (slot->node == NULL) + unused = 1; + else { + cbk_cache_slot *scan; + + /* all cached nodes are different */ + scan = slot; + while (result) { + scan = list_entry(scan->lru.next, + cbk_cache_slot, lru); + if (&cache->lru == &scan->lru) + break; + if (slot->node == scan->node) + result = 0; + } + } + if 
(!result) + break; + } + read_unlock(&((cbk_cache *)cache)->guard); + return result; +} + +#endif + +/* Remove references, if any, to @node from coord cache */ +void cbk_cache_invalidate(const znode * node /* node to remove from cache */ , + reiser4_tree * tree/* tree to remove node from */) +{ + cbk_cache_slot *slot; + cbk_cache *cache; + int i; + + assert("nikita-350", node != NULL); + assert("nikita-1479", LOCK_CNT_GTZ(rw_locked_tree)); + + cache = &tree->cbk_cache; + assert("nikita-2470", cbk_cache_invariant(cache)); + + write_lock(&(cache->guard)); + for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) { + if (slot->node == node) { + list_move_tail(&slot->lru, &cache->lru); + slot->node = NULL; + break; + } + } + write_unlock(&(cache->guard)); + assert("nikita-2471", cbk_cache_invariant(cache)); +} + +/* add to the cbk-cache in the "tree" information about "node". This + can actually be update of existing slot in a cache. */ +static void cbk_cache_add(const znode * node/* node to add to the cache */) +{ + cbk_cache *cache; + + cbk_cache_slot *slot; + int i; + + assert("nikita-352", node != NULL); + + cache = &znode_get_tree(node)->cbk_cache; + assert("nikita-2472", cbk_cache_invariant(cache)); + + if (cache->nr_slots == 0) + return; + + write_lock(&(cache->guard)); + /* find slot to update/add */ + for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) { + /* oops, this node is already in a cache */ + if (slot->node == node) + break; + } + /* if all slots are used, reuse least recently used one */ + if (i == cache->nr_slots) { + slot = list_entry(cache->lru.prev, cbk_cache_slot, lru); + slot->node = (znode *) node; + } + list_move(&slot->lru, &cache->lru); + write_unlock(&(cache->guard)); + assert("nikita-2473", cbk_cache_invariant(cache)); +} + +static int setup_delimiting_keys(cbk_handle * h); +static lookup_result coord_by_handle(cbk_handle * handle); +static lookup_result traverse_tree(cbk_handle * h); +static int 
cbk_cache_search(cbk_handle * h); + +static level_lookup_result cbk_level_lookup(cbk_handle * h); +static level_lookup_result cbk_node_lookup(cbk_handle * h); + +/* helper functions */ + +static void update_stale_dk(reiser4_tree * tree, znode * node); + +/* release parent node during traversal */ +static void put_parent(cbk_handle * h); +/* check consistency of fields */ +static int sanity_check(cbk_handle * h); +/* release resources in handle */ +static void hput(cbk_handle * h); + +static level_lookup_result search_to_left(cbk_handle * h); + +/* pack numerous (numberous I should say) arguments of coord_by_key() into + * cbk_handle */ +static cbk_handle *cbk_pack(cbk_handle * handle, + reiser4_tree * tree, + const reiser4_key * key, + coord_t *coord, + lock_handle * active_lh, + lock_handle * parent_lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, + __u32 flags, ra_info_t *info) +{ + memset(handle, 0, sizeof *handle); + + handle->tree = tree; + handle->key = key; + handle->lock_mode = lock_mode; + handle->bias = bias; + handle->lock_level = lock_level; + handle->stop_level = stop_level; + handle->coord = coord; + /* set flags. See comment in tree.h:cbk_flags */ + handle->flags = flags | CBK_TRUST_DK | CBK_USE_CRABLOCK; + + handle->active_lh = active_lh; + handle->parent_lh = parent_lh; + handle->ra_info = info; + return handle; +} + +/* main tree lookup procedure + + Check coord cache. If key we are looking for is not found there, call cbk() + to do real tree traversal. + + As we have extents on the twig level, @lock_level and @stop_level can + be different from LEAF_LEVEL and each other. + + Thread cannot keep any reiser4 locks (tree, znode, dk spin-locks, or znode + long term locks) while calling this. +*/ +lookup_result coord_by_key(reiser4_tree * tree /* tree to perform search + * in. 
Usually this tree is + * part of file-system + * super-block */ , + const reiser4_key * key /* key to look for */ , + coord_t *coord /* where to store found + * position in a tree. Fields + * in "coord" are only valid if + * coord_by_key() returned + * "CBK_COORD_FOUND" */ , + lock_handle * lh, /* resulting lock handle */ + znode_lock_mode lock_mode /* type of lookup we + * want on node. Pass + * ZNODE_READ_LOCK here + * if you only want to + * read item found and + * ZNODE_WRITE_LOCK if + * you want to modify + * it */ , + lookup_bias bias /* what to return if coord + * with exactly the @key is + * not in the tree */ , + tree_level lock_level/* tree level where to start + * taking @lock type of + * locks */ , + tree_level stop_level/* tree level to stop. Pass + * LEAF_LEVEL or TWIG_LEVEL + * here Item being looked + * for has to be between + * @lock_level and + * @stop_level, inclusive */ , + __u32 flags /* search flags */ , + ra_info_t * + info + /* information about desired tree traversal + * readahead */ + ) +{ + cbk_handle handle; + lock_handle parent_lh; + lookup_result result; + + init_lh(lh); + init_lh(&parent_lh); + + assert("nikita-3023", reiser4_schedulable()); + + assert("nikita-353", tree != NULL); + assert("nikita-354", key != NULL); + assert("nikita-355", coord != NULL); + assert("nikita-356", (bias == FIND_EXACT) + || (bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-357", stop_level >= LEAF_LEVEL); + /* no locks can be held during tree traversal */ + assert("nikita-2104", lock_stack_isclean(get_current_lock_stack())); + + cbk_pack(&handle, + tree, + key, + coord, + lh, + &parent_lh, + lock_mode, bias, lock_level, stop_level, flags, info); + + result = coord_by_handle(&handle); + assert("nikita-3247", + ergo(!IS_CBKERR(result), coord->node == lh->node)); + return result; +} + +/* like coord_by_key(), but starts traversal from vroot of @object rather than + * from tree root. 
*/ +lookup_result reiser4_object_lookup(struct inode *object, + const reiser4_key * key, + coord_t *coord, + lock_handle * lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, __u32 flags, + ra_info_t *info) +{ + cbk_handle handle; + lock_handle parent_lh; + lookup_result result; + + init_lh(lh); + init_lh(&parent_lh); + + assert("nikita-3023", reiser4_schedulable()); + + assert("nikita-354", key != NULL); + assert("nikita-355", coord != NULL); + assert("nikita-356", (bias == FIND_EXACT) + || (bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-357", stop_level >= LEAF_LEVEL); + /* no locks can be held during tree search by key */ + assert("nikita-2104", lock_stack_isclean(get_current_lock_stack())); + + cbk_pack(&handle, + object != NULL ? reiser4_tree_by_inode(object) : current_tree, + key, + coord, + lh, + &parent_lh, + lock_mode, bias, lock_level, stop_level, flags, info); + handle.object = object; + + result = coord_by_handle(&handle); + assert("nikita-3247", + ergo(!IS_CBKERR(result), coord->node == lh->node)); + return result; +} + +/* lookup by cbk_handle. Common part of coord_by_key() and + reiser4_object_lookup(). */ +static lookup_result coord_by_handle(cbk_handle * handle) +{ + /* + * first check cbk_cache (which is look-aside cache for our tree) and + * of this fails, start traversal. + */ + /* first check whether "key" is in cache of recent lookups. */ + if (cbk_cache_search(handle) == 0) + return handle->result; + else + return traverse_tree(handle); +} + +/* Execute actor for each item (or unit, depending on @through_units_p), + starting from @coord, right-ward, until either: + + - end of the tree is reached + - unformatted node is met + - error occurred + - @actor returns 0 or less + + Error code, or last actor return value is returned. + + This is used by plugin/dir/hashe_dir.c:reiser4_find_entry() to move through + sequence of entries with identical keys and alikes. 
+*/ +int reiser4_iterate_tree(reiser4_tree * tree /* tree to scan */ , + coord_t *coord /* coord to start from */ , + lock_handle * lh /* lock handle to start with and to + * update along the way */ , + tree_iterate_actor_t actor /* function to call on each + * item/unit */ , + void *arg /* argument to pass to @actor */ , + znode_lock_mode mode /* lock mode on scanned nodes */ , + int through_units_p /* call @actor on each item or on + * each unit */ ) +{ + int result; + + assert("nikita-1143", tree != NULL); + assert("nikita-1145", coord != NULL); + assert("nikita-1146", lh != NULL); + assert("nikita-1147", actor != NULL); + + result = zload(coord->node); + coord_clear_iplug(coord); + if (result != 0) + return result; + if (!coord_is_existing_unit(coord)) { + zrelse(coord->node); + return -ENOENT; + } + while ((result = actor(tree, coord, lh, arg)) > 0) { + /* move further */ + if ((through_units_p && coord_next_unit(coord)) || + (!through_units_p && coord_next_item(coord))) { + do { + lock_handle couple; + + /* move to the next node */ + init_lh(&couple); + result = + reiser4_get_right_neighbor(&couple, + coord->node, + (int)mode, + GN_CAN_USE_UPPER_LEVELS); + zrelse(coord->node); + if (result == 0) { + + result = zload(couple.node); + if (result != 0) { + done_lh(&couple); + return result; + } + + coord_init_first_unit(coord, + couple.node); + done_lh(lh); + move_lh(lh, &couple); + } else + return result; + } while (node_is_empty(coord->node)); + } + + assert("nikita-1149", coord_is_existing_unit(coord)); + } + zrelse(coord->node); + return result; +} + +/* return locked uber znode for @tree */ +int get_uber_znode(reiser4_tree * tree, znode_lock_mode mode, + znode_lock_request pri, lock_handle * lh) +{ + int result; + + result = longterm_lock_znode(lh, tree->uber, mode, pri); + return result; +} + +/* true if @key is strictly within @node + + we are looking for possibly non-unique key and it is item is at the edge of + @node. May be it is in the neighbor. 
+*/ +static int znode_contains_key_strict(znode * node /* node to check key + * against */ , + const reiser4_key * + key /* key to check */ , + int isunique) +{ + int answer; + + assert("nikita-1760", node != NULL); + assert("nikita-1722", key != NULL); + + if (keyge(key, &node->rd_key)) + return 0; + + answer = keycmp(&node->ld_key, key); + + if (isunique) + return answer != GREATER_THAN; + else + return answer == LESS_THAN; +} + +/* + * Virtual Root (vroot) code. + * + * For given file system object (e.g., regular file or directory) let's + * define its "virtual root" as lowest in the tree (that is, furtherest + * from the tree root) node such that all body items of said object are + * located in a tree rooted at this node. + * + * Once vroot of object is found all tree lookups for items within body of + * this object ("object lookups") can be started from its vroot rather + * than from real root. This has following advantages: + * + * 1. amount of nodes traversed during lookup (and, hence, amount of + * key comparisons made) decreases, and + * + * 2. contention on tree root is decreased. This latter was actually + * motivating reason behind vroot, because spin lock of root node, + * which is taken when acquiring long-term lock on root node is the + * hottest lock in the reiser4. + * + * How to find vroot. + * + * When vroot of object F is not yet determined, all object lookups start + * from the root of the tree. At each tree level during traversal we have + * a node N such that a key we are looking for (which is the key inside + * object's body) is located within N. In function handle_vroot() called + * from cbk_level_lookup() we check whether N is possible vroot for + * F. Check is trivial---if neither leftmost nor rightmost item of N + * belongs to F (and we already have helpful ->owns_item() method of + * object plugin for this), then N is possible vroot of F. 
This, of + * course, relies on the assumption that each object occupies contiguous + * range of keys in the tree. + * + * Thus, traversing tree downward and checking each node as we go, we can + * find lowest such node, which, by definition, is vroot. + * + * How to track vroot. + * + * Nohow. If actual vroot changes, next object lookup will just restart + * from the actual tree root, refreshing object's vroot along the way. + * + */ + +/* + * Check whether @node is possible vroot of @object. + */ +static void handle_vroot(struct inode *object, znode * node) +{ + file_plugin *fplug; + coord_t coord; + + fplug = inode_file_plugin(object); + assert("nikita-3353", fplug != NULL); + assert("nikita-3354", fplug->owns_item != NULL); + + if (unlikely(node_is_empty(node))) + return; + + coord_init_first_unit(&coord, node); + /* + * if leftmost item of @node belongs to @object, we cannot be sure + * that @node is vroot of @object, because, some items of @object are + * probably in the sub-tree rooted at the left neighbor of @node. + */ + if (fplug->owns_item(object, &coord)) + return; + coord_init_last_unit(&coord, node); + /* mutatis mutandis for the rightmost item */ + if (fplug->owns_item(object, &coord)) + return; + /* otherwise, @node is possible vroot of @object */ + inode_set_vroot(object, node); +} + +/* + * helper function used by traverse tree to start tree traversal not from the + * tree root, but from @h->object's vroot, if possible. + */ +static int prepare_object_lookup(cbk_handle * h) +{ + znode *vroot; + int result; + + vroot = inode_get_vroot(h->object); + if (vroot == NULL) { + /* + * object doesn't have known vroot, start from real tree root. 
+ */ + return LOOKUP_CONT; + } + + h->level = znode_get_level(vroot); + /* take a long-term lock on vroot */ + h->result = longterm_lock_znode(h->active_lh, vroot, + cbk_lock_mode(h->level, h), + ZNODE_LOCK_LOPRI); + result = LOOKUP_REST; + if (h->result == 0) { + int isunique; + int inside; + + isunique = h->flags & CBK_UNIQUE; + /* check that key is inside vroot */ + read_lock_dk(h->tree); + inside = (znode_contains_key_strict(vroot, h->key, isunique) && + !ZF_ISSET(vroot, JNODE_HEARD_BANSHEE)); + read_unlock_dk(h->tree); + if (inside) { + h->result = zload(vroot); + if (h->result == 0) { + /* search for key in vroot. */ + result = cbk_node_lookup(h); + zrelse(vroot); /*h->active_lh->node); */ + if (h->active_lh->node != vroot) { + result = LOOKUP_REST; + } else if (result == LOOKUP_CONT) { + move_lh(h->parent_lh, h->active_lh); + h->flags &= ~CBK_DKSET; + } + } + } + } + + zput(vroot); + + if (IS_CBKERR(h->result) || result == LOOKUP_REST) + hput(h); + return result; +} + +/* main function that handles common parts of tree traversal: starting + (fake znode handling), restarts, error handling, completion */ +static lookup_result traverse_tree(cbk_handle * h/* search handle */) +{ + int done; + int iterations; + int vroot_used; + + assert("nikita-365", h != NULL); + assert("nikita-366", h->tree != NULL); + assert("nikita-367", h->key != NULL); + assert("nikita-368", h->coord != NULL); + assert("nikita-369", (h->bias == FIND_EXACT) + || (h->bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-370", h->stop_level >= LEAF_LEVEL); + assert("nikita-2949", !(h->flags & CBK_DKSET)); + assert("zam-355", lock_stack_isclean(get_current_lock_stack())); + + done = 0; + iterations = 0; + vroot_used = 0; + + /* loop for restarts */ +restart: + + assert("nikita-3024", reiser4_schedulable()); + + h->result = CBK_COORD_FOUND; + /* connect_znode() needs it */ + h->ld_key = *reiser4_min_key(); + h->rd_key = *reiser4_max_key(); + h->flags |= CBK_DKSET; + h->error = NULL; + + if 
(!vroot_used && h->object != NULL) { + vroot_used = 1; + done = prepare_object_lookup(h); + if (done == LOOKUP_REST) + goto restart; + else if (done == LOOKUP_DONE) + return h->result; + } + if (h->parent_lh->node == NULL) { + done = + get_uber_znode(h->tree, ZNODE_READ_LOCK, ZNODE_LOCK_LOPRI, + h->parent_lh); + + assert("nikita-1637", done != -E_DEADLOCK); + + h->block = h->tree->root_block; + h->level = h->tree->height; + h->coord->node = h->parent_lh->node; + + if (done != 0) + return done; + } + + /* loop descending a tree */ + while (!done) { + + if (unlikely((iterations > REISER4_CBK_ITERATIONS_LIMIT) && + IS_POW(iterations))) { + warning("nikita-1481", "Too many iterations: %i", + iterations); + reiser4_print_key("key", h->key); + ++iterations; + } else if (unlikely(iterations > REISER4_MAX_CBK_ITERATIONS)) { + h->error = + "reiser-2018: Too many iterations. Tree corrupted, or (less likely) starvation occurring."; + h->result = RETERR(-EIO); + break; + } + switch (cbk_level_lookup(h)) { + case LOOKUP_CONT: + move_lh(h->parent_lh, h->active_lh); + continue; + default: + wrong_return_value("nikita-372", "cbk_level"); + case LOOKUP_DONE: + done = 1; + break; + case LOOKUP_REST: + hput(h); + /* deadlock avoidance is normal case. */ + if (h->result != -E_DEADLOCK) + ++iterations; + reiser4_preempt_point(); + goto restart; + } + } + /* that's all. The rest is error handling */ + if (unlikely(h->error != NULL)) { + warning("nikita-373", "%s: level: %i, " + "lock_level: %i, stop_level: %i " + "lock_mode: %s, bias: %s", + h->error, h->level, h->lock_level, h->stop_level, + lock_mode_name(h->lock_mode), bias_name(h->bias)); + reiser4_print_address("block", &h->block); + reiser4_print_key("key", h->key); + print_coord_content("coord", h->coord); + } + /* `unlikely' error case */ + if (unlikely(IS_CBKERR(h->result))) { + /* failure. 
do cleanup */ + hput(h); + } else { + assert("nikita-1605", WITH_DATA_RET + (h->coord->node, 1, + ergo((h->result == CBK_COORD_FOUND) && + (h->bias == FIND_EXACT) && + (!node_is_empty(h->coord->node)), + coord_is_existing_item(h->coord)))); + } + return h->result; +} + +/* find delimiting keys of child + + Determine left and right delimiting keys for child pointed to by + @parent_coord. + +*/ +static void find_child_delimiting_keys(znode * parent /* parent znode, passed + * locked */ , + const coord_t *parent_coord + /* coord where pointer + * to child is stored + */ , + reiser4_key * ld /* where to store left + * delimiting key */ , + reiser4_key * rd /* where to store right + * delimiting key */ ) +{ + coord_t neighbor; + + assert("nikita-1484", parent != NULL); + assert_rw_locked(&(znode_get_tree(parent)->dk_lock)); + + coord_dup(&neighbor, parent_coord); + + if (neighbor.between == AT_UNIT) + /* imitate item ->lookup() behavior. */ + neighbor.between = AFTER_UNIT; + + if (coord_set_to_left(&neighbor) == 0) + unit_key_by_coord(&neighbor, ld); + else { + assert("nikita-14851", 0); + *ld = *znode_get_ld_key(parent); + } + + coord_dup(&neighbor, parent_coord); + if (neighbor.between == AT_UNIT) + neighbor.between = AFTER_UNIT; + if (coord_set_to_right(&neighbor) == 0) + unit_key_by_coord(&neighbor, rd); + else + *rd = *znode_get_rd_key(parent); +} + +/* + * setup delimiting keys for a child + * + * @parent parent node + * + * @coord location in @parent where pointer to @child is + * + * @child child node + */ +int +set_child_delimiting_keys(znode * parent, const coord_t *coord, znode * child) +{ + reiser4_tree *tree; + + assert("nikita-2952", + znode_get_level(parent) == znode_get_level(coord->node)); + + /* fast check without taking dk lock. This is safe, because + * JNODE_DKSET is never cleared once set. 
*/ + if (!ZF_ISSET(child, JNODE_DKSET)) { + tree = znode_get_tree(parent); + write_lock_dk(tree); + if (likely(!ZF_ISSET(child, JNODE_DKSET))) { + find_child_delimiting_keys(parent, coord, + &child->ld_key, + &child->rd_key); + ON_DEBUG(child->ld_key_version = + atomic_inc_return(&delim_key_version); + child->rd_key_version = + atomic_inc_return(&delim_key_version);); + ZF_SET(child, JNODE_DKSET); + } + write_unlock_dk(tree); + return 1; + } + return 0; +} + +/* Perform tree lookup at one level. This is called from cbk_traverse() + function that drives lookup through tree and calls cbk_node_lookup() to + perform lookup within one node. + + See comments in a code. +*/ +static level_lookup_result cbk_level_lookup(cbk_handle * h/* search handle */) +{ + int ret; + int setdk; + int ldkeyset = 0; + reiser4_key ldkey; + reiser4_key key; + znode *active; + + assert("nikita-3025", reiser4_schedulable()); + + /* acquire reference to @active node */ + active = + zget(h->tree, &h->block, h->parent_lh->node, h->level, + reiser4_ctx_gfp_mask_get()); + + if (IS_ERR(active)) { + h->result = PTR_ERR(active); + return LOOKUP_DONE; + } + + /* lock @active */ + h->result = longterm_lock_znode(h->active_lh, + active, + cbk_lock_mode(h->level, h), + ZNODE_LOCK_LOPRI); + /* longterm_lock_znode() acquires additional reference to znode (which + will be later released by longterm_unlock_znode()). Release + reference acquired by zget(). + */ + zput(active); + if (unlikely(h->result != 0)) + goto fail_or_restart; + + setdk = 0; + /* if @active is accessed for the first time, setup delimiting keys on + it. Delimiting keys are taken from the parent node. See + setup_delimiting_keys() for details. 
+ */ + if (h->flags & CBK_DKSET) { + setdk = setup_delimiting_keys(h); + h->flags &= ~CBK_DKSET; + } else { + znode *parent; + + parent = h->parent_lh->node; + h->result = zload(parent); + if (unlikely(h->result != 0)) + goto fail_or_restart; + + if (!ZF_ISSET(active, JNODE_DKSET)) + setdk = set_child_delimiting_keys(parent, + h->coord, active); + else { + read_lock_dk(h->tree); + find_child_delimiting_keys(parent, h->coord, &ldkey, + &key); + read_unlock_dk(h->tree); + ldkeyset = 1; + } + zrelse(parent); + } + + /* this is ugly kludge. Reminder: this is necessary, because + ->lookup() method returns coord with ->between field probably set + to something different from AT_UNIT. + */ + h->coord->between = AT_UNIT; + + if (znode_just_created(active) && (h->coord->node != NULL)) { + write_lock_tree(h->tree); + /* if we are going to load znode right now, setup + ->in_parent: coord where pointer to this node is stored in + parent. + */ + coord_to_parent_coord(h->coord, &active->in_parent); + write_unlock_tree(h->tree); + } + + /* check connectedness without holding tree lock---false negatives + * will be re-checked by connect_znode(), and false positives are + * impossible---@active cannot suddenly turn into unconnected + * state. */ + if (!znode_is_connected(active)) { + h->result = connect_znode(h->coord, active); + if (unlikely(h->result != 0)) { + put_parent(h); + goto fail_or_restart; + } + } + + jload_prefetch(ZJNODE(active)); + + if (setdk) + update_stale_dk(h->tree, active); + + /* put_parent() cannot be called earlier, because connect_znode() + assumes parent node is referenced; */ + put_parent(h); + + if ((!znode_contains_key_lock(active, h->key) && + (h->flags & CBK_TRUST_DK)) + || ZF_ISSET(active, JNODE_HEARD_BANSHEE)) { + /* 1. key was moved out of this node while this thread was + waiting for the lock. Restart. More elaborate solution is + to determine where key moved (to the left, or to the right) + and try to follow it through sibling pointers. + + 2. 
or, node itself is going to be removed from the + tree. Release lock and restart. + */ + h->result = -E_REPEAT; + } + if (h->result == -E_REPEAT) + return LOOKUP_REST; + + h->result = zload_ra(active, h->ra_info); + if (h->result) + return LOOKUP_DONE; + + /* sanity checks */ + if (sanity_check(h)) { + zrelse(active); + return LOOKUP_DONE; + } + + /* check that key of leftmost item in the @active is the same as in + * its parent */ + if (ldkeyset && !node_is_empty(active) && + !keyeq(leftmost_key_in_node(active, &key), &ldkey)) { + warning("vs-3533", "Keys are inconsistent. Fsck?"); + reiser4_print_key("inparent", &ldkey); + reiser4_print_key("inchild", &key); + h->result = RETERR(-EIO); + zrelse(active); + return LOOKUP_DONE; + } + + if (h->object != NULL) + handle_vroot(h->object, active); + + ret = cbk_node_lookup(h); + + /* h->active_lh->node might change, but active is yet to be zrelsed */ + zrelse(active); + + return ret; + +fail_or_restart: + if (h->result == -E_DEADLOCK) + return LOOKUP_REST; + return LOOKUP_DONE; +} + +#if REISER4_DEBUG +/* check left and right delimiting keys of a znode */ +void check_dkeys(znode * node) +{ + znode *left; + znode *right; + + read_lock_tree(current_tree); + read_lock_dk(current_tree); + + assert("vs-1710", znode_is_any_locked(node)); + assert("vs-1197", + !keygt(znode_get_ld_key(node), znode_get_rd_key(node))); + + left = node->left; + right = node->right; + + if (ZF_ISSET(node, JNODE_LEFT_CONNECTED) && ZF_ISSET(node, JNODE_DKSET) + && left != NULL && ZF_ISSET(left, JNODE_DKSET)) + /* check left neighbor. Note that left neighbor is not locked, + so it might get wrong delimiting keys therefore */ + assert("vs-1198", + (keyeq(znode_get_rd_key(left), znode_get_ld_key(node)) + || ZF_ISSET(left, JNODE_HEARD_BANSHEE))); + + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && ZF_ISSET(node, JNODE_DKSET) + && right != NULL && ZF_ISSET(right, JNODE_DKSET)) + /* check right neighbor. 
Note that right neighbor is not + locked, so it might get wrong delimiting keys therefore */ + assert("vs-1199", + (keyeq(znode_get_rd_key(node), znode_get_ld_key(right)) + || ZF_ISSET(right, JNODE_HEARD_BANSHEE))); + + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); +} +#endif + +/* true if @key is left delimiting key of @node */ +static int key_is_ld(znode * node, const reiser4_key * key) +{ + int ld; + + assert("nikita-1716", node != NULL); + assert("nikita-1758", key != NULL); + + read_lock_dk(znode_get_tree(node)); + assert("nikita-1759", znode_contains_key(node, key)); + ld = keyeq(znode_get_ld_key(node), key); + read_unlock_dk(znode_get_tree(node)); + return ld; +} + +/* Process one node during tree traversal. + + This is called by cbk_level_lookup(). */ +static level_lookup_result cbk_node_lookup(cbk_handle * h/* search handle */) +{ + /* node plugin of @active */ + node_plugin *nplug; + /* item plugin of item that was found */ + item_plugin *iplug; + /* search bias */ + lookup_bias node_bias; + /* node we are operating upon */ + znode *active; + /* tree we are searching in */ + reiser4_tree *tree; + /* result */ + int result; + + assert("nikita-379", h != NULL); + + active = h->active_lh->node; + tree = h->tree; + + nplug = active->nplug; + assert("nikita-380", nplug != NULL); + + ON_DEBUG(check_dkeys(active)); + + /* return item from "active" node with maximal key not greater than + "key" */ + node_bias = h->bias; + result = nplug->lookup(active, h->key, node_bias, h->coord); + if (unlikely(result != NS_FOUND && result != NS_NOT_FOUND)) { + /* error occurred */ + h->result = result; + return LOOKUP_DONE; + } + if (h->level == h->stop_level) { + /* welcome to the stop level */ + assert("nikita-381", h->coord->node == active); + if (result == NS_FOUND) { + /* success of tree lookup */ + if (!(h->flags & CBK_UNIQUE) + && key_is_ld(active, h->key)) + return search_to_left(h); + else + h->result = CBK_COORD_FOUND; + } else { + h->result = 
CBK_COORD_NOTFOUND; + } + if (!(h->flags & CBK_IN_CACHE)) + cbk_cache_add(active); + return LOOKUP_DONE; + } + + if (h->level > TWIG_LEVEL && result == NS_NOT_FOUND) { + h->error = "not found on internal node"; + h->result = result; + return LOOKUP_DONE; + } + + assert("vs-361", h->level > h->stop_level); + + if (handle_eottl(h, &result)) { + assert("vs-1674", (result == LOOKUP_DONE || + result == LOOKUP_REST)); + return result; + } + + /* go down to next level */ + check_me("vs-12", zload(h->coord->node) == 0); + assert("nikita-2116", item_is_internal(h->coord)); + iplug = item_plugin_by_coord(h->coord); + iplug->s.internal.down_link(h->coord, h->key, &h->block); + zrelse(h->coord->node); + --h->level; + return LOOKUP_CONT; /* continue */ +} + +/* scan cbk_cache slots looking for a match for @h */ +static int cbk_cache_scan_slots(cbk_handle * h/* cbk handle */) +{ + level_lookup_result llr; + znode *node; + reiser4_tree *tree; + cbk_cache_slot *slot; + cbk_cache *cache; + tree_level level; + int isunique; + const reiser4_key *key; + int result; + + assert("nikita-1317", h != NULL); + assert("nikita-1315", h->tree != NULL); + assert("nikita-1316", h->key != NULL); + + tree = h->tree; + cache = &tree->cbk_cache; + if (cache->nr_slots == 0) + /* size of cbk cache was set to 0 by mount time option. */ + return RETERR(-ENOENT); + + assert("nikita-2474", cbk_cache_invariant(cache)); + node = NULL; /* to keep gcc happy */ + level = h->level; + key = h->key; + isunique = h->flags & CBK_UNIQUE; + result = RETERR(-ENOENT); + + /* + * this is time-critical function and dragons had, hence, been settled + * here. + * + * Loop below scans cbk cache slots trying to find matching node with + * suitable range of delimiting keys and located at the h->level. + * + * Scan is done under cbk cache spin lock that protects slot->node + * pointers. If suitable node is found we want to pin it in + * memory. But slot->node can point to the node with x_count 0 + * (unreferenced). 
Such node can be recycled at any moment, or can + * already be in the process of being recycled (within jput()). + * + * As we found node in the cbk cache, it means that jput() hasn't yet + * called cbk_cache_invalidate(). + * + * We acquire reference to the node without holding tree lock, and + * later, check node's RIP bit. This avoids races with jput(). + */ + + rcu_read_lock(); + read_lock(&((cbk_cache *)cache)->guard); + + slot = list_entry(cache->lru.next, cbk_cache_slot, lru); + slot = list_entry(slot->lru.prev, cbk_cache_slot, lru); + BUG_ON(&slot->lru != &cache->lru);/*????*/ + while (1) { + + slot = list_entry(slot->lru.next, cbk_cache_slot, lru); + + if (&cache->lru != &slot->lru) + node = slot->node; + else + node = NULL; + + if (unlikely(node == NULL)) + break; + + /* + * this is (hopefully) the only place in the code where we are + * working with delimiting keys without holding dk lock. This + * is fine here, because this is only "guess" anyway---keys + * are rechecked under dk lock below. 
+ */ + if (znode_get_level(node) == level && + /* reiser4_min_key < key < reiser4_max_key */ + znode_contains_key_strict(node, key, isunique)) { + zref(node); + result = 0; + spin_lock_prefetch(&tree->tree_lock); + break; + } + } + read_unlock(&((cbk_cache *)cache)->guard); + + assert("nikita-2475", cbk_cache_invariant(cache)); + + if (unlikely(result == 0 && ZF_ISSET(node, JNODE_RIP))) + result = -ENOENT; + + rcu_read_unlock(); + + if (result != 0) { + h->result = CBK_COORD_NOTFOUND; + return RETERR(-ENOENT); + } + + result = + longterm_lock_znode(h->active_lh, node, cbk_lock_mode(level, h), + ZNODE_LOCK_LOPRI); + zput(node); + if (result != 0) + return result; + result = zload(node); + if (result != 0) + return result; + + /* recheck keys */ + read_lock_dk(tree); + result = (znode_contains_key_strict(node, key, isunique) && + !ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + read_unlock_dk(tree); + if (result) { + /* do lookup inside node */ + llr = cbk_node_lookup(h); + /* if cbk_node_lookup() wandered to another node (due to eottl + or non-unique keys), adjust @node */ + /*node = h->active_lh->node; */ + + if (llr != LOOKUP_DONE) { + /* restart or continue on the next level */ + result = RETERR(-ENOENT); + } else if (IS_CBKERR(h->result)) + /* io or oom */ + result = RETERR(-ENOENT); + else { + /* good. Either item found or definitely not found. */ + result = 0; + + write_lock(&(cache->guard)); + if (slot->node == h->active_lh->node) { + /* if this node is still in cbk cache---move + its slot to the head of the LRU list. */ + list_move(&slot->lru, &cache->lru); + } + write_unlock(&(cache->guard)); + } + } else { + /* race. While this thread was waiting for the lock, node was + rebalanced and item we are looking for, shifted out of it + (if it ever was here). 
+ + Continuing scanning is almost hopeless: node key range was + moved to, is almost certainly at the beginning of the LRU + list at this time, because it's hot, but restarting + scanning from the very beginning is complex. Just return, + so that cbk() will be performed. This is not that + important, because such races should be rare. Are they? + */ + result = RETERR(-ENOENT); /* -ERAUGHT */ + } + zrelse(node); + assert("nikita-2476", cbk_cache_invariant(cache)); + return result; +} + +/* look for item with given key in the coord cache + + This function, called by coord_by_key(), scans "coord cache" (&cbk_cache) + which is a small LRU list of znodes accessed lately. For each znode in + znode in this list, it checks whether key we are looking for fits into key + range covered by this node. If so, and in addition, node lies at allowed + level (this is to handle extents on a twig level), node is locked, and + lookup inside it is performed. + + we need a measurement of the cost of this cache search compared to the cost + of coord_by_key. + +*/ +static int cbk_cache_search(cbk_handle * h/* cbk handle */) +{ + int result = 0; + tree_level level; + + /* add CBK_IN_CACHE to the handle flags. This means that + * cbk_node_lookup() assumes that cbk_cache is scanned and would add + * found node to the cache. */ + h->flags |= CBK_IN_CACHE; + for (level = h->stop_level; level <= h->lock_level; ++level) { + h->level = level; + result = cbk_cache_scan_slots(h); + if (result != 0) { + done_lh(h->active_lh); + done_lh(h->parent_lh); + } else { + assert("nikita-1319", !IS_CBKERR(h->result)); + break; + } + } + h->flags &= ~CBK_IN_CACHE; + return result; +} + +/* type of lock we want to obtain during tree traversal. On stop level + we want type of lock user asked for, on upper levels: read lock. */ +znode_lock_mode cbk_lock_mode(tree_level level, cbk_handle * h) +{ + assert("nikita-382", h != NULL); + + return (level <= h->lock_level) ? 
h->lock_mode : ZNODE_READ_LOCK; +} + +/* update outdated delimiting keys */ +static void stale_dk(reiser4_tree * tree, znode * node) +{ + znode *right; + + read_lock_tree(tree); + write_lock_dk(tree); + right = node->right; + + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && + right && ZF_ISSET(right, JNODE_DKSET) && + !keyeq(znode_get_rd_key(node), znode_get_ld_key(right))) + znode_set_rd_key(node, znode_get_ld_key(right)); + + write_unlock_dk(tree); + read_unlock_tree(tree); +} + +/* check for possibly outdated delimiting keys, and update them if + * necessary. */ +static void update_stale_dk(reiser4_tree * tree, znode * node) +{ + znode *right; + reiser4_key rd; + + read_lock_tree(tree); + read_lock_dk(tree); + rd = *znode_get_rd_key(node); + right = node->right; + if (unlikely(ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && + right && ZF_ISSET(right, JNODE_DKSET) && + !keyeq(&rd, znode_get_ld_key(right)))) { + assert("nikita-38211", ZF_ISSET(node, JNODE_DKSET)); + read_unlock_dk(tree); + read_unlock_tree(tree); + stale_dk(tree, node); + return; + } + read_unlock_dk(tree); + read_unlock_tree(tree); +} + +/* + * handle searches a the non-unique key. + * + * Suppose that we are looking for an item with possibly non-unique key 100. + * + * Root node contains two pointers: one to a node with left delimiting key 0, + * and another to a node with left delimiting key 100. Item we interested in + * may well happen in the sub-tree rooted at the first pointer. + * + * To handle this search_to_left() is called when search reaches stop + * level. This function checks it is _possible_ that item we are looking for + * is in the left neighbor (this can be done by comparing delimiting keys) and + * if so, tries to lock left neighbor (this is low priority lock, so it can + * deadlock, tree traversal is just restarted if it did) and then checks + * whether left neighbor actually contains items with our key. + * + * Note that this is done on the stop level only. 
It is possible to try such + * left-check on each level, but as duplicate keys are supposed to be rare + * (very unlikely that more than one node is completely filled with items with + * duplicate keys), it sis cheaper to scan to the left on the stop level once. + * + */ +static level_lookup_result search_to_left(cbk_handle * h/* search handle */) +{ + level_lookup_result result; + coord_t *coord; + znode *node; + znode *neighbor; + + lock_handle lh; + + assert("nikita-1761", h != NULL); + assert("nikita-1762", h->level == h->stop_level); + + init_lh(&lh); + coord = h->coord; + node = h->active_lh->node; + assert("nikita-1763", coord_is_leftmost_unit(coord)); + + h->result = + reiser4_get_left_neighbor(&lh, node, (int)h->lock_mode, + GN_CAN_USE_UPPER_LEVELS); + neighbor = NULL; + switch (h->result) { + case -E_DEADLOCK: + result = LOOKUP_REST; + break; + case 0:{ + node_plugin *nplug; + coord_t crd; + lookup_bias bias; + + neighbor = lh.node; + h->result = zload(neighbor); + if (h->result != 0) { + result = LOOKUP_DONE; + break; + } + + nplug = neighbor->nplug; + + coord_init_zero(&crd); + bias = h->bias; + h->bias = FIND_EXACT; + h->result = + nplug->lookup(neighbor, h->key, h->bias, &crd); + h->bias = bias; + + if (h->result == NS_NOT_FOUND) { + case -E_NO_NEIGHBOR: + h->result = CBK_COORD_FOUND; + if (!(h->flags & CBK_IN_CACHE)) + cbk_cache_add(node); + default: /* some other error */ + result = LOOKUP_DONE; + } else if (h->result == NS_FOUND) { + read_lock_dk(znode_get_tree(neighbor)); + h->rd_key = *znode_get_ld_key(node); + leftmost_key_in_node(neighbor, &h->ld_key); + read_unlock_dk(znode_get_tree(neighbor)); + h->flags |= CBK_DKSET; + + h->block = *znode_get_block(neighbor); + /* clear coord->node so that cbk_level_lookup() + wouldn't overwrite parent hint in neighbor. + + Parent hint was set up by + reiser4_get_left_neighbor() + */ + /* FIXME: why do we have to spinlock here? 
*/ + write_lock_tree(znode_get_tree(neighbor)); + h->coord->node = NULL; + write_unlock_tree(znode_get_tree(neighbor)); + result = LOOKUP_CONT; + } else { + result = LOOKUP_DONE; + } + if (neighbor != NULL) + zrelse(neighbor); + } + } + done_lh(&lh); + return result; +} + +/* debugging aid: return symbolic name of search bias */ +static const char *bias_name(lookup_bias bias/* bias to get name of */) +{ + if (bias == FIND_EXACT) + return "exact"; + else if (bias == FIND_MAX_NOT_MORE_THAN) + return "left-slant"; +/* else if( bias == RIGHT_SLANT_BIAS ) */ +/* return "right-bias"; */ + else { + static char buf[30]; + + sprintf(buf, "unknown: %i", bias); + return buf; + } +} + +#if REISER4_DEBUG +/* debugging aid: print human readable information about @p */ +void print_coord_content(const char *prefix /* prefix to print */ , + coord_t *p/* coord to print */) +{ + reiser4_key key; + + if (p == NULL) { + printk("%s: null\n", prefix); + return; + } + if ((p->node != NULL) && znode_is_loaded(p->node) + && coord_is_existing_item(p)) + printk("%s: data: %p, length: %i\n", prefix, + item_body_by_coord(p), item_length_by_coord(p)); + if (znode_is_loaded(p->node)) { + item_key_by_coord(p, &key); + reiser4_print_key(prefix, &key); + } +} + +/* debugging aid: print human readable information about @block */ +void reiser4_print_address(const char *prefix /* prefix to print */ , + const reiser4_block_nr * block/* block number to print */) +{ + printk("%s: %s\n", prefix, sprint_address(block)); +} +#endif + +/* return string containing human readable representation of @block */ +char *sprint_address(const reiser4_block_nr * + block/* block number to print */) +{ + static char address[30]; + + if (block == NULL) + sprintf(address, "null"); + else if (reiser4_blocknr_is_fake(block)) + sprintf(address, "%llx", (unsigned long long)(*block)); + else + sprintf(address, "%llu", (unsigned long long)(*block)); + return address; +} + +/* release parent node during traversal */ +static void 
put_parent(cbk_handle * h/* search handle */) +{ + assert("nikita-383", h != NULL); + if (h->parent_lh->node != NULL) + longterm_unlock_znode(h->parent_lh); +} + +/* helper function used by coord_by_key(): release reference to parent znode + stored in handle before processing its child. */ +static void hput(cbk_handle * h/* search handle */) +{ + assert("nikita-385", h != NULL); + done_lh(h->parent_lh); + done_lh(h->active_lh); +} + +/* Helper function used by cbk(): update delimiting keys of child node (stored + in h->active_lh->node) using key taken from parent on the parent level. */ +static int setup_delimiting_keys(cbk_handle * h/* search handle */) +{ + znode *active; + reiser4_tree *tree; + + assert("nikita-1088", h != NULL); + + active = h->active_lh->node; + + /* fast check without taking dk lock. This is safe, because + * JNODE_DKSET is never cleared once set. */ + if (!ZF_ISSET(active, JNODE_DKSET)) { + tree = znode_get_tree(active); + write_lock_dk(tree); + if (!ZF_ISSET(active, JNODE_DKSET)) { + znode_set_ld_key(active, &h->ld_key); + znode_set_rd_key(active, &h->rd_key); + ZF_SET(active, JNODE_DKSET); + } + write_unlock_dk(tree); + return 1; + } + return 0; +} + +/* true if @block makes sense for the @tree. Used to detect corrupted node + * pointers */ +static int +block_nr_is_correct(reiser4_block_nr * block /* block number to check */ , + reiser4_tree * tree/* tree to check against */) +{ + assert("nikita-757", block != NULL); + assert("nikita-758", tree != NULL); + + /* check to see if it exceeds the size of the device. 
*/ + return reiser4_blocknr_is_sane_for(tree->super, block); +} + +/* check consistency of fields */ +static int sanity_check(cbk_handle * h/* search handle */) +{ + assert("nikita-384", h != NULL); + + if (h->level < h->stop_level) { + h->error = "Buried under leaves"; + h->result = RETERR(-EIO); + return LOOKUP_DONE; + } else if (!block_nr_is_correct(&h->block, h->tree)) { + h->error = "bad block number"; + h->result = RETERR(-EIO); + return LOOKUP_DONE; + } else + return 0; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/status_flags.c b/fs/reiser4/status_flags.c new file mode 100644 index 000000000000..574005e0677a --- /dev/null +++ b/fs/reiser4/status_flags.c @@ -0,0 +1,180 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Functions that deal with reiser4 status block, query status and update it, + * if needed */ + +#include +#include +#include +#include +#include "debug.h" +#include "dformat.h" +#include "status_flags.h" +#include "super.h" + +/* This is our end I/O handler that marks page uptodate if IO was successful. + It also unconditionally unlocks the page, so we can see that io was done. + We do not free bio, because we hope to reuse that. */ +static void reiser4_status_endio(struct bio *bio) +{ + if (!bio->bi_status) + SetPageUptodate(bio->bi_io_vec->bv_page); + else { + ClearPageUptodate(bio->bi_io_vec->bv_page); + SetPageError(bio->bi_io_vec->bv_page); + } + unlock_page(bio->bi_io_vec->bv_page); +} + +/* Initialise status code. This is expected to be called from the disk format + code. block paremeter is where status block lives. 
*/ +int reiser4_status_init(reiser4_block_nr block) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + struct bio *bio; + struct page *page; + + get_super_private(sb)->status_page = NULL; + get_super_private(sb)->status_bio = NULL; + + page = alloc_pages(reiser4_ctx_gfp_mask_get(), 0); + if (!page) + return -ENOMEM; + + bio = bio_alloc(reiser4_ctx_gfp_mask_get(), 1); + if (bio != NULL) { + bio->bi_iter.bi_sector = block * (sb->s_blocksize >> 9); + bio_set_dev(bio, sb->s_bdev); + bio->bi_io_vec[0].bv_page = page; + bio->bi_io_vec[0].bv_len = sb->s_blocksize; + bio->bi_io_vec[0].bv_offset = 0; + bio->bi_vcnt = 1; + bio->bi_iter.bi_size = sb->s_blocksize; + bio->bi_end_io = reiser4_status_endio; + } else { + __free_pages(page, 0); + return -ENOMEM; + } + lock_page(page); + bio_set_op_attrs(bio, READ, 0); + submit_bio(bio); + wait_on_page_locked(page); + if (!PageUptodate(page)) { + warning("green-2007", + "I/O error while tried to read status page\n"); + return -EIO; + } + + statuspage = (struct reiser4_status *)kmap_atomic(page); + if (memcmp + (statuspage->magic, REISER4_STATUS_MAGIC, + sizeof(REISER4_STATUS_MAGIC))) { + /* Magic does not match. */ + kunmap_atomic((char *)statuspage); + warning("green-2008", "Wrong magic in status block\n"); + __free_pages(page, 0); + bio_put(bio); + return -EINVAL; + } + kunmap_atomic((char *)statuspage); + + get_super_private(sb)->status_page = page; + get_super_private(sb)->status_bio = bio; + return 0; +} + +/* Query the status of fs. Returns if the FS can be safely mounted. + Also if "status" and "extended" parameters are given, it will fill + actual parts of status from disk there. */ +int reiser4_status_query(u64 *status, u64 *extended) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + int retval; + + if (!get_super_private(sb)->status_page) + /* No status page? 
*/ + return REISER4_STATUS_MOUNT_UNKNOWN; + statuspage = (struct reiser4_status *) + kmap_atomic(get_super_private(sb)->status_page); + switch ((long)le64_to_cpu(get_unaligned(&statuspage->status))) { + /* FIXME: this cast is a hack for 32 bit arches to work. */ + case REISER4_STATUS_OK: + retval = REISER4_STATUS_MOUNT_OK; + break; + case REISER4_STATUS_CORRUPTED: + retval = REISER4_STATUS_MOUNT_WARN; + break; + case REISER4_STATUS_DAMAGED: + case REISER4_STATUS_DESTROYED: + case REISER4_STATUS_IOERROR: + retval = REISER4_STATUS_MOUNT_RO; + break; + default: + retval = REISER4_STATUS_MOUNT_UNKNOWN; + break; + } + + if (status) + *status = le64_to_cpu(get_unaligned(&statuspage->status)); + if (extended) + *extended = le64_to_cpu(get_unaligned(&statuspage->extended_status)); + + kunmap_atomic((char *)statuspage); + return retval; +} + +/* This function should be called when something bad happens (e.g. from + reiser4_panic). It fills the status structure and tries to push it to disk.*/ +int reiser4_status_write(__u64 status, __u64 extended_status, char *message) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + struct bio *bio = get_super_private(sb)->status_bio; + + if (!get_super_private(sb)->status_page) + /* No status page? 
*/ + return -1; + statuspage = (struct reiser4_status *) + kmap_atomic(get_super_private(sb)->status_page); + + put_unaligned(cpu_to_le64(status), &statuspage->status); + put_unaligned(cpu_to_le64(extended_status), &statuspage->extended_status); + strncpy(statuspage->texterror, message, REISER4_TEXTERROR_LEN); + + kunmap_atomic((char *)statuspage); + bio_reset(bio); + bio_set_dev(bio, sb->s_bdev); + bio->bi_io_vec[0].bv_page = get_super_private(sb)->status_page; + bio->bi_io_vec[0].bv_len = sb->s_blocksize; + bio->bi_io_vec[0].bv_offset = 0; + bio->bi_vcnt = 1; + bio->bi_iter.bi_size = sb->s_blocksize; + bio->bi_end_io = reiser4_status_endio; + lock_page(get_super_private(sb)->status_page); /* Safe as nobody should + * touch our page. */ + /* + * We can block now, but we have no other choice anyway + */ + bio_set_op_attrs(bio, WRITE, 0); + submit_bio(bio); + /* + * We do not wait for IO completon + */ + return 0; +} + +/* Frees the page with status and bio structure. Should be called by disk format + * at umount time */ +int reiser4_status_finish(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + + __free_pages(get_super_private(sb)->status_page, 0); + get_super_private(sb)->status_page = NULL; + bio_put(get_super_private(sb)->status_bio); + get_super_private(sb)->status_bio = NULL; + return 0; +} diff --git a/fs/reiser4/status_flags.h b/fs/reiser4/status_flags.h new file mode 100644 index 000000000000..bee9d2ee22ca --- /dev/null +++ b/fs/reiser4/status_flags.h @@ -0,0 +1,47 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Here we declare structures and flags that store reiser4 status on disk. 
+ The status that helps us to find out if the filesystem is valid or if it + contains some critical, or not so critical errors */ + +#if !defined(__REISER4_STATUS_FLAGS_H__) +#define __REISER4_STATUS_FLAGS_H__ + +#include "dformat.h" +/* These are major status flags */ +#define REISER4_STATUS_OK 0 +#define REISER4_STATUS_CORRUPTED 0x1 +#define REISER4_STATUS_DAMAGED 0x2 +#define REISER4_STATUS_DESTROYED 0x4 +#define REISER4_STATUS_IOERROR 0x8 + +/* Return values for reiser4_status_query() */ +#define REISER4_STATUS_MOUNT_OK 0 +#define REISER4_STATUS_MOUNT_WARN 1 +#define REISER4_STATUS_MOUNT_RO 2 +#define REISER4_STATUS_MOUNT_UNKNOWN -1 + +#define REISER4_TEXTERROR_LEN 256 + +#define REISER4_STATUS_MAGIC "ReiSeR4StATusBl" +/* We probably need to keep its size under sector size which is 512 bytes */ +struct reiser4_status { + char magic[16]; + d64 status; /* Current FS state */ + d64 extended_status; /* Any additional info that might have sense in + * addition to "status". E.g. last sector where + * io error happened if status is + * "io error encountered" */ + d64 stacktrace[10]; /* Last ten functional calls made (addresses) */ + char texterror[REISER4_TEXTERROR_LEN]; /* Any error message if + * appropriate, otherwise filled + * with zeroes */ +}; + +int reiser4_status_init(reiser4_block_nr block); +int reiser4_status_query(u64 *status, u64 *extended); +int reiser4_status_write(u64 status, u64 extended_status, char *message); +int reiser4_status_finish(void); + +#endif diff --git a/fs/reiser4/super.c b/fs/reiser4/super.c new file mode 100644 index 000000000000..511b74e8c263 --- /dev/null +++ b/fs/reiser4/super.c @@ -0,0 +1,306 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Super-block manipulations. 
*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "plugin/security/perm.h" +#include "plugin/space/space_allocator.h" +#include "plugin/plugin.h" +#include "tree.h" +#include "vfs_ops.h" +#include "super.h" +#include "reiser4.h" + +#include /* for __u?? */ +#include /* for struct super_block */ + +static __u64 reserved_for_gid(const struct super_block *super, gid_t gid); +static __u64 reserved_for_uid(const struct super_block *super, uid_t uid); +static __u64 reserved_for_root(const struct super_block *super); + +/* Return reiser4-specific part of super block */ +reiser4_super_info_data *get_super_private_nocheck(const struct super_block *super) +{ + return (reiser4_super_info_data *) super->s_fs_info; +} + +/* Return reiser4 fstype: value that is returned in ->f_type field by statfs() + */ +long reiser4_statfs_type(const struct super_block *super UNUSED_ARG) +{ + assert("nikita-448", super != NULL); + assert("nikita-449", is_reiser4_super(super)); + return (long)REISER4_SUPER_MAGIC; +} + +/* functions to read/modify fields of reiser4_super_info_data */ + +/* get number of blocks in file system */ +__u64 reiser4_block_count(const struct super_block *super /* super block + queried */ ) +{ + assert("vs-494", super != NULL); + assert("vs-495", is_reiser4_super(super)); + return get_super_private(super)->block_count; +} + +#if REISER4_DEBUG +/* + * number of blocks in the current file system + */ +__u64 reiser4_current_block_count(void) +{ + return get_current_super_private()->block_count; +} +#endif /* REISER4_DEBUG */ + +/* set number of block in filesystem */ +void reiser4_set_block_count(const struct super_block *super, __u64 nr) +{ + assert("vs-501", super != NULL); + assert("vs-502", is_reiser4_super(super)); + get_super_private(super)->block_count = nr; + /* + * The proper calculation of the reserved space counter (%5 of device + * block counter) we need a 64 bit division which is missing in Linux + * on i386 platform. 
Because we do not need a precise calculation here + * we can replace a div64 operation by this combination of + * multiplication and shift: 51. / (2^10) == .0498 . + * FIXME: this is a bug. It comes up only for very small filesystems + * which probably are never used. Nevertheless, it is a bug. Number of + * reserved blocks must be not less than maximal number of blocks which + * get grabbed with BA_RESERVED. + */ + get_super_private(super)->blocks_reserved = ((nr * 51) >> 10); +} + +/* amount of blocks used (allocated for data) in file system */ +__u64 reiser4_data_blocks(const struct super_block *super /* super block + queried */ ) +{ + assert("nikita-452", super != NULL); + assert("nikita-453", is_reiser4_super(super)); + return get_super_private(super)->blocks_used; +} + +/* set number of block used in filesystem */ +void reiser4_set_data_blocks(const struct super_block *super, __u64 nr) +{ + assert("vs-503", super != NULL); + assert("vs-504", is_reiser4_super(super)); + get_super_private(super)->blocks_used = nr; +} + +/* amount of free blocks in file system */ +__u64 reiser4_free_blocks(const struct super_block *super /* super block + queried */ ) +{ + assert("nikita-454", super != NULL); + assert("nikita-455", is_reiser4_super(super)); + return get_super_private(super)->blocks_free; +} + +/* set number of blocks free in filesystem */ +void reiser4_set_free_blocks(const struct super_block *super, __u64 nr) +{ + assert("vs-505", super != NULL); + assert("vs-506", is_reiser4_super(super)); + get_super_private(super)->blocks_free = nr; +} + +/* get mkfs unique identifier */ +__u32 reiser4_mkfs_id(const struct super_block *super /* super block + queried */ ) +{ + assert("vpf-221", super != NULL); + assert("vpf-222", is_reiser4_super(super)); + return get_super_private(super)->mkfs_id; +} + +/* amount of free blocks in file system */ +__u64 reiser4_free_committed_blocks(const struct super_block *super) +{ + assert("vs-497", super != NULL); + assert("vs-498", 
is_reiser4_super(super)); + return get_super_private(super)->blocks_free_committed; +} + +/* amount of blocks in the file system reserved for @uid and @gid */ +long reiser4_reserved_blocks(const struct super_block *super /* super block + queried */ , + uid_t uid /* user id */ , + gid_t gid/* group id */) +{ + long reserved; + + assert("nikita-456", super != NULL); + assert("nikita-457", is_reiser4_super(super)); + + reserved = 0; + if (REISER4_SUPPORT_GID_SPACE_RESERVATION) + reserved += reserved_for_gid(super, gid); + if (REISER4_SUPPORT_UID_SPACE_RESERVATION) + reserved += reserved_for_uid(super, uid); + if (REISER4_SUPPORT_ROOT_SPACE_RESERVATION && (uid == 0)) + reserved += reserved_for_root(super); + return reserved; +} + +/* get/set value of/to grabbed blocks counter */ +__u64 reiser4_grabbed_blocks(const struct super_block * super) +{ + assert("zam-512", super != NULL); + assert("zam-513", is_reiser4_super(super)); + + return get_super_private(super)->blocks_grabbed; +} + +__u64 reiser4_flush_reserved(const struct super_block *super) +{ + assert("vpf-285", super != NULL); + assert("vpf-286", is_reiser4_super(super)); + + return get_super_private(super)->blocks_flush_reserved; +} + +/* get/set value of/to counter of fake allocated formatted blocks */ +__u64 reiser4_fake_allocated(const struct super_block *super) +{ + assert("zam-516", super != NULL); + assert("zam-517", is_reiser4_super(super)); + + return get_super_private(super)->blocks_fake_allocated; +} + +/* get/set value of/to counter of fake allocated unformatted blocks */ +__u64 reiser4_fake_allocated_unformatted(const struct super_block *super) +{ + assert("zam-516", super != NULL); + assert("zam-517", is_reiser4_super(super)); + + return get_super_private(super)->blocks_fake_allocated_unformatted; +} + +/* get/set value of/to counter of clustered blocks */ +__u64 reiser4_clustered_blocks(const struct super_block *super) +{ + assert("edward-601", super != NULL); + assert("edward-602", 
is_reiser4_super(super)); + + return get_super_private(super)->blocks_clustered; +} + +/* space allocator used by this file system */ +reiser4_space_allocator * reiser4_get_space_allocator(const struct super_block + *super) +{ + assert("nikita-1965", super != NULL); + assert("nikita-1966", is_reiser4_super(super)); + return &get_super_private(super)->space_allocator; +} + +/* return fake inode used to bind formatted nodes in the page cache */ +struct inode *reiser4_get_super_fake(const struct super_block *super) +{ + assert("nikita-1757", super != NULL); + return get_super_private(super)->fake; +} + +/* return fake inode used to bind copied on capture nodes in the page cache */ +struct inode *reiser4_get_cc_fake(const struct super_block *super) +{ + assert("nikita-1757", super != NULL); + return get_super_private(super)->cc; +} + +/* return fake inode used to bind bitmaps and journlal heads */ +struct inode *reiser4_get_bitmap_fake(const struct super_block *super) +{ + assert("nikita-17571", super != NULL); + return get_super_private(super)->bitmap; +} + +/* tree used by this file system */ +reiser4_tree *reiser4_get_tree(const struct super_block *super) +{ + assert("nikita-460", super != NULL); + assert("nikita-461", is_reiser4_super(super)); + return &get_super_private(super)->tree; +} + +/* Check that @super is (looks like) reiser4 super block. This is mainly for + use in assertions. 
*/ +int is_reiser4_super(const struct super_block *super) +{ + return + super != NULL && + get_super_private(super) != NULL && + super->s_op == &(get_super_private(super)->ops.super); +} + +int reiser4_is_set(const struct super_block *super, reiser4_fs_flag f) +{ + return test_bit((int)f, &get_super_private(super)->fs_flags); +} + +/* amount of blocks reserved for given group in file system */ +static __u64 reserved_for_gid(const struct super_block *super UNUSED_ARG, + gid_t gid UNUSED_ARG/* group id */) +{ + return 0; +} + +/* amount of blocks reserved for given user in file system */ +static __u64 reserved_for_uid(const struct super_block *super UNUSED_ARG, + uid_t uid UNUSED_ARG/* user id */) +{ + return 0; +} + +/* amount of blocks reserved for super user in file system */ +static __u64 reserved_for_root(const struct super_block *super UNUSED_ARG) +{ + return 0; +} + +/* + * true if block number @blk makes sense for the file system at @super. + */ +int +reiser4_blocknr_is_sane_for(const struct super_block *super, + const reiser4_block_nr * blk) +{ + reiser4_super_info_data *sbinfo; + + assert("nikita-2957", super != NULL); + assert("nikita-2958", blk != NULL); + + if (reiser4_blocknr_is_fake(blk)) + return 1; + + sbinfo = get_super_private(super); + return *blk < sbinfo->block_count; +} + +#if REISER4_DEBUG +/* + * true, if block number @blk makes sense for the current file system + */ +int reiser4_blocknr_is_sane(const reiser4_block_nr * blk) +{ + return reiser4_blocknr_is_sane_for(reiser4_get_current_sb(), blk); +} +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/super.h b/fs/reiser4/super.h new file mode 100644 index 000000000000..ecc8973175ea --- /dev/null +++ b/fs/reiser4/super.h @@ -0,0 +1,472 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Super-block functions. See super.c for details. */ + +#if !defined(__REISER4_SUPER_H__) +#define __REISER4_SUPER_H__ + +#include + +#include "tree.h" +#include "entd.h" +#include "wander.h" +#include "fsdata.h" +#include "plugin/object.h" +#include "plugin/space/space_allocator.h" + +/* + * Flush algorithms parameters. + */ +struct flush_params { + unsigned relocate_threshold; + unsigned relocate_distance; + unsigned written_threshold; + unsigned scan_maxnodes; +}; + +typedef enum { + /* + * True if this file system doesn't support hard-links (multiple names) + * for directories: this is default UNIX behavior. + * + * If hard-links on directoires are not allowed, file system is Acyclic + * Directed Graph (modulo dot, and dotdot, of course). + * + * This is used by reiser4_link(). + */ + REISER4_ADG = 0, + /* + * set if all nodes in internal tree have the same node layout plugin. + * If so, znode_guess_plugin() will return tree->node_plugin in stead + * of guessing plugin by plugin id stored in the node. + */ + REISER4_ONE_NODE_PLUGIN = 1, + /* if set, bsd gid assignment is supported. */ + REISER4_BSD_GID = 2, + /* [mac]_time are 32 bit in inode */ + REISER4_32_BIT_TIMES = 3, + /* load all bitmap blocks at mount time */ + REISER4_DONT_LOAD_BITMAP = 5, + /* enforce atomicity during write(2) */ + REISER4_ATOMIC_WRITE = 6, + /* enable issuing of discard requests */ + REISER4_DISCARD = 8, + /* disable hole punching at flush time */ + REISER4_DONT_PUNCH_HOLES = 9 +} reiser4_fs_flag; + +/* + * VFS related operation vectors. 
+ */ +struct object_ops { + struct super_operations super; + struct dentry_operations dentry; + struct export_operations export; +}; + +/* reiser4-specific part of super block + + Locking + + Fields immutable after mount: + + ->oid* + ->space* + ->default_[ug]id + ->mkfs_id + ->trace_flags + ->debug_flags + ->fs_flags + ->df_plug + ->optimal_io_size + ->plug + ->flush + ->u (bad name) + ->txnmgr + ->ra_params + ->fsuid + ->journal_header + ->journal_footer + + Fields protected by ->lnode_guard + + ->lnode_htable + + Fields protected by per-super block spin lock + + ->block_count + ->blocks_used + ->blocks_free + ->blocks_free_committed + ->blocks_grabbed + ->blocks_fake_allocated_unformatted + ->blocks_fake_allocated + ->blocks_flush_reserved + ->eflushed + ->blocknr_hint_default + + After journal replaying during mount, + + ->last_committed_tx + + is protected by ->tmgr.commit_mutex + + Invariants involving this data-type: + + [sb-block-counts] + [sb-grabbed] + [sb-fake-allocated] +*/ +struct reiser4_super_info_data { + /* + * guard spinlock which protects reiser4 super block fields (currently + * blocks_free, blocks_free_committed) + */ + spinlock_t guard; + + /* next oid that will be returned by oid_allocate() */ + oid_t next_to_use; + /* total number of used oids */ + oid_t oids_in_use; + + /* space manager plugin */ + reiser4_space_allocator space_allocator; + + /* transaction model */ + reiser4_txmod_id txmod; + + /* reiser4 internal tree */ + reiser4_tree tree; + + /* + * default user id used for light-weight files without their own + * stat-data. + */ + __u32 default_uid; + + /* + * default group id used for light-weight files without their own + * stat-data. + */ + __u32 default_gid; + + /* mkfs identifier generated at mkfs time. */ + __u32 mkfs_id; + /* amount of blocks in a file system */ + __u64 block_count; + + /* inviolable reserve */ + __u64 blocks_reserved; + + /* amount of blocks used by file system data and meta-data. 
*/ + __u64 blocks_used; + + /* + * amount of free blocks. This is "working" free blocks counter. It is + * like "working" bitmap, please see block_alloc.c for description. + */ + __u64 blocks_free; + + /* + * free block count for fs committed state. This is "commit" version of + * free block counter. + */ + __u64 blocks_free_committed; + + /* + * number of blocks reserved for further allocation, for all + * threads. + */ + __u64 blocks_grabbed; + + /* number of fake allocated unformatted blocks in tree. */ + __u64 blocks_fake_allocated_unformatted; + + /* number of fake allocated formatted blocks in tree. */ + __u64 blocks_fake_allocated; + + /* number of blocks reserved for flush operations. */ + __u64 blocks_flush_reserved; + + /* number of blocks reserved for cluster operations. */ + __u64 blocks_clustered; + + /* unique file-system identifier */ + __u32 fsuid; + + /* On-disk format version. If does not equal to the disk_format + plugin version, some format updates (e.g. enlarging plugin + set, etc) may have place on mount. */ + int version; + + /* file-system wide flags. 
See reiser4_fs_flag enum */ + unsigned long fs_flags; + + /* transaction manager */ + txn_mgr tmgr; + + /* ent thread */ + entd_context entd; + + /* fake inode used to bind formatted nodes */ + struct inode *fake; + /* inode used to bind bitmaps (and journal heads) */ + struct inode *bitmap; + /* inode used to bind copied on capture nodes */ + struct inode *cc; + + /* disk layout plugin */ + disk_format_plugin *df_plug; + + /* disk layout specific part of reiser4 super info data */ + union { + format40_super_info format40; + } u; + + /* value we return in st_blksize on stat(2) */ + unsigned long optimal_io_size; + + /* parameters for the flush algorithm */ + struct flush_params flush; + + /* pointers to jnodes for journal header and footer */ + jnode *journal_header; + jnode *journal_footer; + + journal_location jloc; + + /* head block number of last committed transaction */ + __u64 last_committed_tx; + + /* + * we remember last written location for using as a hint for new block + * allocation + */ + __u64 blocknr_hint_default; + + /* committed number of files (oid allocator state variable ) */ + __u64 nr_files_committed; + + struct formatted_ra_params ra_params; + + /* + * A mutex for serializing cut tree operation if out-of-free-space: + * the only one cut_tree thread is allowed to grab space from reserved + * area (it is 5% of disk space) + */ + struct mutex delete_mutex; + /* task owning ->delete_mutex */ + struct task_struct *delete_mutex_owner; + + /* Diskmap's blocknumber */ + __u64 diskmap_block; + + /* What to do in case of error */ + int onerror; + + /* operations for objects on this file system */ + struct object_ops ops; + + /* + * structure to maintain d_cursors. 
See plugin/file_ops_readdir.c for + * more details + */ + struct d_cursor_info d_info; + struct crypto_shash *csum_tfm; + +#ifdef CONFIG_REISER4_BADBLOCKS + /* Alternative master superblock offset (in bytes) */ + unsigned long altsuper; +#endif + struct repacker *repacker; + struct page *status_page; + struct bio *status_bio; + +#if REISER4_DEBUG + /* + * minimum used blocks value (includes super blocks, bitmap blocks and + * other fs reserved areas), depends on fs format and fs size. + */ + __u64 min_blocks_used; + + /* + * when debugging is on, all jnodes (including znodes, bitmaps, etc.) + * are kept on a list anchored at sbinfo->all_jnodes. This list is + * protected by sbinfo->all_guard spin lock. This lock should be taken + * with _irq modifier, because it is also modified from interrupt + * contexts (by RCU). + */ + spinlock_t all_guard; + /* list of all jnodes */ + struct list_head all_jnodes; +#endif + struct dentry *debugfs_root; +}; + +extern reiser4_super_info_data *get_super_private_nocheck(const struct + super_block * super); + +/* Return reiser4-specific part of super block */ +static inline reiser4_super_info_data *get_super_private(const struct + super_block * super) +{ + assert("nikita-447", super != NULL); + + return (reiser4_super_info_data *) super->s_fs_info; +} + +/* get ent context for the @super */ +static inline entd_context *get_entd_context(struct super_block *super) +{ + return &get_super_private(super)->entd; +} + +/* "Current" super-block: main super block used during current system + call. Reference to this super block is stored in reiser4_context. */ +static inline struct super_block *reiser4_get_current_sb(void) +{ + return get_current_context()->super; +} + +/* Reiser4-specific part of "current" super-block: main super block used + during current system call. Reference to this super block is stored in + reiser4_context. 
*/ +static inline reiser4_super_info_data *get_current_super_private(void) +{ + return get_super_private(reiser4_get_current_sb()); +} + +static inline struct formatted_ra_params *get_current_super_ra_params(void) +{ + return &(get_current_super_private()->ra_params); +} + +/* + * true, if file system on @super is read-only + */ +static inline int rofs_super(struct super_block *super) +{ + return super->s_flags & MS_RDONLY; +} + +/* + * true, if @tree represents read-only file system + */ +static inline int rofs_tree(reiser4_tree * tree) +{ + return rofs_super(tree->super); +} + +/* + * true, if file system where @inode lives on, is read-only + */ +static inline int rofs_inode(struct inode *inode) +{ + return rofs_super(inode->i_sb); +} + +/* + * true, if file system where @node lives on, is read-only + */ +static inline int rofs_jnode(jnode * node) +{ + return rofs_tree(jnode_get_tree(node)); +} + +extern __u64 reiser4_current_block_count(void); + +extern void build_object_ops(struct super_block *super, struct object_ops *ops); + +#define REISER4_SUPER_MAGIC 0x52345362 /* (*(__u32 *)"R4Sb"); */ + +static inline void spin_lock_reiser4_super(reiser4_super_info_data *sbinfo) +{ + spin_lock(&(sbinfo->guard)); +} + +static inline void spin_unlock_reiser4_super(reiser4_super_info_data *sbinfo) +{ + assert_spin_locked(&(sbinfo->guard)); + spin_unlock(&(sbinfo->guard)); +} + +extern __u64 reiser4_flush_reserved(const struct super_block *); +extern int reiser4_is_set(const struct super_block *super, reiser4_fs_flag f); +extern long reiser4_statfs_type(const struct super_block *super); +extern __u64 reiser4_block_count(const struct super_block *super); +extern void reiser4_set_block_count(const struct super_block *super, __u64 nr); +extern __u64 reiser4_data_blocks(const struct super_block *super); +extern void reiser4_set_data_blocks(const struct super_block *super, __u64 nr); +extern __u64 reiser4_free_blocks(const struct super_block *super); +extern void 
reiser4_set_free_blocks(const struct super_block *super, __u64 nr); +extern __u32 reiser4_mkfs_id(const struct super_block *super); + +extern __u64 reiser4_free_committed_blocks(const struct super_block *super); + +extern __u64 reiser4_grabbed_blocks(const struct super_block *); +extern __u64 reiser4_fake_allocated(const struct super_block *); +extern __u64 reiser4_fake_allocated_unformatted(const struct super_block *); +extern __u64 reiser4_clustered_blocks(const struct super_block *); + +extern long reiser4_reserved_blocks(const struct super_block *super, uid_t uid, + gid_t gid); + +extern reiser4_space_allocator * +reiser4_get_space_allocator(const struct super_block *super); +extern reiser4_oid_allocator * +reiser4_get_oid_allocator(const struct super_block *super); +extern struct inode *reiser4_get_super_fake(const struct super_block *super); +extern struct inode *reiser4_get_cc_fake(const struct super_block *super); +extern struct inode *reiser4_get_bitmap_fake(const struct super_block *super); +extern reiser4_tree *reiser4_get_tree(const struct super_block *super); +extern int is_reiser4_super(const struct super_block *super); + +extern int reiser4_blocknr_is_sane(const reiser4_block_nr * blk); +extern int reiser4_blocknr_is_sane_for(const struct super_block *super, + const reiser4_block_nr * blk); +extern int reiser4_fill_super(struct super_block *s, void *data, int silent); +extern int reiser4_done_super(struct super_block *s); + +/* step of fill super */ +extern int reiser4_init_fs_info(struct super_block *); +extern void reiser4_done_fs_info(struct super_block *); +extern int reiser4_init_super_data(struct super_block *, char *opt_string); +extern int reiser4_init_read_super(struct super_block *, int silent); +extern int reiser4_init_root_inode(struct super_block *); +extern reiser4_plugin *get_default_plugin(pset_member memb); + +/* Maximal possible object id. 
*/ +#define ABSOLUTE_MAX_OID ((oid_t)~0) + +#define OIDS_RESERVED (1 << 16) +int oid_init_allocator(struct super_block *, oid_t nr_files, oid_t next); +oid_t oid_allocate(struct super_block *); +int oid_release(struct super_block *, oid_t); +oid_t oid_next(const struct super_block *); +void oid_count_allocated(void); +void oid_count_released(void); +long oids_used(const struct super_block *); + +#if REISER4_DEBUG +void print_fs_info(const char *prefix, const struct super_block *); +#endif + +extern void destroy_reiser4_cache(struct kmem_cache **); + +extern struct super_operations reiser4_super_operations; +extern struct export_operations reiser4_export_operations; +extern struct dentry_operations reiser4_dentry_operations; + +/* __REISER4_SUPER_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/super_ops.c b/fs/reiser4/super_ops.c new file mode 100644 index 000000000000..80ae510aed91 --- /dev/null +++ b/fs/reiser4/super_ops.c @@ -0,0 +1,783 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "flush.h" +#include "safe_link.h" +#include "checksum.h" + +#include +#include +#include +#include +#include +#include +#include + +/* slab cache for inodes */ +static struct kmem_cache *inode_cache; + +static struct dentry *reiser4_debugfs_root = NULL; + +/** + * init_once - constructor for reiser4 inodes + * @cache: cache @obj belongs to + * @obj: inode to be initialized + * + * Initialization function to be called when new page is allocated by reiser4 + * inode cache. It is set on inode cache creation. + */ +static void init_once(void *obj) +{ + struct reiser4_inode_object *info; + + info = obj; + + /* initialize vfs inode */ + inode_init_once(&info->vfs_inode); + + /* + * initialize reiser4 specific part fo inode. 
+ * NOTE-NIKITA add here initializations for locks, list heads, + * etc. that will be added to our private inode part. + */ + INIT_LIST_HEAD(get_readdir_list(&info->vfs_inode)); + init_rwsem(&info->p.conv_sem); + /* init semaphore which is used during inode loading */ + loading_init_once(&info->p); + INIT_RADIX_TREE(jnode_tree_by_reiser4_inode(&info->p), + GFP_ATOMIC); +#if REISER4_DEBUG + info->p.nr_jnodes = 0; +#endif +} + +/** + * init_inodes - create znode cache + * + * Initializes slab cache of inodes. It is part of reiser4 module initialization + */ +static int init_inodes(void) +{ + inode_cache = kmem_cache_create("reiser4_inode", + sizeof(struct reiser4_inode_object), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, init_once); + if (inode_cache == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * done_inodes - delete inode cache + * + * This is called on reiser4 module unloading or system shutdown. + */ +static void done_inodes(void) +{ + destroy_reiser4_cache(&inode_cache); +} + +/** + * reiser4_alloc_inode - alloc_inode of super operations + * @super: super block new inode is allocated for + * + * Allocates new inode, initializes reiser4 specific part of it. 
+ */ +static struct inode *reiser4_alloc_inode(struct super_block *super) +{ + struct reiser4_inode_object *obj; + + assert("nikita-1696", super != NULL); + obj = kmem_cache_alloc(inode_cache, reiser4_ctx_gfp_mask_get()); + if (obj != NULL) { + reiser4_inode *info; + + info = &obj->p; + + info->pset = plugin_set_get_empty(); + info->hset = plugin_set_get_empty(); + info->extmask = 0; + info->locality_id = 0ull; + info->plugin_mask = 0; + info->heir_mask = 0; +#if !REISER4_INO_IS_OID + info->oid_hi = 0; +#endif + reiser4_seal_init(&info->sd_seal, NULL, NULL); + coord_init_invalid(&info->sd_coord, NULL); + info->flags = 0; + spin_lock_init(&info->guard); + /* this deals with info's loading semaphore */ + loading_alloc(info); + info->vroot = UBER_TREE_ADDR; + return &obj->vfs_inode; + } else + return NULL; +} + +/** + * reiser4_destroy_inode - destroy_inode of super operations + * @inode: inode being destroyed + * + * Puts reiser4 specific portion of inode, frees memory occupied by inode. + */ +static void reiser4_destroy_inode(struct inode *inode) +{ + reiser4_inode *info; + + info = reiser4_inode_data(inode); + + assert("vs-1220", inode_has_no_jnodes(info)); + + if (!is_bad_inode(inode) && is_inode_loaded(inode)) { + file_plugin *fplug = inode_file_plugin(inode); + if (fplug->destroy_inode != NULL) + fplug->destroy_inode(inode); + } + reiser4_dispose_cursors(inode); + if (info->pset) + plugin_set_put(info->pset); + if (info->hset) + plugin_set_put(info->hset); + + /* + * cannot add similar assertion about ->i_list as prune_icache return + * inode into slab with dangling ->list.{next,prev}. This is safe, + * because they are re-initialized in the new_inode(). 
+ */ + assert("nikita-2895", hlist_empty(&inode->i_dentry)); + assert("nikita-2896", hlist_unhashed(&inode->i_hash)); + assert("nikita-2898", list_empty_careful(get_readdir_list(inode))); + + /* this deals with info's loading semaphore */ + loading_destroy(info); + + kmem_cache_free(inode_cache, + container_of(info, struct reiser4_inode_object, p)); +} + +/** + * reiser4_dirty_inode - dirty_inode of super operations + * @inode: inode being dirtied + * + * Updates stat data. + */ +static void reiser4_dirty_inode(struct inode *inode, int flags) +{ + int result; + reiser4_context *ctx; + + if (!is_in_reiser4_context()) + return; + assert("edward-1606", !IS_RDONLY(inode)); + assert("edward-1607", + (inode_file_plugin(inode)->estimate.update(inode) <= + get_current_context()->grabbed_blocks)); + + ctx = get_current_context(); + if (ctx->locked_page) + unlock_page(ctx->locked_page); + + result = reiser4_update_sd(inode); + + if (ctx->locked_page) + lock_page(ctx->locked_page); + if (result) + warning("edward-1605", "failed to dirty inode for %llu: %d", + get_inode_oid(inode), result); +} + +/** + * ->evict_inode() of super operations + * @inode: inode to delete + * + * Calls file plugin's delete_object method to delete object items from + * filesystem tree and calls clear_inode(). + */ +static void reiser4_evict_inode(struct inode *inode) +{ + reiser4_context *ctx; + file_plugin *fplug; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + warning("vs-15", "failed to init context"); + return; + } + + if (inode->i_nlink == 0 && is_inode_loaded(inode)) { + fplug = inode_file_plugin(inode); + if (fplug != NULL && fplug->delete_object != NULL) + fplug->delete_object(inode); + } + + truncate_inode_pages_final(&inode->i_data); + inode->i_blocks = 0; + clear_inode(inode); + reiser4_exit_context(ctx); +} + +/** + * reiser4_put_super - put_super of super operations + * @super: super block to free + * + * Stops daemons, release resources, umounts in short. 
+ */ +static void reiser4_put_super(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + reiser4_context *ctx; + + sbinfo = get_super_private(super); + assert("vs-1699", sbinfo); + + debugfs_remove(sbinfo->tmgr.debugfs_atom_count); + debugfs_remove(sbinfo->tmgr.debugfs_id_count); + debugfs_remove(sbinfo->debugfs_root); + + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("vs-17", "failed to init context"); + return; + } + + /* have disk format plugin to free its resources */ + if (get_super_private(super)->df_plug->release) + get_super_private(super)->df_plug->release(super); + + reiser4_done_formatted_fake(super); + reiser4_done_csum_tfm(sbinfo->csum_tfm); + + /* stop daemons: ktxnmgr and entd */ + reiser4_done_entd(super); + reiser4_done_ktxnmgrd(super); + reiser4_done_txnmgr(&sbinfo->tmgr); + + assert("edward-1890", list_empty(&get_super_private(super)->all_jnodes)); + assert("edward-1891", get_current_context()->trans->atom == NULL); + reiser4_check_block_counters(super); + + reiser4_exit_context(ctx); + reiser4_done_fs_info(super); +} + +/** + * reiser4_statfs - statfs of super operations + * @super: super block of file system in queried + * @stafs: buffer to fill with statistics + * + * Returns information about filesystem. + */ +static int reiser4_statfs(struct dentry *dentry, struct kstatfs *statfs) +{ + sector_t total; + sector_t reserved; + sector_t free; + sector_t forroot; + sector_t deleted; + reiser4_context *ctx; + struct super_block *super = dentry->d_sb; + + assert("nikita-408", super != NULL); + assert("nikita-409", statfs != NULL); + + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + statfs->f_type = reiser4_statfs_type(super); + statfs->f_bsize = super->s_blocksize; + + /* + * 5% of total block space is reserved. This is needed for flush and + * for truncates (so that we are able to perform truncate/unlink even + * on the otherwise completely full file system). 
If this reservation + * is hidden from statfs(2), users will mistakenly guess that they + * have enough free space to complete some operation, which is + * frustrating. + * + * Another possible solution is to subtract ->blocks_reserved from + * ->f_bfree, but changing available space seems less intrusive than + * letting user to see 5% of disk space to be used directly after + * mkfs. + */ + total = reiser4_block_count(super); + reserved = get_super_private(super)->blocks_reserved; + deleted = txnmgr_count_deleted_blocks(); + free = reiser4_free_blocks(super) + deleted; + forroot = reiser4_reserved_blocks(super, 0, 0); + + /* + * These counters may be in inconsistent state because we take the + * values without keeping any global spinlock. Here we do a sanity + * check that free block counter does not exceed the number of all + * blocks. + */ + if (free > total) + free = total; + statfs->f_blocks = total - reserved; + /* make sure statfs->f_bfree is never larger than statfs->f_blocks */ + if (free > reserved) + free -= reserved; + else + free = 0; + statfs->f_bfree = free; + + if (free > forroot) + free -= forroot; + else + free = 0; + statfs->f_bavail = free; + + statfs->f_files = 0; + statfs->f_ffree = 0; + + /* maximal acceptable name length depends on directory plugin. */ + assert("nikita-3351", super->s_root->d_inode != NULL); + statfs->f_namelen = reiser4_max_filename_len(super->s_root->d_inode); + reiser4_exit_context(ctx); + return 0; +} + +/** + * reiser4_writeback_inodes - writeback_inodes of super operations + * @super: + * @wb: + * @wbc: + * + * This method is called by background and non-backgound writeback. + * Reiser4's implementation uses generic_writeback_sb_inodes to call + * reiser4_writepages_dispatch for each of dirty inodes. + * reiser4_writepages_dispatch handles pages dirtied via shared + * mapping - dirty pages get into atoms. Writeout is called to flush + * some atoms. 
+ */ +static long reiser4_writeback_inodes(struct super_block *super, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all) +{ + long result; + reiser4_context *ctx; + + if (wbc->for_kupdate) + /* reiser4 has its own means of periodical write-out */ + goto skip; + + spin_unlock(&wb->list_lock); + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("vs-13", "failed to init context"); + spin_lock(&wb->list_lock); + goto skip; + } + /* + * call reiser4_writepages for each of dirty inodes to turn + * dirty pages into transactions if they were not yet. + */ + spin_lock(&wb->list_lock); + result = generic_writeback_sb_inodes(super, wb, wbc, work, flush_all); + spin_unlock(&wb->list_lock); + + if (result <= 0) + goto exit; + wbc->nr_to_write = result; + + /* flush goes here */ + reiser4_writeout(super, wbc); + exit: + /* avoid recursive calls to ->writeback_inodes */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + spin_lock(&wb->list_lock); + + return result; + skip: + writeback_skip_sb_inodes(super, wb); + return 0; +} + +/* ->sync_fs() of super operations */ +static int reiser4_sync_fs(struct super_block *super, int wait) +{ + reiser4_context *ctx; + struct bdi_writeback *wb; + struct wb_writeback_work work = { + .sb = super, + .sync_mode = WB_SYNC_ALL, + .range_cyclic = 0, + .nr_pages = LONG_MAX, + .reason = WB_REASON_SYNC, + .for_sync = 1, + }; + struct writeback_control wbc = { + .sync_mode = work.sync_mode, + .range_cyclic = work.range_cyclic, + .range_start = 0, + .range_end = LLONG_MAX, + }; + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("edward-1567", "failed to init context"); + return PTR_ERR(ctx); + } + /* + * We don't capture superblock here. + * Superblock is captured only by operations, which change + * its fields different from free_blocks, nr_files, next_oid. 
+ * After system crash the mentioned fields are recovered from + * journal records, see reiser4_journal_recover_sb_data(). + * Also superblock is captured at final commit when releasing + * disk format. + */ + wb = &inode_to_bdi(reiser4_get_super_fake(super))->wb; + spin_lock(&wb->list_lock); + generic_writeback_sb_inodes(super, wb, &wbc, &work, true); + spin_unlock(&wb->list_lock); + wbc.nr_to_write = LONG_MAX; + /* + * (flush goes here) + * commit all transactions + */ + reiser4_writeout(super, &wbc); + + reiser4_exit_context(ctx); + return 0; +} + +static int reiser4_remount(struct super_block *s, int *mount_flags, char *arg) +{ + sync_filesystem(s); + return 0; +} + +/** + * reiser4_show_options - show_options of super operations + * @m: file where to write information + * @mnt: mount structure + * + * Makes reiser4 mount options visible in /proc/mounts. + */ +static int reiser4_show_options(struct seq_file *m, struct dentry *dentry) +{ + struct super_block *super; + reiser4_super_info_data *sbinfo; + + super = dentry->d_sb; + sbinfo = get_super_private(super); + + seq_printf(m, ",atom_max_size=0x%x", sbinfo->tmgr.atom_max_size); + seq_printf(m, ",atom_max_age=0x%x", sbinfo->tmgr.atom_max_age); + seq_printf(m, ",atom_min_size=0x%x", sbinfo->tmgr.atom_min_size); + seq_printf(m, ",atom_max_flushers=0x%x", + sbinfo->tmgr.atom_max_flushers); + seq_printf(m, ",cbk_cache_slots=0x%x", + sbinfo->tree.cbk_cache.nr_slots); + + return 0; +} + +struct super_operations reiser4_super_operations = { + .alloc_inode = reiser4_alloc_inode, + .destroy_inode = reiser4_destroy_inode, + .dirty_inode = reiser4_dirty_inode, + .evict_inode = reiser4_evict_inode, + .put_super = reiser4_put_super, + .sync_fs = reiser4_sync_fs, + .statfs = reiser4_statfs, + .remount_fs = reiser4_remount, + .writeback_inodes = reiser4_writeback_inodes, + .show_options = reiser4_show_options +}; + +/** + * fill_super - initialize super block on mount + * @super: super block to fill + * @data: reiser4 
specific mount option + * @silent: + * + * This is to be called by reiser4_get_sb. Mounts filesystem. + */ +static int fill_super(struct super_block *super, void *data, int silent) +{ + reiser4_context ctx; + int result; + reiser4_super_info_data *sbinfo; + + assert("zam-989", super != NULL); + + super->s_op = NULL; + init_stack_context(&ctx, super); + + /* allocate reiser4 specific super block */ + if ((result = reiser4_init_fs_info(super)) != 0) + goto failed_init_sinfo; + + sbinfo = get_super_private(super); + + if ((result = reiser4_init_csum_tfm(&sbinfo->csum_tfm)) != 0) + goto failed_init_csum_tfm; + + /* initialize various reiser4 parameters, parse mount options */ + if ((result = reiser4_init_super_data(super, data)) != 0) + goto failed_init_super_data; + + /* read reiser4 master super block, initialize disk format plugin */ + if ((result = reiser4_init_read_super(super, silent)) != 0) + goto failed_init_read_super; + + /* initialize transaction manager */ + reiser4_init_txnmgr(&sbinfo->tmgr); + + /* initialize ktxnmgrd context and start kernel thread ktxnmrgd */ + if ((result = reiser4_init_ktxnmgrd(super)) != 0) + goto failed_init_ktxnmgrd; + + /* initialize entd context and start kernel thread entd */ + if ((result = reiser4_init_entd(super)) != 0) + goto failed_init_entd; + + /* initialize address spaces for formatted nodes and bitmaps */ + if ((result = reiser4_init_formatted_fake(super)) != 0) + goto failed_init_formatted_fake; + + /* initialize disk format plugin */ + if ((result = get_super_private(super)->df_plug->init_format(super, + data)) != 0) + goto failed_init_disk_format; + + /* + * There are some 'committed' versions of reiser4 super block counters, + * which correspond to reiser4 on-disk state. 
These counters are + * initialized here + */ + sbinfo->blocks_free_committed = sbinfo->blocks_free; + sbinfo->nr_files_committed = oids_used(super); + + /* get inode of root directory */ + if ((result = reiser4_init_root_inode(super)) != 0) + goto failed_init_root_inode; + + if ((result = get_super_private(super)->df_plug->version_update(super)) != 0) + goto failed_update_format_version; + + process_safelinks(super); + reiser4_exit_context(&ctx); + + sbinfo->debugfs_root = debugfs_create_dir(super->s_id, + reiser4_debugfs_root); + if (sbinfo->debugfs_root) { + sbinfo->tmgr.debugfs_atom_count = + debugfs_create_u32("atom_count", S_IFREG|S_IRUSR, + sbinfo->debugfs_root, + &sbinfo->tmgr.atom_count); + sbinfo->tmgr.debugfs_id_count = + debugfs_create_u32("id_count", S_IFREG|S_IRUSR, + sbinfo->debugfs_root, + &sbinfo->tmgr.id_count); + } + printk("reiser4: %s: using %s.\n", super->s_id, + txmod_plugin_by_id(sbinfo->txmod)->h.desc); + return 0; + + failed_update_format_version: + failed_init_root_inode: + if (sbinfo->df_plug->release) + sbinfo->df_plug->release(super); + failed_init_disk_format: + reiser4_done_formatted_fake(super); + failed_init_formatted_fake: + reiser4_done_entd(super); + failed_init_entd: + reiser4_done_ktxnmgrd(super); + failed_init_ktxnmgrd: + reiser4_done_txnmgr(&sbinfo->tmgr); + failed_init_read_super: + failed_init_super_data: + failed_init_csum_tfm: + reiser4_done_fs_info(super); + failed_init_sinfo: + reiser4_exit_context(&ctx); + return result; +} + +/** + * reiser4_mount - mount of file_system_type operations + * @fs_type: + * @flags: mount flags MS_RDONLY, MS_VERBOSE, etc + * @dev_name: block device file name + * @data: specific mount options + * + * Reiser4 mount entry. 
+ */ +static struct dentry *reiser4_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, fill_super); +} + +/* structure describing the reiser4 filesystem implementation */ +static struct file_system_type reiser4_fs_type = { + .owner = THIS_MODULE, + .name = "reiser4", + .fs_flags = FS_REQUIRES_DEV, + .mount = reiser4_mount, + .kill_sb = kill_block_super, + .next = NULL +}; + +void destroy_reiser4_cache(struct kmem_cache **cachep) +{ + BUG_ON(*cachep == NULL); + kmem_cache_destroy(*cachep); + *cachep = NULL; +} + +/** + * init_reiser4 - reiser4 initialization entry point + * + * Initializes reiser4 slabs, registers reiser4 filesystem type. It is called + * on kernel initialization or during reiser4 module load. + */ +static int __init init_reiser4(void) +{ + int result; + + printk(KERN_INFO + "Loading Reiser4 (format release: 4.%d.%d) " + "See www.namesys.com for a description of Reiser4.\n", + get_release_number_major(), + get_release_number_minor()); + + /* initialize slab cache of inodes */ + if ((result = init_inodes()) != 0) + goto failed_inode_cache; + + /* initialize cache of znodes */ + if ((result = init_znodes()) != 0) + goto failed_init_znodes; + + /* initialize all plugins */ + if ((result = init_plugins()) != 0) + goto failed_init_plugins; + + /* initialize cache of plugin_set-s and plugin_set's hash table */ + if ((result = init_plugin_set()) != 0) + goto failed_init_plugin_set; + + /* initialize caches of txn_atom-s and txn_handle-s */ + if ((result = init_txnmgr_static()) != 0) + goto failed_init_txnmgr_static; + + /* initialize cache of jnodes */ + if ((result = init_jnodes()) != 0) + goto failed_init_jnodes; + + /* initialize cache of flush queues */ + if ((result = reiser4_init_fqs()) != 0) + goto failed_init_fqs; + + /* initialize cache of structures attached to dentry->d_fsdata */ + if ((result = reiser4_init_dentry_fsdata()) != 0) + goto 
failed_init_dentry_fsdata; + + /* initialize cache of structures attached to file->private_data */ + if ((result = reiser4_init_file_fsdata()) != 0) + goto failed_init_file_fsdata; + + /* + * initialize cache of d_cursors. See plugin/file_ops_readdir.c for + * more details + */ + if ((result = reiser4_init_d_cursor()) != 0) + goto failed_init_d_cursor; + + /* initialize cache of blocknr set entries */ + if ((result = blocknr_set_init_static()) != 0) + goto failed_init_blocknr_set; + + /* initialize cache of blocknr list entries */ + if ((result = blocknr_list_init_static()) != 0) + goto failed_init_blocknr_list; + + if ((result = register_filesystem(&reiser4_fs_type)) == 0) { + reiser4_debugfs_root = debugfs_create_dir("reiser4", NULL); + return 0; + } + + blocknr_list_done_static(); + failed_init_blocknr_list: + blocknr_set_done_static(); + failed_init_blocknr_set: + reiser4_done_d_cursor(); + failed_init_d_cursor: + reiser4_done_file_fsdata(); + failed_init_file_fsdata: + reiser4_done_dentry_fsdata(); + failed_init_dentry_fsdata: + reiser4_done_fqs(); + failed_init_fqs: + done_jnodes(); + failed_init_jnodes: + done_txnmgr_static(); + failed_init_txnmgr_static: + done_plugin_set(); + failed_init_plugin_set: + failed_init_plugins: + done_znodes(); + failed_init_znodes: + done_inodes(); + failed_inode_cache: + return result; +} + +/** + * done_reiser4 - reiser4 exit entry point + * + * Unregister reiser4 filesystem type, deletes caches. It is called on shutdown + * or at module unload. 
+ */ +static void __exit done_reiser4(void) +{ + int result; + + debugfs_remove(reiser4_debugfs_root); + result = unregister_filesystem(&reiser4_fs_type); + BUG_ON(result != 0); + blocknr_list_done_static(); + blocknr_set_done_static(); + reiser4_done_d_cursor(); + reiser4_done_file_fsdata(); + reiser4_done_dentry_fsdata(); + reiser4_done_fqs(); + done_jnodes(); + done_txnmgr_static(); + done_plugin_set(); + done_znodes(); + destroy_reiser4_cache(&inode_cache); +} + +module_init(init_reiser4); +module_exit(done_reiser4); + +MODULE_ALIAS_FS("reiser4"); + +MODULE_DESCRIPTION("Reiser4 filesystem"); +MODULE_AUTHOR("Hans Reiser "); + +MODULE_LICENSE("GPL"); + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/tap.c b/fs/reiser4/tap.c new file mode 100644 index 000000000000..1234188c3871 --- /dev/null +++ b/fs/reiser4/tap.c @@ -0,0 +1,376 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + Tree Access Pointer (tap). + + tap is data structure combining coord and lock handle (mostly). It is + useful when one has to scan tree nodes (for example, in readdir, or flush), + for tap functions allow to move tap in either direction transparently + crossing unit/item/node borders. + + Tap doesn't provide automatic synchronization of its fields as it is + supposed to be per-thread object. 
+*/ + +#include "forward.h" +#include "debug.h" +#include "coord.h" +#include "tree.h" +#include "context.h" +#include "tap.h" +#include "znode.h" +#include "tree_walk.h" + +#if REISER4_DEBUG +static int tap_invariant(const tap_t *tap); +static void tap_check(const tap_t *tap); +#else +#define tap_check(tap) noop +#endif + +/** load node tap is pointing to, if not loaded already */ +int reiser4_tap_load(tap_t *tap) +{ + tap_check(tap); + if (tap->loaded == 0) { + int result; + + result = zload_ra(tap->coord->node, &tap->ra_info); + if (result != 0) + return result; + coord_clear_iplug(tap->coord); + } + ++tap->loaded; + tap_check(tap); + return 0; +} + +/** release node tap is pointing to. Dual to tap_load() */ +void reiser4_tap_relse(tap_t *tap) +{ + tap_check(tap); + if (tap->loaded > 0) { + --tap->loaded; + if (tap->loaded == 0) + zrelse(tap->coord->node); + } + tap_check(tap); +} + +/** + * init tap to consist of @coord and @lh. Locks on nodes will be acquired with + * @mode + */ +void reiser4_tap_init(tap_t *tap, coord_t *coord, lock_handle * lh, + znode_lock_mode mode) +{ + tap->coord = coord; + tap->lh = lh; + tap->mode = mode; + tap->loaded = 0; + INIT_LIST_HEAD(&tap->linkage); + reiser4_init_ra_info(&tap->ra_info); +} + +/** add @tap to the per-thread list of all taps */ +void reiser4_tap_monitor(tap_t *tap) +{ + assert("nikita-2623", tap != NULL); + tap_check(tap); + list_add(&tap->linkage, reiser4_taps_list()); + tap_check(tap); +} + +/* duplicate @src into @dst. Copy lock handle. @dst is not initially + * loaded. 
*/ +void reiser4_tap_copy(tap_t *dst, tap_t *src) +{ + assert("nikita-3193", src != NULL); + assert("nikita-3194", dst != NULL); + + *dst->coord = *src->coord; + if (src->lh->node) + copy_lh(dst->lh, src->lh); + dst->mode = src->mode; + dst->loaded = 0; + INIT_LIST_HEAD(&dst->linkage); + dst->ra_info = src->ra_info; +} + +/** finish with @tap */ +void reiser4_tap_done(tap_t *tap) +{ + assert("nikita-2565", tap != NULL); + tap_check(tap); + if (tap->loaded > 0) + zrelse(tap->coord->node); + done_lh(tap->lh); + tap->loaded = 0; + list_del_init(&tap->linkage); + tap->coord->node = NULL; +} + +/** + * move @tap to the new node, locked with @target. Load @target, if @tap was + * already loaded. + */ +int reiser4_tap_move(tap_t *tap, lock_handle * target) +{ + int result = 0; + + assert("nikita-2567", tap != NULL); + assert("nikita-2568", target != NULL); + assert("nikita-2570", target->node != NULL); + assert("nikita-2569", tap->coord->node == tap->lh->node); + + tap_check(tap); + if (tap->loaded > 0) + result = zload_ra(target->node, &tap->ra_info); + + if (result == 0) { + if (tap->loaded > 0) + zrelse(tap->coord->node); + done_lh(tap->lh); + copy_lh(tap->lh, target); + tap->coord->node = target->node; + coord_clear_iplug(tap->coord); + } + tap_check(tap); + return result; +} + +/** + * move @tap to @target. Acquire lock on @target, if @tap was already + * loaded. 
+ */ +static int tap_to(tap_t *tap, znode * target) +{ + int result; + + assert("nikita-2624", tap != NULL); + assert("nikita-2625", target != NULL); + + tap_check(tap); + result = 0; + if (tap->coord->node != target) { + lock_handle here; + + init_lh(&here); + result = longterm_lock_znode(&here, target, + tap->mode, ZNODE_LOCK_HIPRI); + if (result == 0) { + result = reiser4_tap_move(tap, &here); + done_lh(&here); + } + } + tap_check(tap); + return result; +} + +/** + * move @tap to given @target, loading and locking @target->node if + * necessary + */ +int tap_to_coord(tap_t *tap, coord_t *target) +{ + int result; + + tap_check(tap); + result = tap_to(tap, target->node); + if (result == 0) + coord_dup(tap->coord, target); + tap_check(tap); + return result; +} + +/** return list of all taps */ +struct list_head *reiser4_taps_list(void) +{ + return &get_current_context()->taps; +} + +/** helper function for go_{next,prev}_{item,unit,node}() */ +int go_dir_el(tap_t *tap, sideof dir, int units_p) +{ + coord_t dup; + coord_t *coord; + int result; + + int (*coord_dir) (coord_t *); + int (*get_dir_neighbor) (lock_handle *, znode *, int, int); + void (*coord_init) (coord_t *, const znode *); + ON_DEBUG(int (*coord_check) (const coord_t *)); + + assert("nikita-2556", tap != NULL); + assert("nikita-2557", tap->coord != NULL); + assert("nikita-2558", tap->lh != NULL); + assert("nikita-2559", tap->coord->node != NULL); + + tap_check(tap); + if (dir == LEFT_SIDE) { + coord_dir = units_p ? coord_prev_unit : coord_prev_item; + get_dir_neighbor = reiser4_get_left_neighbor; + coord_init = coord_init_last_unit; + } else { + coord_dir = units_p ? coord_next_unit : coord_next_item; + get_dir_neighbor = reiser4_get_right_neighbor; + coord_init = coord_init_first_unit; + } + ON_DEBUG(coord_check = + units_p ? 
coord_is_existing_unit : coord_is_existing_item); + assert("nikita-2560", coord_check(tap->coord)); + + coord = tap->coord; + coord_dup(&dup, coord); + if (coord_dir(&dup) != 0) { + do { + /* move to the left neighboring node */ + lock_handle dup; + + init_lh(&dup); + result = + get_dir_neighbor(&dup, coord->node, (int)tap->mode, + GN_CAN_USE_UPPER_LEVELS); + if (result == 0) { + result = reiser4_tap_move(tap, &dup); + if (result == 0) + coord_init(tap->coord, dup.node); + done_lh(&dup); + } + /* skip empty nodes */ + } while ((result == 0) && node_is_empty(coord->node)); + } else { + result = 0; + coord_dup(coord, &dup); + } + assert("nikita-2564", ergo(!result, coord_check(tap->coord))); + tap_check(tap); + return result; +} + +/** + * move @tap to the next unit, transparently crossing item and node + * boundaries + */ +int go_next_unit(tap_t *tap) +{ + return go_dir_el(tap, RIGHT_SIDE, 1); +} + +/** + * move @tap to the previous unit, transparently crossing item and node + * boundaries + */ +int go_prev_unit(tap_t *tap) +{ + return go_dir_el(tap, LEFT_SIDE, 1); +} + +/** + * @shift times apply @actor to the @tap. This is used to move @tap by + * @shift units (or items, or nodes) in either direction. 
+ */ +static int rewind_to(tap_t *tap, go_actor_t actor, int shift) +{ + int result; + + assert("nikita-2555", shift >= 0); + assert("nikita-2562", tap->coord->node == tap->lh->node); + + tap_check(tap); + result = reiser4_tap_load(tap); + if (result != 0) + return result; + + for (; shift > 0; --shift) { + result = actor(tap); + assert("nikita-2563", tap->coord->node == tap->lh->node); + if (result != 0) + break; + } + reiser4_tap_relse(tap); + tap_check(tap); + return result; +} + +/** move @tap @shift units rightward */ +int rewind_right(tap_t *tap, int shift) +{ + return rewind_to(tap, go_next_unit, shift); +} + +/** move @tap @shift units leftward */ +int rewind_left(tap_t *tap, int shift) +{ + return rewind_to(tap, go_prev_unit, shift); +} + +#if REISER4_DEBUG +/** debugging function: print @tap content in human readable form */ +static void print_tap(const char *prefix, const tap_t *tap) +{ + if (tap == NULL) { + printk("%s: null tap\n", prefix); + return; + } + printk("%s: loaded: %i, in-list: %i, node: %p, mode: %s\n", prefix, + tap->loaded, (&tap->linkage == tap->linkage.next && + &tap->linkage == tap->linkage.prev), + tap->lh->node, + lock_mode_name(tap->mode)); + print_coord("\tcoord", tap->coord, 0); +} + +/** check [tap-sane] invariant */ +static int tap_invariant(const tap_t *tap) +{ + /* [tap-sane] invariant */ + + if (tap == NULL) + return 1; + /* tap->mode is one of + * + * {ZNODE_NO_LOCK, ZNODE_READ_LOCK, ZNODE_WRITE_LOCK}, and + */ + if (tap->mode != ZNODE_NO_LOCK && + tap->mode != ZNODE_READ_LOCK && tap->mode != ZNODE_WRITE_LOCK) + return 2; + /* tap->coord != NULL, and */ + if (tap->coord == NULL) + return 3; + /* tap->lh != NULL, and */ + if (tap->lh == NULL) + return 4; + /* tap->loaded > 0 => znode_is_loaded(tap->coord->node), and */ + if (!ergo(tap->loaded, znode_is_loaded(tap->coord->node))) + return 5; + /* tap->coord->node == tap->lh->node if tap->lh->node is not 0 */ + if (tap->lh->node != NULL && tap->coord->node != tap->lh->node) + 
return 6; + return 0; +} + +/** debugging function: check internal @tap consistency */ +static void tap_check(const tap_t *tap) +{ + int result; + + result = tap_invariant(tap); + if (result != 0) { + print_tap("broken", tap); + reiser4_panic("nikita-2831", "tap broken: %i\n", result); + } +} +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tap.h b/fs/reiser4/tap.h new file mode 100644 index 000000000000..f777d4d4540a --- /dev/null +++ b/fs/reiser4/tap.h @@ -0,0 +1,70 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Tree Access Pointers. See tap.c for more details. */ + +#if !defined(__REISER4_TAP_H__) +#define __REISER4_TAP_H__ + +#include "forward.h" +#include "readahead.h" + +/** + tree_access_pointer aka tap. Data structure combining coord_t and lock + handle. + Invariants involving this data-type, see doc/lock-ordering for details: + + [tap-sane] + */ +struct tree_access_pointer { + /* coord tap is at */ + coord_t *coord; + /* lock handle on ->coord->node */ + lock_handle *lh; + /* mode of lock acquired by this tap */ + znode_lock_mode mode; + /* incremented by reiser4_tap_load(). + Decremented by reiser4_tap_relse(). 
*/ + int loaded; + /* list of taps */ + struct list_head linkage; + /* read-ahead hint */ + ra_info_t ra_info; +}; + +typedef int (*go_actor_t) (tap_t *tap); + +extern int reiser4_tap_load(tap_t *tap); +extern void reiser4_tap_relse(tap_t *tap); +extern void reiser4_tap_init(tap_t *tap, coord_t *coord, lock_handle * lh, + znode_lock_mode mode); +extern void reiser4_tap_monitor(tap_t *tap); +extern void reiser4_tap_copy(tap_t *dst, tap_t *src); +extern void reiser4_tap_done(tap_t *tap); +extern int reiser4_tap_move(tap_t *tap, lock_handle * target); +extern int tap_to_coord(tap_t *tap, coord_t *target); + +extern int go_dir_el(tap_t *tap, sideof dir, int units_p); +extern int go_next_unit(tap_t *tap); +extern int go_prev_unit(tap_t *tap); +extern int rewind_right(tap_t *tap, int shift); +extern int rewind_left(tap_t *tap, int shift); + +extern struct list_head *reiser4_taps_list(void); + +#define for_all_taps(tap) \ + for (tap = list_entry(reiser4_taps_list()->next, tap_t, linkage); \ + reiser4_taps_list() != &tap->linkage; \ + tap = list_entry(tap->linkage.next, tap_t, linkage)) + +/* __REISER4_TAP_H__ */ +#endif +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree.c b/fs/reiser4/tree.c new file mode 100644 index 000000000000..c8d2e4665f87 --- /dev/null +++ b/fs/reiser4/tree.c @@ -0,0 +1,1884 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * KEYS IN A TREE. + * + * The tree consists of nodes located on the disk. Node in the tree is either + * formatted or unformatted. Formatted node is one that has structure + * understood by the tree balancing and traversal code. Formatted nodes are + * further classified into leaf and internal nodes. 
Latter distinctions is + * (almost) of only historical importance: general structure of leaves and + * internal nodes is the same in Reiser4. Unformatted nodes contain raw data + * that are part of bodies of ordinary files and attributes. + * + * Each node in the tree spawns some interval in the key space. Key ranges for + * all nodes in the tree are disjoint. Actually, this only holds in some weak + * sense, because of the non-unique keys: intersection of key ranges for + * different nodes is either empty, or consists of exactly one key. + * + * Formatted node consists of a sequence of items. Each item spawns some + * interval in key space. Key ranges for all items in a tree are disjoint, + * modulo non-unique keys again. Items within nodes are ordered in the key + * order of the smallest key in a item. + * + * Particular type of item can be further split into units. Unit is piece of + * item that can be cut from item and moved into another item of the same + * time. Units are used by balancing code to repack data during balancing. + * + * Unit can be further split into smaller entities (for example, extent unit + * represents several pages, and it is natural for extent code to operate on + * particular pages and even bytes within one unit), but this is of no + * relevance to the generic balancing and lookup code. + * + * Although item is said to "spawn" range or interval of keys, it is not + * necessary that item contains piece of data addressable by each and every + * key in this range. For example, compound directory item, consisting of + * units corresponding to directory entries and keyed by hashes of file names, + * looks more as having "discrete spectrum": only some disjoint keys inside + * range occupied by this item really address data. + * + * No than less, each item always has well-defined least (minimal) key, that + * is recorded in item header, stored in the node this item is in. 
Also, item + * plugin can optionally define method ->max_key_inside() returning maximal + * key that can _possibly_ be located within this item. This method is used + * (mainly) to determine when given piece of data should be merged into + * existing item, in stead of creating new one. Because of this, even though + * ->max_key_inside() can be larger that any key actually located in the item, + * intervals + * + * [ reiser4_min_key( item ), ->max_key_inside( item ) ] + * + * are still disjoint for all items within the _same_ node. + * + * In memory node is represented by znode. It plays several roles: + * + * . something locks are taken on + * + * . something tracked by transaction manager (this is going to change) + * + * . something used to access node data + * + * . something used to maintain tree structure in memory: sibling and + * parental linkage. + * + * . something used to organize nodes into "slums" + * + * More on znodes see in znode.[ch] + * + * DELIMITING KEYS + * + * To simplify balancing, allow some flexibility in locking and speed up + * important coord cache optimization, we keep delimiting keys of nodes in + * memory. Depending on disk format (implemented by appropriate node plugin) + * node on disk can record both left and right delimiting key, only one of + * them, or none. Still, our balancing and tree traversal code keep both + * delimiting keys for a node that is in memory stored in the znode. When + * node is first brought into memory during tree traversal, its left + * delimiting key is taken from its parent, and its right delimiting key is + * either next key in its parent, or is right delimiting key of parent if + * node is the rightmost child of parent. + * + * Physical consistency of delimiting key is protected by special dk + * read-write lock. That is, delimiting keys can only be inspected or + * modified under this lock. 
But dk lock is only sufficient for fast + * "pessimistic" check, because to simplify code and to decrease lock + * contention, balancing (carry) only updates delimiting keys right before + * unlocking all locked nodes on the given tree level. For example, + * coord-by-key cache scans LRU list of recently accessed znodes. For each + * node it first does fast check under dk spin lock. If key looked for is + * not between delimiting keys for this node, next node is inspected and so + * on. If key is inside of the key range, long term lock is taken on node + * and key range is rechecked. + * + * COORDINATES + * + * To find something in the tree, you supply a key, and the key is resolved + * by coord_by_key() into a coord (coordinate) that is valid as long as the + * node the coord points to remains locked. As mentioned above trees + * consist of nodes that consist of items that consist of units. A unit is + * the smallest and indivisible piece of tree as far as balancing and tree + * search are concerned. Each node, item, and unit can be addressed by + * giving its level in the tree and the key occupied by this entity. A node + * knows what the key ranges are of the items within it, and how to find its + * items and invoke their item handlers, but it does not know how to access + * individual units within its items except through the item handlers. + * coord is a structure containing a pointer to the node, the ordinal number + * of the item within this node (a sort of item offset), and the ordinal + * number of the unit within this item. + * + * TREE LOOKUP + * + * There are two types of access to the tree: lookup and modification. + * + * Lookup is a search for the key in the tree. Search can look for either + * exactly the key given to it, or for the largest key that is not greater + * than the key given to it. This distinction is determined by "bias" + * parameter of search routine (coord_by_key()). 
coord_by_key() either + * returns error (key is not in the tree, or some kind of external error + * occurred), or successfully resolves key into coord. + * + * This resolution is done by traversing tree top-to-bottom from root level + * to the desired level. On levels above twig level (level one above the + * leaf level) nodes consist exclusively of internal items. Internal item is + * nothing more than pointer to the tree node on the child level. On twig + * level nodes consist of internal items intermixed with extent + * items. Internal items form normal search tree structure used by traversal + * to descent through the tree. + * + * TREE LOOKUP OPTIMIZATIONS + * + * Tree lookup described above is expensive even if all nodes traversed are + * already in the memory: for each node binary search within it has to be + * performed and binary searches are CPU consuming and tend to destroy CPU + * caches. + * + * Several optimizations are used to work around this: + * + * . cbk_cache (look-aside cache for tree traversals, see search.c for + * details) + * + * . seals (see seal.[ch]) + * + * . 
vroot (see search.c) + * + * General search-by-key is layered thusly: + * + * [check seal, if any] --ok--> done + * | + * failed + * | + * V + * [vroot defined] --no--> node = tree_root + * | | + * yes | + * | | + * V | + * node = vroot | + * | | + * | | + * | | + * V V + * [check cbk_cache for key] --ok--> done + * | + * failed + * | + * V + * [start tree traversal from node] + * + */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/static_stat.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "carry.h" +#include "carry_ops.h" +#include "tap.h" +#include "tree.h" +#include "vfs_ops.h" +#include "page_cache.h" +#include "super.h" +#include "reiser4.h" +#include "inode.h" + +#include /* for struct super_block */ +#include + +/* Disk address (block number) never ever used for any real tree node. This is + used as block number of "uber" znode. + + Invalid block addresses are 0 by tradition. + +*/ +const reiser4_block_nr UBER_TREE_ADDR = 0ull; + +#define CUT_TREE_MIN_ITERATIONS 64 + +static int find_child_by_addr(znode * parent, znode * child, coord_t *result); + +/* return node plugin of coord->node */ +node_plugin *node_plugin_by_coord(const coord_t *coord) +{ + assert("vs-1", coord != NULL); + assert("vs-2", coord->node != NULL); + + return coord->node->nplug; +} + +/* insert item into tree. Fields of @coord are updated so that they can be + * used by consequent insert operation. 
*/ +insert_result insert_by_key(reiser4_tree * tree /* tree to insert new item + * into */ , + const reiser4_key * key /* key of new item */ , + reiser4_item_data * data /* parameters for item + * creation */ , + coord_t *coord /* resulting insertion coord */ , + lock_handle * lh /* resulting lock + * handle */ , + tree_level stop_level /* level where to insert */ , + __u32 flags/* insertion flags */) +{ + int result; + + assert("nikita-358", tree != NULL); + assert("nikita-360", coord != NULL); + + result = coord_by_key(tree, key, coord, lh, ZNODE_WRITE_LOCK, + FIND_EXACT, stop_level, stop_level, + flags | CBK_FOR_INSERT, NULL/*ra_info */); + switch (result) { + default: + break; + case CBK_COORD_FOUND: + result = IBK_ALREADY_EXISTS; + break; + case CBK_COORD_NOTFOUND: + assert("nikita-2017", coord->node != NULL); + result = insert_by_coord(coord, data, key, lh, 0/*flags */); + break; + } + return result; +} + +/* insert item by calling carry. Helper function called if short-cut + insertion failed */ +static insert_result insert_with_carry_by_coord(coord_t *coord, + /* coord where to insert */ + lock_handle * lh, + /* lock handle of insertion node */ + reiser4_item_data * data, + /* parameters of new item */ + const reiser4_key * key, + /* key of new item */ + carry_opcode cop, + /* carry operation to perform */ + cop_insert_flag flags + /* carry flags */ ) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_insert_data *cdata; + carry_op *op; + + assert("umka-314", coord != NULL); + + /* allocate carry_pool and 3 carry_level-s */ + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, cop, coord->node, 0); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? 
PTR_ERR(op) : -EIO); + } + cdata = (carry_insert_data *) (lowest_level + 3); + cdata->coord = coord; + cdata->data = data; + cdata->key = key; + op->u.insert.d = cdata; + if (flags == 0) + flags = znode_get_tree(coord->node)->carry.insert_flags; + op->u.insert.flags = flags; + op->u.insert.type = COPT_ITEM_DATA; + op->u.insert.child = NULL; + if (lh != NULL) { + assert("nikita-3245", lh->node == coord->node); + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + } + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* form carry queue to perform paste of @data with @key at @coord, and launch + its execution by calling carry(). + + Instruct carry to update @lh it after balancing insertion coord moves into + different block. + +*/ +static int paste_with_carry(coord_t *coord, /* coord of paste */ + lock_handle * lh, /* lock handle of node + * where item is + * pasted */ + reiser4_item_data * data, /* parameters of new + * item */ + const reiser4_key * key, /* key of new item */ + unsigned flags/* paste flags */) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_insert_data *cdata; + carry_op *op; + + assert("umka-315", coord != NULL); + assert("umka-316", key != NULL); + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_PASTE, coord->node, 0); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? 
PTR_ERR(op) : -EIO); + } + cdata = (carry_insert_data *) (lowest_level + 3); + cdata->coord = coord; + cdata->data = data; + cdata->key = key; + op->u.paste.d = cdata; + if (flags == 0) + flags = znode_get_tree(coord->node)->carry.paste_flags; + op->u.paste.flags = flags; + op->u.paste.type = COPT_ITEM_DATA; + if (lh != NULL) { + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + } + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* insert item at the given coord. + + First try to skip carry by directly calling ->create_item() method of node + plugin. If this is impossible (there is not enough free space in the node, + or leftmost item in the node is created), call insert_with_carry_by_coord() + that will do full carry(). + +*/ +insert_result insert_by_coord(coord_t *coord /* coord where to + * insert. coord->node has + * to be write locked by + * caller */ , + reiser4_item_data * data /* data to be + * inserted */ , + const reiser4_key * key /* key of new item */ , + lock_handle * lh /* lock handle of write + * lock on node */ , + __u32 flags/* insertion flags */) +{ + unsigned item_size; + int result; + znode *node; + + assert("vs-247", coord != NULL); + assert("vs-248", data != NULL); + assert("vs-249", data->length >= 0); + assert("nikita-1191", znode_is_write_locked(coord->node)); + + node = coord->node; + coord_clear_iplug(coord); + result = zload(node); + if (result != 0) + return result; + + item_size = space_needed(node, NULL, data, 1); + if (item_size > znode_free_space(node) && + (flags & COPI_DONT_SHIFT_LEFT) && (flags & COPI_DONT_SHIFT_RIGHT) + && (flags & COPI_DONT_ALLOCATE)) { + /* we are forced to use free space of coord->node and new item + does not fit into it. + + Currently we get here only when we allocate and copy units + of extent item from a node to its left neighbor during + "squalloc"-ing. 
If @node (this is left neighbor) does not + have enough free space - we do not want to attempt any + shifting and allocations because we are in squeezing and + everything to the left of @node is tightly packed. + */ + result = -E_NODE_FULL; + } else if ((item_size <= znode_free_space(node)) && + !coord_is_before_leftmost(coord) && + (node_plugin_by_node(node)->fast_insert != NULL) + && node_plugin_by_node(node)->fast_insert(coord)) { + /* shortcut insertion without carry() overhead. + + Only possible if: + + - there is enough free space + + - insertion is not into the leftmost position in a node + (otherwise it would require updating of delimiting key in a + parent) + + - node plugin agrees with this + + */ + result = + node_plugin_by_node(node)->create_item(coord, key, data, + NULL); + znode_make_dirty(node); + } else { + /* otherwise do full-fledged carry(). */ + result = + insert_with_carry_by_coord(coord, lh, data, key, COP_INSERT, + flags); + } + zrelse(node); + return result; +} + +/* @coord is set to leaf level and @data is to be inserted to twig level */ +insert_result +insert_extent_by_coord(coord_t *coord, /* coord where to insert. + * coord->node has to be write + * locked by caller */ + reiser4_item_data *data,/* data to be inserted */ + const reiser4_key *key, /* key of new item */ + lock_handle *lh /* lock handle of write lock + on node */) +{ + assert("vs-405", coord != NULL); + assert("vs-406", data != NULL); + assert("vs-407", data->length > 0); + assert("vs-408", znode_is_write_locked(coord->node)); + assert("vs-409", znode_get_level(coord->node) == LEAF_LEVEL); + + return insert_with_carry_by_coord(coord, lh, data, key, COP_EXTENT, + 0 /*flags */ ); +} + +/* Insert into the item at the given coord. + + First try to skip carry by directly calling ->paste() method of item + plugin. 
If this is impossible (there is not enough free space in the node, + or we are pasting into leftmost position in the node), call + paste_with_carry() that will do full carry(). + +*/ +/* paste_into_item */ +int insert_into_item(coord_t * coord /* coord of pasting */ , + lock_handle * lh /* lock handle on node involved */ , + const reiser4_key * key /* key of unit being pasted */ , + reiser4_item_data * data /* parameters for new unit */ , + unsigned flags /* insert/paste flags */ ) +{ + int result; + int size_change; + node_plugin *nplug; + item_plugin *iplug; + + assert("umka-317", coord != NULL); + assert("umka-318", key != NULL); + + iplug = item_plugin_by_coord(coord); + nplug = node_plugin_by_coord(coord); + + assert("nikita-1480", iplug == data->iplug); + + size_change = space_needed(coord->node, coord, data, 0); + if (size_change > (int)znode_free_space(coord->node) && + (flags & COPI_DONT_SHIFT_LEFT) && (flags & COPI_DONT_SHIFT_RIGHT) + && (flags & COPI_DONT_ALLOCATE)) { + /* we are forced to use free space of coord->node and new data + does not fit into it. */ + return -E_NODE_FULL; + } + + /* shortcut paste without carry() overhead. + + Only possible if: + + - there is enough free space + + - paste is not into the leftmost unit in a node (otherwise + it would require updating of delimiting key in a parent) + + - node plugin agrees with this + + - item plugin agrees with us + */ + if (size_change <= (int)znode_free_space(coord->node) && + (coord->item_pos != 0 || + coord->unit_pos != 0 || coord->between == AFTER_UNIT) && + coord->unit_pos != 0 && nplug->fast_paste != NULL && + nplug->fast_paste(coord) && + iplug->b.fast_paste != NULL && iplug->b.fast_paste(coord)) { + if (size_change > 0) + nplug->change_item_size(coord, size_change); + /* NOTE-NIKITA: huh? where @key is used? 
*/ + result = iplug->b.paste(coord, data, NULL); + if (size_change < 0) + nplug->change_item_size(coord, size_change); + znode_make_dirty(coord->node); + } else + /* otherwise do full-fledged carry(). */ + result = paste_with_carry(coord, lh, data, key, flags); + return result; +} + +/* this either appends or truncates item @coord */ +int reiser4_resize_item(coord_t * coord /* coord of item being resized */ , + reiser4_item_data * data /* parameters of resize */ , + reiser4_key * key /* key of new unit */ , + lock_handle * lh /* lock handle of node + * being modified */ , + cop_insert_flag flags /* carry flags */ ) +{ + int result; + znode *node; + + assert("nikita-362", coord != NULL); + assert("nikita-363", data != NULL); + assert("vs-245", data->length != 0); + + node = coord->node; + coord_clear_iplug(coord); + result = zload(node); + if (result != 0) + return result; + + if (data->length < 0) + result = node_plugin_by_coord(coord)->shrink_item(coord, + -data->length); + else + result = insert_into_item(coord, lh, key, data, flags); + + zrelse(node); + return result; +} + +/* insert flow @f */ +int reiser4_insert_flow(coord_t * coord, lock_handle * lh, flow_t * f) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + reiser4_item_data *data; + carry_op *op; + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_INSERT_FLOW, coord->node, + 0 /* operate directly on coord -> node */ ); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? 
PTR_ERR(op) : -EIO); + } + + /* these are permanent during insert_flow */ + data = (reiser4_item_data *) (lowest_level + 3); + data->user = 1; + data->iplug = item_plugin_by_id(FORMATTING_ID); + data->arg = NULL; + /* data.length and data.data will be set before calling paste or + insert */ + data->length = 0; + data->data = NULL; + + op->u.insert_flow.flags = 0; + op->u.insert_flow.insert_point = coord; + op->u.insert_flow.flow = f; + op->u.insert_flow.data = data; + op->u.insert_flow.new_nodes = 0; + + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* Given a coord in parent node, obtain a znode for the corresponding child */ +znode *child_znode(const coord_t * parent_coord /* coord of pointer to + * child */ , + znode * parent /* parent of child */ , + int incore_p /* if !0 only return child if already in + * memory */ , + int setup_dkeys_p /* if !0 update delimiting keys of + * child */ ) +{ + znode *child; + + assert("nikita-1374", parent_coord != NULL); + assert("nikita-1482", parent != NULL); +#if REISER4_DEBUG + if (setup_dkeys_p) + assert_rw_not_locked(&(znode_get_tree(parent)->dk_lock)); +#endif + assert("nikita-2947", znode_is_any_locked(parent)); + + if (znode_get_level(parent) <= LEAF_LEVEL) { + /* trying to get child of leaf node */ + warning("nikita-1217", "Child of maize?"); + return ERR_PTR(RETERR(-EIO)); + } + if (item_is_internal(parent_coord)) { + reiser4_block_nr addr; + item_plugin *iplug; + reiser4_tree *tree; + + iplug = item_plugin_by_coord(parent_coord); + assert("vs-512", iplug->s.internal.down_link); + iplug->s.internal.down_link(parent_coord, NULL, &addr); + + tree = znode_get_tree(parent); + if (incore_p) + child = zlook(tree, &addr); + else + child = + zget(tree, &addr, parent, + znode_get_level(parent) - 1, + reiser4_ctx_gfp_mask_get()); + if ((child != NULL) && !IS_ERR(child) && setup_dkeys_p) + 
set_child_delimiting_keys(parent, parent_coord, child); + } else { + warning("nikita-1483", "Internal item expected"); + child = ERR_PTR(RETERR(-EIO)); + } + return child; +} + +/* remove znode from transaction */ +static void uncapture_znode(znode * node) +{ + struct page *page; + + assert("zam-1001", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + + if (!reiser4_blocknr_is_fake(znode_get_block(node))) { + int ret; + + /* An already allocated block goes right to the atom's delete set. */ + ret = + reiser4_dealloc_block(znode_get_block(node), 0, + BA_DEFER | BA_FORMATTED); + if (ret) + warning("zam-942", + "can\'t add a block (%llu) number to atom's delete set\n", + (unsigned long long)(*znode_get_block(node))); + + spin_lock_znode(node); + /* Here we return flush reserved block which was reserved at the + * moment when this allocated node was marked dirty and still + * not used by flush in node relocation procedure. */ + if (ZF_ISSET(node, JNODE_FLUSH_RESERVED)) { + txn_atom *atom; + + atom = jnode_get_atom(ZJNODE(node)); + assert("zam-939", atom != NULL); + spin_unlock_znode(node); + flush_reserved2grabbed(atom, (__u64) 1); + spin_unlock_atom(atom); + } else + spin_unlock_znode(node); + } else { + /* znode has assigned block which is counted as "fake + allocated". Return it back to "free blocks") */ + fake_allocated2free((__u64) 1, BA_FORMATTED); + } + + /* + * uncapture page from transaction. There is a possibility of a race + * with ->releasepage(): reiser4_releasepage() detaches page from this + * jnode and we have nothing to uncapture. To avoid this, get + * reference of node->pg under jnode spin lock. reiser4_uncapture_page() + * will deal with released page itself. + */ + spin_lock_znode(node); + page = znode_page(node); + if (likely(page != NULL)) { + /* + * reiser4_uncapture_page() can only be called when we are sure + * that znode is pinned in memory, which we are, because + * forget_znode() is only called from longterm_unlock_znode(). 
+ */ + get_page(page); + spin_unlock_znode(node); + lock_page(page); + reiser4_uncapture_page(page); + unlock_page(page); + put_page(page); + } else { + txn_atom *atom; + + /* handle "flush queued" znodes */ + while (1) { + atom = jnode_get_atom(ZJNODE(node)); + assert("zam-943", atom != NULL); + + if (!ZF_ISSET(node, JNODE_FLUSH_QUEUED) + || !atom->nr_running_queues) + break; + + spin_unlock_znode(node); + reiser4_atom_wait_event(atom); + spin_lock_znode(node); + } + + reiser4_uncapture_block(ZJNODE(node)); + spin_unlock_atom(atom); + zput(node); + } +} + +/* This is called from longterm_unlock_znode() when last lock is released from + the node that has been removed from the tree. At this point node is removed + from sibling list and its lock is invalidated. */ +void forget_znode(lock_handle * handle) +{ + znode *node; + reiser4_tree *tree; + + assert("umka-319", handle != NULL); + + node = handle->node; + tree = znode_get_tree(node); + + assert("vs-164", znode_is_write_locked(node)); + assert("nikita-1280", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert_rw_locked(&(node->lock.guard)); + + /* We assume that this node was detached from its parent before + * unlocking, it gives no way to reach this node from parent through a + * down link. The node should have no children and, thereby, can't be + * reached from them by their parent pointers. The only way to obtain a + * reference to the node is to use sibling pointers from its left and + * right neighbors. In the next several lines we remove the node from + * the sibling list. */ + + write_lock_tree(tree); + sibling_list_remove(node); + znode_remove(node, tree); + write_unlock_tree(tree); + + /* Here we set JNODE_DYING and cancel all pending lock requests. It + * forces all lock requestor threads to repeat iterations of getting + * lock on a child, neighbor or parent node. But, those threads can't + * come to this node again, because this node is no longer a child, + * neighbor or parent of any other node. 
This order of znode + * invalidation does not allow other threads to waste cpu time is a busy + * loop, trying to lock dying object. The exception is in the flush + * code when we take node directly from atom's capture list.*/ + reiser4_invalidate_lock(handle); + uncapture_znode(node); +} + +/* Check that internal item at @pointer really contains pointer to @child. */ +int check_tree_pointer(const coord_t * pointer /* would-be pointer to + * @child */ , + const znode * child /* child znode */ ) +{ + assert("nikita-1016", pointer != NULL); + assert("nikita-1017", child != NULL); + assert("nikita-1018", pointer->node != NULL); + + assert("nikita-1325", znode_is_any_locked(pointer->node)); + + assert("nikita-2985", + znode_get_level(pointer->node) == znode_get_level(child) + 1); + + coord_clear_iplug((coord_t *) pointer); + + if (coord_is_existing_unit(pointer)) { + item_plugin *iplug; + reiser4_block_nr addr; + + if (item_is_internal(pointer)) { + iplug = item_plugin_by_coord(pointer); + assert("vs-513", iplug->s.internal.down_link); + iplug->s.internal.down_link(pointer, NULL, &addr); + /* check that cached value is correct */ + if (disk_addr_eq(&addr, znode_get_block(child))) { + return NS_FOUND; + } + } + } + /* warning ("jmacd-1002", "tree pointer incorrect"); */ + return NS_NOT_FOUND; +} + +/* find coord of pointer to new @child in @parent. + + Find the &coord_t in the @parent where pointer to a given @child will + be in. 
+ +*/ +int find_new_child_ptr(znode * parent /* parent znode, passed locked */ , + znode * + child UNUSED_ARG /* child znode, passed locked */ , + znode * left /* left brother of new node */ , + coord_t * result /* where result is stored in */ ) +{ + int ret; + + assert("nikita-1486", parent != NULL); + assert("nikita-1487", child != NULL); + assert("nikita-1488", result != NULL); + + ret = find_child_ptr(parent, left, result); + if (ret != NS_FOUND) { + warning("nikita-1489", "Cannot find brother position: %i", ret); + return RETERR(-EIO); + } else { + result->between = AFTER_UNIT; + return RETERR(NS_NOT_FOUND); + } +} + +/* find coord of pointer to @child in @parent. + + Find the &coord_t in the @parent where pointer to a given @child is in. + +*/ +int find_child_ptr(znode * parent /* parent znode, passed locked */ , + znode * child /* child znode, passed locked */ , + coord_t * result /* where result is stored in */ ) +{ + int lookup_res; + node_plugin *nplug; + /* left delimiting key of a child */ + reiser4_key ld; + reiser4_tree *tree; + + assert("nikita-934", parent != NULL); + assert("nikita-935", child != NULL); + assert("nikita-936", result != NULL); + assert("zam-356", znode_is_loaded(parent)); + + coord_init_zero(result); + result->node = parent; + + nplug = parent->nplug; + assert("nikita-939", nplug != NULL); + + tree = znode_get_tree(parent); + /* NOTE-NIKITA taking read-lock on tree here assumes that @result is + * not aliased to ->in_parent of some znode. Otherwise, + * parent_coord_to_coord() below would modify data protected by tree + * lock. */ + read_lock_tree(tree); + /* fast path. Try to use cached value. Lock tree to keep + node->pos_in_parent and pos->*_blocknr consistent. 
*/ + if (child->in_parent.item_pos + 1 != 0) { + parent_coord_to_coord(&child->in_parent, result); + if (check_tree_pointer(result, child) == NS_FOUND) { + read_unlock_tree(tree); + return NS_FOUND; + } + + child->in_parent.item_pos = (unsigned short)~0; + } + read_unlock_tree(tree); + + /* is above failed, find some key from @child. We are looking for the + least key in a child. */ + read_lock_dk(tree); + ld = *znode_get_ld_key(child); + read_unlock_dk(tree); + /* + * now, lookup parent with key just found. Note, that left delimiting + * key doesn't identify node uniquely, because (in extremely rare + * case) two nodes can have equal left delimiting keys, if one of them + * is completely filled with directory entries that all happened to be + * hash collision. But, we check block number in check_tree_pointer() + * and, so, are safe. + */ + lookup_res = nplug->lookup(parent, &ld, FIND_EXACT, result); + /* update cached pos_in_node */ + if (lookup_res == NS_FOUND) { + write_lock_tree(tree); + coord_to_parent_coord(result, &child->in_parent); + write_unlock_tree(tree); + lookup_res = check_tree_pointer(result, child); + } + if (lookup_res == NS_NOT_FOUND) + lookup_res = find_child_by_addr(parent, child, result); + return lookup_res; +} + +/* find coord of pointer to @child in @parent by scanning + + Find the &coord_t in the @parent where pointer to a given @child + is in by scanning all internal items in @parent and comparing block + numbers in them with that of @child. 
+ +*/ +static int find_child_by_addr(znode * parent /* parent znode, passed locked */ , + znode * child /* child znode, passed locked */ , + coord_t * result /* where result is stored in */ ) +{ + int ret; + + assert("nikita-1320", parent != NULL); + assert("nikita-1321", child != NULL); + assert("nikita-1322", result != NULL); + + ret = NS_NOT_FOUND; + + for_all_units(result, parent) { + if (check_tree_pointer(result, child) == NS_FOUND) { + write_lock_tree(znode_get_tree(parent)); + coord_to_parent_coord(result, &child->in_parent); + write_unlock_tree(znode_get_tree(parent)); + ret = NS_FOUND; + break; + } + } + return ret; +} + +/* true, if @addr is "unallocated block number", which is just address, with + highest bit set. */ +int is_disk_addr_unallocated(const reiser4_block_nr * addr /* address to + * check */ ) +{ + assert("nikita-1766", addr != NULL); + cassert(sizeof(reiser4_block_nr) == 8); + return (*addr & REISER4_BLOCKNR_STATUS_BIT_MASK) == + REISER4_UNALLOCATED_STATUS_VALUE; +} + +/* returns true if removing bytes of given range of key [from_key, to_key] + causes removing of whole item @from */ +static int +item_removed_completely(coord_t * from, const reiser4_key * from_key, + const reiser4_key * to_key) +{ + item_plugin *iplug; + reiser4_key key_in_item; + + assert("umka-325", from != NULL); + assert("", item_is_extent(from)); + + /* check first key just for case */ + item_key_by_coord(from, &key_in_item); + if (keygt(from_key, &key_in_item)) + return 0; + + /* check last key */ + iplug = item_plugin_by_coord(from); + assert("vs-611", iplug && iplug->s.file.append_key); + + iplug->s.file.append_key(from, &key_in_item); + set_key_offset(&key_in_item, get_key_offset(&key_in_item) - 1); + + if (keylt(to_key, &key_in_item)) + /* last byte is not removed */ + return 0; + return 1; +} + +/* helper function for prepare_twig_kill(): @left and @right are formatted + * neighbors of extent item being completely removed. 
Load and lock neighbors + * and store lock handles into @cdata for later use by kill_hook_extent() */ +static int +prepare_children(znode * left, znode * right, carry_kill_data * kdata) +{ + int result; + int left_loaded; + int right_loaded; + + result = 0; + left_loaded = right_loaded = 0; + + if (left != NULL) { + result = zload(left); + if (result == 0) { + left_loaded = 1; + result = longterm_lock_znode(kdata->left, left, + ZNODE_READ_LOCK, + ZNODE_LOCK_LOPRI); + } + } + if (result == 0 && right != NULL) { + result = zload(right); + if (result == 0) { + right_loaded = 1; + result = longterm_lock_znode(kdata->right, right, + ZNODE_READ_LOCK, + ZNODE_LOCK_HIPRI | + ZNODE_LOCK_NONBLOCK); + } + } + if (result != 0) { + done_lh(kdata->left); + done_lh(kdata->right); + if (left_loaded != 0) + zrelse(left); + if (right_loaded != 0) + zrelse(right); + } + return result; +} + +static void done_children(carry_kill_data * kdata) +{ + if (kdata->left != NULL && kdata->left->node != NULL) { + zrelse(kdata->left->node); + done_lh(kdata->left); + } + if (kdata->right != NULL && kdata->right->node != NULL) { + zrelse(kdata->right->node); + done_lh(kdata->right); + } +} + +/* part of cut_node. It is called when cut_node is called to remove or cut part + of extent item. When head of that item is removed - we have to update right + delimiting of left neighbor of extent. When item is removed completely - we + have to set sibling link between left and right neighbor of removed + extent. This may return -E_DEADLOCK because of trying to get left neighbor + locked. 
So, caller should repeat an attempt +*/ +/* Audited by: umka (2002.06.16) */ +static int +prepare_twig_kill(carry_kill_data * kdata, znode * locked_left_neighbor) +{ + int result; + reiser4_key key; + lock_handle left_lh; + lock_handle right_lh; + coord_t left_coord; + coord_t *from; + znode *left_child; + znode *right_child; + reiser4_tree *tree; + int left_zloaded_here, right_zloaded_here; + + from = kdata->params.from; + assert("umka-326", from != NULL); + assert("umka-327", kdata->params.to != NULL); + + /* for one extent item only yet */ + assert("vs-591", item_is_extent(from)); + assert("vs-592", from->item_pos == kdata->params.to->item_pos); + + if ((kdata->params.from_key + && keygt(kdata->params.from_key, item_key_by_coord(from, &key))) + || from->unit_pos != 0) { + /* head of item @from is not removed, there is nothing to + worry about */ + return 0; + } + + result = 0; + left_zloaded_here = 0; + right_zloaded_here = 0; + + left_child = right_child = NULL; + + coord_dup(&left_coord, from); + init_lh(&left_lh); + init_lh(&right_lh); + if (coord_prev_unit(&left_coord)) { + /* @from is leftmost item in its node */ + if (!locked_left_neighbor) { + result = + reiser4_get_left_neighbor(&left_lh, from->node, + ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + switch (result) { + case 0: + break; + case -E_NO_NEIGHBOR: + /* there is no formatted node to the left of + from->node */ + warning("vs-605", + "extent item has smallest key in " + "the tree and it is about to be removed"); + return 0; + case -E_DEADLOCK: + /* need to restart */ + default: + return result; + } + + /* we have acquired left neighbor of from->node */ + result = zload(left_lh.node); + if (result) + goto done; + + locked_left_neighbor = left_lh.node; + } else { + /* squalloc_right_twig_cut should have supplied locked + * left neighbor */ + assert("vs-834", + znode_is_write_locked(locked_left_neighbor)); + result = zload(locked_left_neighbor); + if (result) + return result; + } + + 
left_zloaded_here = 1; + coord_init_last_unit(&left_coord, locked_left_neighbor); + } + + if (!item_is_internal(&left_coord)) { + /* what else but extent can be on twig level */ + assert("vs-606", item_is_extent(&left_coord)); + + /* there is no left formatted child */ + if (left_zloaded_here) + zrelse(locked_left_neighbor); + done_lh(&left_lh); + return 0; + } + + tree = znode_get_tree(left_coord.node); + left_child = child_znode(&left_coord, left_coord.node, 1, 0); + + if (IS_ERR(left_child)) { + result = PTR_ERR(left_child); + goto done; + } + + /* left child is acquired, calculate new right delimiting key for it + and get right child if it is necessary */ + if (item_removed_completely + (from, kdata->params.from_key, kdata->params.to_key)) { + /* try to get right child of removed item */ + coord_t right_coord; + + assert("vs-607", + kdata->params.to->unit_pos == + coord_last_unit_pos(kdata->params.to)); + coord_dup(&right_coord, kdata->params.to); + if (coord_next_unit(&right_coord)) { + /* @to is rightmost unit in the node */ + result = + reiser4_get_right_neighbor(&right_lh, from->node, + ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + switch (result) { + case 0: + result = zload(right_lh.node); + if (result) + goto done; + + right_zloaded_here = 1; + coord_init_first_unit(&right_coord, + right_lh.node); + item_key_by_coord(&right_coord, &key); + break; + + case -E_NO_NEIGHBOR: + /* there is no formatted node to the right of + from->node */ + read_lock_dk(tree); + key = *znode_get_rd_key(from->node); + read_unlock_dk(tree); + right_coord.node = NULL; + result = 0; + break; + default: + /* real error */ + goto done; + } + } else { + /* there is an item to the right of @from - take its key */ + item_key_by_coord(&right_coord, &key); + } + + /* try to get right child of @from */ + if (right_coord.node && /* there is right neighbor of @from */ + item_is_internal(&right_coord)) { /* it is internal item */ + right_child = child_znode(&right_coord, + 
right_coord.node, 1, 0); + + if (IS_ERR(right_child)) { + result = PTR_ERR(right_child); + goto done; + } + + } + /* whole extent is removed between znodes left_child and right_child. Prepare them for linking and + update of right delimiting key of left_child */ + result = prepare_children(left_child, right_child, kdata); + } else { + /* head of item @to is removed. left_child has to get right delimting key update. Prepare it for that */ + result = prepare_children(left_child, NULL, kdata); + } + + done: + if (right_child) + zput(right_child); + if (right_zloaded_here) + zrelse(right_lh.node); + done_lh(&right_lh); + + if (left_child) + zput(left_child); + if (left_zloaded_here) + zrelse(locked_left_neighbor); + done_lh(&left_lh); + return result; +} + +/* this is used to remove part of node content between coordinates @from and @to. Units to which @from and @to are set + are to be cut completely */ +/* for try_to_merge_with_left, delete_copied, reiser4_delete_node */ +int cut_node_content(coord_t * from, coord_t * to, const reiser4_key * from_key, /* first key to be removed */ + const reiser4_key * to_key, /* last key to be removed */ + reiser4_key * + smallest_removed /* smallest key actually removed */ ) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_cut_data *cut_data; + carry_op *op; + + assert("vs-1715", coord_compare(from, to) != COORD_CMP_ON_RIGHT); + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cut_data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_CUT, from->node, 0); + assert("vs-1509", op != 0); + if (IS_ERR(op)) { + done_carry_pool(pool); + return PTR_ERR(op); + } + + cut_data = (carry_cut_data *) (lowest_level + 3); + cut_data->params.from = from; + cut_data->params.to = to; + cut_data->params.from_key = from_key; + cut_data->params.to_key = to_key; + 
cut_data->params.smallest_removed = smallest_removed; + + op->u.cut_or_kill.is_cut = 1; + op->u.cut_or_kill.u.cut = cut_data; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* cut part of the node + + Cut part or whole content of node. + + cut data between @from and @to of @from->node and call carry() to make + corresponding changes in the tree. @from->node may become empty. If so - + pointer to it will be removed. Neighboring nodes are not changed. Smallest + removed key is stored in @smallest_removed + +*/ +int kill_node_content(coord_t * from, /* coord of the first unit/item that will be eliminated */ + coord_t * to, /* coord of the last unit/item that will be eliminated */ + const reiser4_key * from_key, /* first key to be removed */ + const reiser4_key * to_key, /* last key to be removed */ + reiser4_key * smallest_removed, /* smallest key actually removed */ + znode * locked_left_neighbor, /* this is set when kill_node_content is called with left neighbor + * locked (in squalloc_right_twig_cut, namely) */ + struct inode *inode, /* inode of file whose item (or its part) is to be killed. 
This is necessary to + invalidate pages together with item pointing to them */ + int truncate) +{ /* this call is made for file truncate) */ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_kill_data *kdata; + lock_handle *left_child; + lock_handle *right_child; + carry_op *op; + + assert("umka-328", from != NULL); + assert("vs-316", !node_is_empty(from->node)); + assert("nikita-1812", coord_is_existing_unit(from) + && coord_is_existing_unit(to)); + + /* allocate carry_pool, 3 carry_level-s, carry_kill_data and structures for kill_hook_extent */ + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(carry_kill_data) + + 2 * sizeof(lock_handle) + + 5 * sizeof(reiser4_key) + 2 * sizeof(coord_t)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + kdata = (carry_kill_data *) (lowest_level + 3); + left_child = (lock_handle *) (kdata + 1); + right_child = left_child + 1; + + init_lh(left_child); + init_lh(right_child); + + kdata->params.from = from; + kdata->params.to = to; + kdata->params.from_key = from_key; + kdata->params.to_key = to_key; + kdata->params.smallest_removed = smallest_removed; + kdata->params.truncate = truncate; + kdata->flags = 0; + kdata->inode = inode; + kdata->left = left_child; + kdata->right = right_child; + /* memory for 5 reiser4_key and 2 coord_t will be used in kill_hook_extent */ + kdata->buf = (char *)(right_child + 1); + + if (znode_get_level(from->node) == TWIG_LEVEL && item_is_extent(from)) { + /* left child of extent item may have to get updated right + delimiting key and to get linked with right child of extent + @from if it will be removed completely */ + result = prepare_twig_kill(kdata, locked_left_neighbor); + if (result) { + done_children(kdata); + done_carry_pool(pool); + return result; + } + } + + op = reiser4_post_carry(lowest_level, COP_CUT, from->node, 0); + if (IS_ERR(op) || (op == NULL)) { + 
done_children(kdata); + done_carry_pool(pool); + return RETERR(op ? PTR_ERR(op) : -EIO); + } + + op->u.cut_or_kill.is_cut = 0; + op->u.cut_or_kill.u.kill = kdata; + + result = reiser4_carry(lowest_level, NULL); + + done_children(kdata); + done_carry_pool(pool); + return result; +} + +void +fake_kill_hook_tail(struct inode *inode, loff_t start, loff_t end, int truncate) +{ + if (reiser4_inode_get_flag(inode, REISER4_HAS_MMAP)) { + pgoff_t start_pg, end_pg; + + start_pg = start >> PAGE_SHIFT; + end_pg = (end - 1) >> PAGE_SHIFT; + + if ((start & (PAGE_SIZE - 1)) == 0) { + /* + * kill up to the page boundary. + */ + assert("vs-123456", start_pg == end_pg); + reiser4_invalidate_pages(inode->i_mapping, start_pg, 1, + truncate); + } else if (start_pg != end_pg) { + /* + * page boundary is within killed portion of node. + */ + assert("vs-654321", end_pg - start_pg == 1); + reiser4_invalidate_pages(inode->i_mapping, end_pg, + end_pg - start_pg, 1); + } + } + inode_sub_bytes(inode, end - start); +} + +/** + * Delete whole @node from the reiser4 tree without loading it. + * + * @left: locked left neighbor, + * @node: node to be deleted, + * @smallest_removed: leftmost key of deleted node, + * @object: inode pointer, if we truncate a file body. + * @truncate: true if called for file truncate. + * + * @return: 0 if success, error code otherwise. + * + * NOTE: if @object!=NULL we assume that @smallest_removed != NULL and it + * contains the right value of the smallest removed key from the previous + * cut_worker() iteration. This is needed for proper accounting of + * "i_blocks" and "i_bytes" fields of the @object. 
+ */ +int reiser4_delete_node(znode * node, reiser4_key * smallest_removed, + struct inode *object, int truncate) +{ + lock_handle parent_lock; + coord_t cut_from; + coord_t cut_to; + reiser4_tree *tree; + int ret; + + assert("zam-937", node != NULL); + assert("zam-933", znode_is_write_locked(node)); + assert("zam-999", smallest_removed != NULL); + + init_lh(&parent_lock); + + ret = reiser4_get_parent(&parent_lock, node, ZNODE_WRITE_LOCK); + if (ret) + return ret; + + assert("zam-934", !znode_above_root(parent_lock.node)); + + ret = zload(parent_lock.node); + if (ret) + goto failed_nozrelse; + + ret = find_child_ptr(parent_lock.node, node, &cut_from); + if (ret) + goto failed; + + /* decrement child counter and set parent pointer to NULL before + deleting the list from parent node because of checks in + internal_kill_item_hook (we can delete the last item from the parent + node, the parent node is going to be deleted and its c_count should + be zero). */ + + tree = znode_get_tree(node); + write_lock_tree(tree); + init_parent_coord(&node->in_parent, NULL); + --parent_lock.node->c_count; + write_unlock_tree(tree); + + assert("zam-989", item_is_internal(&cut_from)); + + /* @node should be deleted after unlocking. */ + ZF_SET(node, JNODE_HEARD_BANSHEE); + + /* remove a pointer from the parent node to the node being deleted. */ + coord_dup(&cut_to, &cut_from); + /* FIXME: shouldn't this be kill_node_content */ + ret = cut_node_content(&cut_from, &cut_to, NULL, NULL, NULL); + if (ret) + /* FIXME(Zam): Should we re-connect the node to its parent if + * cut_node fails? */ + goto failed; + + { + reiser4_tree *tree = current_tree; + __u64 start_offset = 0, end_offset = 0; + + read_lock_tree(tree); + write_lock_dk(tree); + if (object) { + /* We use @smallest_removed and the left delimiting of + * the current node for @object->i_blocks, i_bytes + * calculation. We assume that the items after the + * *@smallest_removed key have been deleted from the + * file body. 
*/ + start_offset = get_key_offset(znode_get_ld_key(node)); + end_offset = get_key_offset(smallest_removed); + } + + assert("zam-1021", znode_is_connected(node)); + if (node->left) + znode_set_rd_key(node->left, znode_get_rd_key(node)); + + *smallest_removed = *znode_get_ld_key(node); + + write_unlock_dk(tree); + read_unlock_tree(tree); + + if (object) { + /* we used to perform actions which are to be performed on items on their removal from tree in + special item method - kill_hook. Here for optimization reasons we avoid reading node + containing item we remove and can not call item's kill hook. Instead we call function which + does exactly the same things as tail kill hook in assumption that node we avoid reading + contains only one item and that item is a tail one. */ + fake_kill_hook_tail(object, start_offset, end_offset, + truncate); + } + } + failed: + zrelse(parent_lock.node); + failed_nozrelse: + done_lh(&parent_lock); + + return ret; +} + +static int can_delete(const reiser4_key *key, znode *node) +{ + int result; + + read_lock_dk(current_tree); + result = keyle(key, znode_get_ld_key(node)); + read_unlock_dk(current_tree); + return result; +} + +/** + * This subroutine is not optimal but implementation seems to + * be easier). + * + * @tap: the point deletion process begins from, + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * @truncate: true if called for file truncate. + * @progress: return true if a progress in file items deletions was made, + * @smallest_removed value is actual in that case. + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that long + * reiser4_cut_tree operation was interrupted for allowing atom commit. 
+ */ +int +cut_tree_worker_common(tap_t * tap, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, struct inode *object, + int truncate, int *progress) +{ + lock_handle next_node_lock; + coord_t left_coord; + int result; + + assert("zam-931", tap->coord->node != NULL); + assert("zam-932", znode_is_write_locked(tap->coord->node)); + + *progress = 0; + init_lh(&next_node_lock); + + while (1) { + znode *node; /* node from which items are cut */ + node_plugin *nplug; /* node plugin for @node */ + + node = tap->coord->node; + + /* Move next_node_lock to the next node on the left. */ + result = + reiser4_get_left_neighbor(&next_node_lock, node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result != 0 && result != -E_NO_NEIGHBOR) + break; + /* Check can we delete the node as a whole. */ + if (*progress && znode_get_level(node) == LEAF_LEVEL && + can_delete(from_key, node)) { + result = reiser4_delete_node(node, smallest_removed, + object, truncate); + } else { + result = reiser4_tap_load(tap); + if (result) + return result; + + /* Prepare the second (right) point for cut_node() */ + if (*progress) + coord_init_last_unit(tap->coord, node); + + else if (item_plugin_by_coord(tap->coord)->b.lookup == + NULL) + /* set rightmost unit for the items without lookup method */ + tap->coord->unit_pos = + coord_last_unit_pos(tap->coord); + + nplug = node->nplug; + + assert("vs-686", nplug); + assert("vs-687", nplug->lookup); + + /* left_coord is leftmost unit cut from @node */ + result = nplug->lookup(node, from_key, + FIND_MAX_NOT_MORE_THAN, + &left_coord); + + if (IS_CBKERR(result)) + break; + + /* adjust coordinates so that they are set to existing units */ + if (coord_set_to_right(&left_coord) + || coord_set_to_left(tap->coord)) { + result = 0; + break; + } + + if (coord_compare(&left_coord, tap->coord) == + COORD_CMP_ON_RIGHT) { + /* keys from @from_key to @to_key are not in the tree */ + result = 0; + break; + } + + if 
(left_coord.item_pos != tap->coord->item_pos) { + /* do not allow to cut more than one item. It is added to solve problem of truncating + partially converted files. If file is partially converted there may exist a twig node + containing both internal item or items pointing to leaf nodes with formatting items + and extent item. We do not want to kill internal items being at twig node here + because cut_tree_worker assumes killing them from level level */ + coord_dup(&left_coord, tap->coord); + assert("vs-1652", + coord_is_existing_unit(&left_coord)); + left_coord.unit_pos = 0; + } + + /* cut data from one node */ + /* *smallest_removed = *reiser4_min_key(); */ + result = + kill_node_content(&left_coord, tap->coord, from_key, + to_key, smallest_removed, + next_node_lock.node, object, + truncate); + reiser4_tap_relse(tap); + } + if (result) + break; + + ++(*progress); + + /* Check whether all items with keys >= from_key were removed + * from the tree. */ + if (keyle(smallest_removed, from_key)) + /* result = 0; */ + break; + + if (next_node_lock.node == NULL) + break; + + result = reiser4_tap_move(tap, &next_node_lock); + done_lh(&next_node_lock); + if (result) + break; + + /* Break long reiser4_cut_tree operation (deletion of a large + file) if atom requires commit. */ + if (*progress > CUT_TREE_MIN_ITERATIONS + && current_atom_should_commit()) { + result = -E_REPEAT; + break; + } + } + done_lh(&next_node_lock); + /* assert("vs-301", !keyeq(&smallest_removed, reiser4_min_key())); */ + return result; +} + +/* there is a fundamental problem with optimizing deletes: VFS does it + one file at a time. Another problem is that if an item can be + anything, then deleting items must be done one at a time. It just + seems clean to writes this to specify a from and a to key, and cut + everything between them though. */ + +/* use this function with care if deleting more than what is part of a single file. 
*/ +/* do not use this when cutting a single item, it is suboptimal for that */ + +/* You are encouraged to write plugin specific versions of this. It + cannot be optimal for all plugins because it works item at a time, + and some plugins could sometimes work node at a time. Regular files + however are not optimizable to work node at a time because of + extents needing to free the blocks they point to. + + Optimizations compared to v3 code: + + It does not balance (that task is left to memory pressure code). + + Nodes are deleted only if empty. + + Uses extents. + + Performs read-ahead of formatted nodes whose contents are part of + the deletion. +*/ + +/** + * Delete everything from the reiser4 tree between two keys: @from_key and + * @to_key. + * + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * @object: owner of cutting items. + * @truncate: true if called for file truncate. + * @progress: return true if a progress in file items deletions was made, + * @smallest_removed value is actual in that case. + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that long cut_tree + * operation was interrupted for allowing atom commit . 
+ */ + +int reiser4_cut_tree_object(reiser4_tree * tree, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed_p, + struct inode *object, int truncate, int *progress) +{ + lock_handle lock; + int result; + tap_t tap; + coord_t right_coord; + reiser4_key smallest_removed; + int (*cut_tree_worker) (tap_t *, const reiser4_key *, + const reiser4_key *, reiser4_key *, + struct inode *, int, int *); + STORE_COUNTERS; + + assert("umka-329", tree != NULL); + assert("umka-330", from_key != NULL); + assert("umka-331", to_key != NULL); + assert("zam-936", keyle(from_key, to_key)); + + if (smallest_removed_p == NULL) + smallest_removed_p = &smallest_removed; + + init_lh(&lock); + + do { + /* Find rightmost item to cut away from the tree. */ + result = reiser4_object_lookup(object, to_key, &right_coord, + &lock, ZNODE_WRITE_LOCK, + FIND_MAX_NOT_MORE_THAN, + TWIG_LEVEL, LEAF_LEVEL, + CBK_UNIQUE, NULL /*ra_info */); + if (result != CBK_COORD_FOUND) + break; + if (object == NULL + || inode_file_plugin(object)->cut_tree_worker == NULL) + cut_tree_worker = cut_tree_worker_common; + else + cut_tree_worker = + inode_file_plugin(object)->cut_tree_worker; + reiser4_tap_init(&tap, &right_coord, &lock, ZNODE_WRITE_LOCK); + result = + cut_tree_worker(&tap, from_key, to_key, smallest_removed_p, + object, truncate, progress); + reiser4_tap_done(&tap); + + reiser4_preempt_point(); + + } while (0); + + done_lh(&lock); + + if (result) { + switch (result) { + case -E_NO_NEIGHBOR: + result = 0; + break; + case -E_DEADLOCK: + result = -E_REPEAT; + case -E_REPEAT: + case -ENOMEM: + case -ENOENT: + break; + default: + warning("nikita-2861", "failure: %i", result); + } + } + + CHECK_COUNTERS; + return result; +} + +/* repeat reiser4_cut_tree_object until everything is deleted. + * unlike cut_file_items, it does not end current transaction if -E_REPEAT + * is returned by cut_tree_object. 
*/ +int reiser4_cut_tree(reiser4_tree * tree, const reiser4_key * from, + const reiser4_key * to, struct inode *inode, int truncate) +{ + int result; + int progress; + + do { + result = reiser4_cut_tree_object(tree, from, to, NULL, + inode, truncate, &progress); + } while (result == -E_REPEAT); + + return result; +} + +/* finishing reiser4 initialization */ +int reiser4_init_tree(reiser4_tree * tree /* pointer to structure being + * initialized */ , + const reiser4_block_nr * root_block /* address of a root block + * on a disk */ , + tree_level height /* height of a tree */ , + node_plugin * nplug /* default node plugin */ ) +{ + int result; + + assert("nikita-306", tree != NULL); + assert("nikita-307", root_block != NULL); + assert("nikita-308", height > 0); + assert("nikita-309", nplug != NULL); + assert("zam-587", tree->super != NULL); + assert("edward-171", get_current_context() != NULL); + /* + * We'll perform costly memory allocations for znode hash table, etc. + * So, set proper allocation flags + */ + get_current_context()->gfp_mask |= (__GFP_NOWARN); + + tree->root_block = *root_block; + tree->height = height; + tree->estimate_one_insert = calc_estimate_one_insert(height); + tree->nplug = nplug; + + tree->znode_epoch = 1ull; + + cbk_cache_init(&tree->cbk_cache); + + result = znodes_tree_init(tree); + if (result == 0) + result = jnodes_tree_init(tree); + if (result == 0) { + tree->uber = zget(tree, &UBER_TREE_ADDR, NULL, 0, + reiser4_ctx_gfp_mask_get()); + if (IS_ERR(tree->uber)) { + result = PTR_ERR(tree->uber); + tree->uber = NULL; + } + } + return result; +} + +/* release resources associated with @tree */ +void reiser4_done_tree(reiser4_tree * tree /* tree to release */ ) +{ + if (tree == NULL) + return; + + if (tree->uber != NULL) { + zput(tree->uber); + tree->uber = NULL; + } + znodes_tree_done(tree); + jnodes_tree_done(tree); + cbk_cache_done(&tree->cbk_cache); +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree.h b/fs/reiser4/tree.h new file mode 100644 index 000000000000..fbf8542f37b4 --- /dev/null +++ b/fs/reiser4/tree.h @@ -0,0 +1,577 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Tree operations. See fs/reiser4/tree.c for comments */ + +#if !defined( __REISER4_TREE_H__ ) +#define __REISER4_TREE_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "znode.h" +#include "tap.h" + +#include /* for __u?? */ +#include /* for struct super_block */ +#include +#include /* for struct task_struct */ + +/* fictive block number never actually used */ +extern const reiser4_block_nr UBER_TREE_ADDR; + +/* &cbk_cache_slot - entry in a coord cache. + + This is entry in a coord_by_key (cbk) cache, represented by + &cbk_cache. + +*/ +typedef struct cbk_cache_slot { + /* cached node */ + znode *node; + /* linkage to the next cbk cache slot in a LRU order */ + struct list_head lru; +} cbk_cache_slot; + +/* &cbk_cache - coord cache. This is part of reiser4_tree. + + cbk_cache is supposed to speed up tree lookups by caching results of recent + successful lookups (we don't cache negative results as dentry cache + does). Cache consists of relatively small number of entries kept in a LRU + order. Each entry (&cbk_cache_slot) contains a pointer to znode, from + which we can obtain a range of keys that covered by this znode. Before + embarking into real tree traversal we scan cbk_cache slot by slot and for + each slot check whether key we are looking for is between minimal and + maximal keys for node pointed to by this slot. 
If no match is found, real + tree traversal is performed and if result is successful, appropriate entry + is inserted into cache, possibly pulling least recently used entry out of + it. + + Tree spin lock is used to protect coord cache. If contention for this + lock proves to be too high, more finer grained locking can be added. + + Invariants involving parts of this data-type: + + [cbk-cache-invariant] +*/ +typedef struct cbk_cache { + /* serializator */ + rwlock_t guard; + int nr_slots; + /* head of LRU list of cache slots */ + struct list_head lru; + /* actual array of slots */ + cbk_cache_slot *slot; +} cbk_cache; + +/* level_lookup_result - possible outcome of looking up key at some level. + This is used by coord_by_key when traversing tree downward. */ +typedef enum { + /* continue to the next level */ + LOOKUP_CONT, + /* done. Either required item was found, or we can prove it + doesn't exist, or some error occurred. */ + LOOKUP_DONE, + /* restart traversal from the root. Infamous "repetition". */ + LOOKUP_REST +} level_lookup_result; + +/* This is representation of internal reiser4 tree where all file-system + data and meta-data are stored. This structure is passed to all tree + manipulation functions. It's different from the super block because: + we don't want to limit ourselves to strictly one to one mapping + between super blocks and trees, and, because they are logically + different: there are things in a super block that have no relation to + the tree (bitmaps, journalling area, mount options, etc.) and there + are things in a tree that bear no relation to the super block, like + tree of znodes. + + At this time, there is only one tree + per filesystem, and this struct is part of the super block. We only + call the super block the super block for historical reasons (most + other filesystems call the per filesystem metadata the super block). +*/ + +struct reiser4_tree { + /* block_nr == 0 is fake znode. Write lock it, while changing + tree height. 
*/ + /* disk address of root node of a tree */ + reiser4_block_nr root_block; + + /* level of the root node. If this is 1, tree consists of root + node only */ + tree_level height; + + /* + * this is cached here avoid calling plugins through function + * dereference all the time. + */ + __u64 estimate_one_insert; + + /* cache of recent tree lookup results */ + cbk_cache cbk_cache; + + /* hash table to look up znodes by block number. */ + z_hash_table zhash_table; + z_hash_table zfake_table; + /* hash table to look up jnodes by inode and offset. */ + j_hash_table jhash_table; + + /* lock protecting: + - parent pointers, + - sibling pointers, + - znode hash table + - coord cache + */ + /* NOTE: The "giant" tree lock can be replaced by more spin locks, + hoping they will be less contented. We can use one spin lock per one + znode hash bucket. With adding of some code complexity, sibling + pointers can be protected by both znode spin locks. However it looks + more SMP scalable we should test this locking change on n-ways (n > + 4) SMP machines. Current 4-ways machine test does not show that tree + lock is contented and it is a bottleneck (2003.07.25). */ + + rwlock_t tree_lock; + + /* lock protecting delimiting keys */ + rwlock_t dk_lock; + + /* spin lock protecting znode_epoch */ + spinlock_t epoch_lock; + /* version stamp used to mark znode updates. See seal.[ch] for more + * information. 
*/ + __u64 znode_epoch; + + znode *uber; + node_plugin *nplug; + struct super_block *super; + struct { + /* carry flags used for insertion of new nodes */ + __u32 new_node_flags; + /* carry flags used for insertion of new extents */ + __u32 new_extent_flags; + /* carry flags used for paste operations */ + __u32 paste_flags; + /* carry flags used for insert operations */ + __u32 insert_flags; + } carry; +}; + +extern int reiser4_init_tree(reiser4_tree * tree, + const reiser4_block_nr * root_block, + tree_level height, node_plugin * default_plugin); +extern void reiser4_done_tree(reiser4_tree * tree); + +/* cbk flags: options for coord_by_key() */ +typedef enum { + /* coord_by_key() is called for insertion. This is necessary because + of extents being located at the twig level. For explanation, see + comment just above is_next_item_internal(). + */ + CBK_FOR_INSERT = (1 << 0), + /* coord_by_key() is called with key that is known to be unique */ + CBK_UNIQUE = (1 << 1), + /* coord_by_key() can trust delimiting keys. This options is not user + accessible. coord_by_key() will set it automatically. It will be + only cleared by special-case in extents-on-the-twig-level handling + where it is necessary to insert item with a key smaller than + leftmost key in a node. This is necessary because of extents being + located at the twig level. For explanation, see comment just above + is_next_item_internal(). + */ + CBK_TRUST_DK = (1 << 2), + CBK_READA = (1 << 3), /* original: readahead leaves which contain items of certain file */ + CBK_READDIR_RA = (1 << 4), /* readdir: readahead whole directory and all its stat datas */ + CBK_DKSET = (1 << 5), + CBK_EXTENDED_COORD = (1 << 6), /* coord_t is actually */ + CBK_IN_CACHE = (1 << 7), /* node is already in cache */ + CBK_USE_CRABLOCK = (1 << 8) /* use crab_lock in stead of long term + * lock */ +} cbk_flags; + +/* insertion outcome. 
IBK = insert by key */ +typedef enum { + IBK_INSERT_OK = 0, + IBK_ALREADY_EXISTS = -EEXIST, + IBK_IO_ERROR = -EIO, + IBK_NO_SPACE = -E_NODE_FULL, + IBK_OOM = -ENOMEM +} insert_result; + +#define IS_CBKERR(err) ((err) != CBK_COORD_FOUND && (err) != CBK_COORD_NOTFOUND) + +typedef int (*tree_iterate_actor_t) (reiser4_tree * tree, coord_t * coord, + lock_handle * lh, void *arg); +extern int reiser4_iterate_tree(reiser4_tree * tree, coord_t * coord, + lock_handle * lh, + tree_iterate_actor_t actor, void *arg, + znode_lock_mode mode, int through_units_p); +extern int get_uber_znode(reiser4_tree * tree, znode_lock_mode mode, + znode_lock_request pri, lock_handle * lh); + +/* return node plugin of @node */ +static inline node_plugin *node_plugin_by_node(const znode * + node /* node to query */ ) +{ + assert("vs-213", node != NULL); + assert("vs-214", znode_is_loaded(node)); + + return node->nplug; +} + +/* number of items in @node */ +static inline pos_in_node_t node_num_items(const znode * node) +{ + assert("nikita-2754", znode_is_loaded(node)); + assert("nikita-2468", + node_plugin_by_node(node)->num_of_items(node) == node->nr_items); + + return node->nr_items; +} + +/* Return the number of items at the present node. Asserts coord->node != + NULL. 
*/ +static inline unsigned coord_num_items(const coord_t * coord) +{ + assert("jmacd-9805", coord->node != NULL); + + return node_num_items(coord->node); +} + +/* true if @node is empty */ +static inline int node_is_empty(const znode * node) +{ + return node_num_items(node) == 0; +} + +typedef enum { + SHIFTED_SOMETHING = 0, + SHIFT_NO_SPACE = -E_NODE_FULL, + SHIFT_IO_ERROR = -EIO, + SHIFT_OOM = -ENOMEM, +} shift_result; + +extern node_plugin *node_plugin_by_coord(const coord_t * coord); +extern int is_coord_in_node(const coord_t * coord); +extern int key_in_node(const reiser4_key *, const coord_t *); +extern void coord_item_move_to(coord_t * coord, int items); +extern void coord_unit_move_to(coord_t * coord, int units); + +/* there are two types of repetitive accesses (ra): intra-syscall + (local) and inter-syscall (global). Local ra is used when + during single syscall we add/delete several items and units in the + same place in a tree. Note that plan-A fragments local ra by + separating stat-data and file body in key-space. Global ra is + used when user does repetitive modifications in the same place in a + tree. + + Our ra implementation serves following purposes: + 1 it affects balancing decisions so that next operation in a row + can be performed faster; + 2 it affects lower-level read-ahead in page-cache; + 3 it allows to avoid unnecessary lookups by maintaining some state + across several operations (this is only for local ra); + 4 it leaves room for lazy-micro-balancing: when we start a sequence of + operations they are performed without actually doing any intra-node + shifts, until we finish sequence or scope of sequence leaves + current node, only then we really pack node (local ra only). +*/ + +/* another thing that can be useful is to keep per-tree and/or + per-process cache of recent lookups. This cache can be organised as a + list of block numbers of formatted nodes sorted by starting key in + this node. 
Balancings should invalidate appropriate parts of this + cache. +*/ + +lookup_result coord_by_key(reiser4_tree * tree, const reiser4_key * key, + coord_t * coord, lock_handle * handle, + znode_lock_mode lock, lookup_bias bias, + tree_level lock_level, tree_level stop_level, + __u32 flags, ra_info_t *); + +lookup_result reiser4_object_lookup(struct inode *object, + const reiser4_key * key, + coord_t * coord, + lock_handle * lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, + __u32 flags, ra_info_t * info); + +insert_result insert_by_key(reiser4_tree * tree, const reiser4_key * key, + reiser4_item_data * data, coord_t * coord, + lock_handle * lh, + tree_level stop_level, __u32 flags); +insert_result insert_by_coord(coord_t * coord, + reiser4_item_data * data, const reiser4_key * key, + lock_handle * lh, __u32); +insert_result insert_extent_by_coord(coord_t * coord, + reiser4_item_data * data, + const reiser4_key * key, lock_handle * lh); +int cut_node_content(coord_t * from, coord_t * to, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed); +int kill_node_content(coord_t * from, coord_t * to, + const reiser4_key * from_key, const reiser4_key * to_key, + reiser4_key * smallest_removed, + znode * locked_left_neighbor, struct inode *inode, + int truncate); + +int reiser4_resize_item(coord_t * coord, reiser4_item_data * data, + reiser4_key * key, lock_handle * lh, cop_insert_flag); +int insert_into_item(coord_t * coord, lock_handle * lh, const reiser4_key * key, + reiser4_item_data * data, unsigned); +int reiser4_insert_flow(coord_t * coord, lock_handle * lh, flow_t * f); +int find_new_child_ptr(znode * parent, znode * child, znode * left, + coord_t * result); + +int shift_right_of_but_excluding_insert_coord(coord_t * insert_coord); +int shift_left_of_and_including_insert_coord(coord_t * insert_coord); + +void fake_kill_hook_tail(struct inode *, loff_t start, loff_t end, int); 
+
+/* tree cutting / child-pointer manipulation primitives (tree.c) */
+extern int cut_tree_worker_common(tap_t *, const reiser4_key *,
+				  const reiser4_key *, reiser4_key *,
+				  struct inode *, int, int *);
+extern int reiser4_cut_tree_object(reiser4_tree *, const reiser4_key *,
+				   const reiser4_key *, reiser4_key *,
+				   struct inode *, int, int *);
+extern int reiser4_cut_tree(reiser4_tree * tree, const reiser4_key * from,
+			    const reiser4_key * to, struct inode *, int);
+
+extern int reiser4_delete_node(znode *, reiser4_key *, struct inode *, int);
+extern int check_tree_pointer(const coord_t * pointer, const znode * child);
+extern int find_new_child_ptr(znode * parent, znode * child UNUSED_ARG,
+			      znode * left, coord_t * result);
+extern int find_child_ptr(znode * parent, znode * child, coord_t * result);
+extern int set_child_delimiting_keys(znode * parent, const coord_t * in_parent,
+				     znode * child);
+extern znode *child_znode(const coord_t * in_parent, znode * parent,
+			  int incore_p, int setup_dkeys_p);
+
+/* coord-by-key cache management */
+extern int cbk_cache_init(cbk_cache * cache);
+extern void cbk_cache_done(cbk_cache * cache);
+extern void cbk_cache_invalidate(const znode * node, reiser4_tree * tree);
+
+extern char *sprint_address(const reiser4_block_nr * block);
+
+#if REISER4_DEBUG
+extern void print_coord_content(const char *prefix, coord_t * p);
+extern void reiser4_print_address(const char *prefix,
+				  const reiser4_block_nr * block);
+extern void print_tree_rec(const char *prefix, reiser4_tree * tree,
+			   __u32 flags);
+extern void check_dkeys(znode *node);
+#else
+/* compiled out when debugging is disabled */
+#define print_coord_content(p, c) noop
+#define reiser4_print_address(p, b) noop
+#endif
+
+extern void forget_znode(lock_handle * handle);
+extern int deallocate_znode(znode * node);
+
+extern int is_disk_addr_unallocated(const reiser4_block_nr * addr);
+
+/* struct used internally to pack all numerous arguments of tree lookup.
+   Used to avoid passing a lot of arguments to helper functions. */
+typedef struct cbk_handle {
+	/* tree we are in */
+	reiser4_tree *tree;
+	/* key we are going after */
+	const reiser4_key *key;
+	/* coord we will store result in */
+	coord_t *coord;
+	/* type of lock to take on target node */
+	znode_lock_mode lock_mode;
+	/* lookup bias. See comments at the declaration of lookup_bias */
+	lookup_bias bias;
+	/* lock level: level starting from which tree traversal starts taking
+	 * write locks. */
+	tree_level lock_level;
+	/* level where search will stop. Either item will be found between
+	   lock_level and stop_level, or CBK_COORD_NOTFOUND will be
+	   returned.
+	 */
+	tree_level stop_level;
+	/* level we are currently at */
+	tree_level level;
+	/* block number of @active node. Tree traversal operates on two
+	   nodes: active and parent. */
+	reiser4_block_nr block;
+	/* put here error message to be printed by caller */
+	const char *error;
+	/* result passed back to caller */
+	int result;
+	/* lock handles for active and parent */
+	lock_handle *parent_lh;
+	lock_handle *active_lh;
+	/* left and right delimiting keys of the current node */
+	reiser4_key ld_key;
+	reiser4_key rd_key;
+	/* flags, passed to the cbk routine. Bits of this bitmask are defined
+	   in tree.h:cbk_flags enum. */
+	__u32 flags;
+	/* read-ahead hints for the lookup; may be NULL */
+	ra_info_t *ra_info;
+	/* inode the lookup is performed on behalf of; may be NULL */
+	struct inode *object;
+} cbk_handle;
+
+extern znode_lock_mode cbk_lock_mode(tree_level level, cbk_handle * h);
+
+/* eottl.c */
+extern int handle_eottl(cbk_handle *h, int *outcome);
+
+int lookup_multikey(cbk_handle * handle, int nr_keys);
+int lookup_couple(reiser4_tree * tree,
+		  const reiser4_key * key1, const reiser4_key * key2,
+		  coord_t * coord1, coord_t * coord2,
+		  lock_handle * lh1, lock_handle * lh2,
+		  znode_lock_mode lock_mode, lookup_bias bias,
+		  tree_level lock_level, tree_level stop_level, __u32 flags,
+		  int *result1, int *result2);
+
+/* Take tree->tree_lock for reading and update the per-context LOCK_CNT_*
+   counters that back the lock-ordering assertions below. The entry asserts
+   verify that the caller holds neither the tree lock itself nor any
+   lower-priority spinlock. */
+static inline void read_lock_tree(reiser4_tree *tree)
+{
+	/* check that tree is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_tree) &&
+		    LOCK_CNT_NIL(read_locked_tree) &&
+		    LOCK_CNT_NIL(write_locked_tree)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(spin_locked_stack)));
+
+	read_lock(&(tree->tree_lock));
+
+	LOCK_CNT_INC(read_locked_tree);
+	LOCK_CNT_INC(rw_locked_tree);
+	LOCK_CNT_INC(spin_locked);
+}
+
+/* Release a read-held tree->tree_lock; counterpart of read_lock_tree(). */
+static inline void read_unlock_tree(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(read_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(read_locked_tree);
+	LOCK_CNT_DEC(rw_locked_tree);
+	LOCK_CNT_DEC(spin_locked);
+
+	read_unlock(&(tree->tree_lock));
+}
+
+/* Take tree->tree_lock for writing; same ordering discipline as
+   read_lock_tree(). */
+static inline void write_lock_tree(reiser4_tree *tree)
+{
+	/* check that tree is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_tree) &&
+		    LOCK_CNT_NIL(read_locked_tree) &&
+		    LOCK_CNT_NIL(write_locked_tree)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(spin_locked_stack)));
+
+	write_lock(&(tree->tree_lock));
+
+	LOCK_CNT_INC(write_locked_tree);
+	LOCK_CNT_INC(rw_locked_tree);
+	LOCK_CNT_INC(spin_locked);
+}
+
+/* Release a write-held tree->tree_lock; counterpart of write_lock_tree(). */
+static inline void write_unlock_tree(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(write_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(write_locked_tree);
+	LOCK_CNT_DEC(rw_locked_tree);
+	LOCK_CNT_DEC(spin_locked);
+
+	write_unlock(&(tree->tree_lock));
+}
+
+/* Take tree->dk_lock (delimiting-key lock) for reading. dk ranks below the
+   tree lock and above the lock stack in the lock ordering. */
+static inline void read_lock_dk(reiser4_tree *tree)
+{
+	/* check that dk is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(read_locked_dk) &&
+		    LOCK_CNT_NIL(write_locked_dk)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	read_lock(&((tree)->dk_lock));
+
+	LOCK_CNT_INC(read_locked_dk);
+	LOCK_CNT_INC(rw_locked_dk);
+	LOCK_CNT_INC(spin_locked);
+}
+
+/* Release a read-held tree->dk_lock; counterpart of read_lock_dk(). */
+static inline void read_unlock_dk(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(read_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(read_locked_dk);
+	LOCK_CNT_DEC(rw_locked_dk);
+	LOCK_CNT_DEC(spin_locked);
+
+	read_unlock(&(tree->dk_lock));
+}
+
+/* Take tree->dk_lock for writing; same ordering discipline as
+   read_lock_dk(). */
+static inline void write_lock_dk(reiser4_tree *tree)
+{
+	/* check that dk is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(read_locked_dk) &&
+		    LOCK_CNT_NIL(write_locked_dk)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	write_lock(&((tree)->dk_lock));
+
+	LOCK_CNT_INC(write_locked_dk);
+	LOCK_CNT_INC(rw_locked_dk);
+	LOCK_CNT_INC(spin_locked);
+}
+
+/* Release a write-held tree->dk_lock; counterpart of write_lock_dk(). */
+static inline void write_unlock_dk(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(write_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(write_locked_dk);
+	LOCK_CNT_DEC(rw_locked_dk);
+	LOCK_CNT_DEC(spin_locked);
+
+	write_unlock(&(tree->dk_lock));
+}
+ +/* estimate api. Implementation is in estimate.c */ +reiser4_block_nr estimate_one_insert_item(reiser4_tree *); +reiser4_block_nr estimate_one_insert_into_item(reiser4_tree *); +reiser4_block_nr estimate_insert_flow(tree_level); +reiser4_block_nr estimate_one_item_removal(reiser4_tree *); +reiser4_block_nr calc_estimate_one_insert(tree_level); +reiser4_block_nr estimate_dirty_cluster(struct inode *); +reiser4_block_nr estimate_insert_cluster(struct inode *); +reiser4_block_nr estimate_update_cluster(struct inode *); + +/* __REISER4_TREE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_mod.c b/fs/reiser4/tree_mod.c new file mode 100644 index 000000000000..f9687df8ffb7 --- /dev/null +++ b/fs/reiser4/tree_mod.c @@ -0,0 +1,387 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * Functions to add/delete new nodes to/from the tree. + * + * Functions from this file are used by carry (see carry*) to handle: + * + * . insertion of new formatted node into tree + * + * . addition of new tree root, increasing tree height + * + * . removing tree root, decreasing tree height + * + */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/plugin.h" +#include "jnode.h" +#include "znode.h" +#include "tree_mod.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree.h" +#include "super.h" + +#include + +static int add_child_ptr(znode * parent, znode * child); +/* warning only issued if error is not -E_REPEAT */ +#define ewarning( error, ... ) \ + if( ( error ) != -E_REPEAT ) \ + warning( __VA_ARGS__ ) + +/* allocate new node on the @level and immediately on the right of @brother. 
*/ +znode * reiser4_new_node(znode * brother /* existing left neighbor + * of new node */, + tree_level level /* tree level at which new node is to + * be allocated */) +{ + znode *result; + int retcode; + reiser4_block_nr blocknr; + + assert("nikita-930", brother != NULL); + assert("umka-264", level < REAL_MAX_ZTREE_HEIGHT); + + retcode = assign_fake_blocknr_formatted(&blocknr); + if (retcode == 0) { + result = + zget(znode_get_tree(brother), &blocknr, NULL, level, + reiser4_ctx_gfp_mask_get()); + if (IS_ERR(result)) { + ewarning(PTR_ERR(result), "nikita-929", + "Cannot allocate znode for carry: %li", + PTR_ERR(result)); + return result; + } + /* cheap test, can be executed even when debugging is off */ + if (!znode_just_created(result)) { + warning("nikita-2213", + "Allocated already existing block: %llu", + (unsigned long long)blocknr); + zput(result); + return ERR_PTR(RETERR(-EIO)); + } + + assert("nikita-931", result != NULL); + result->nplug = znode_get_tree(brother)->nplug; + assert("nikita-933", result->nplug != NULL); + + retcode = zinit_new(result, reiser4_ctx_gfp_mask_get()); + if (retcode == 0) { + ZF_SET(result, JNODE_CREATED); + zrelse(result); + } else { + zput(result); + result = ERR_PTR(retcode); + } + } else { + /* failure to allocate new node during balancing. + This should never happen. Ever. Returning -E_REPEAT + is not viable solution, because "out of disk space" + is not transient error that will go away by itself. + */ + ewarning(retcode, "nikita-928", + "Cannot allocate block for carry: %i", retcode); + result = ERR_PTR(retcode); + } + assert("nikita-1071", result != NULL); + return result; +} + +/* allocate new root and add it to the tree + + This helper function is called by add_new_root(). 
+ +*/ +znode *reiser4_add_tree_root(znode * old_root /* existing tree root */ , + znode * fake /* "fake" znode */ ) +{ + reiser4_tree *tree = znode_get_tree(old_root); + znode *new_root = NULL; /* to shut gcc up */ + int result; + + assert("nikita-1069", old_root != NULL); + assert("umka-262", fake != NULL); + assert("umka-263", tree != NULL); + + /* "fake" znode---one always hanging just above current root. This + node is locked when new root is created or existing root is + deleted. Downward tree traversal takes lock on it before taking + lock on a root node. This avoids race conditions with root + manipulations. + + */ + assert("nikita-1348", znode_above_root(fake)); + assert("nikita-1211", znode_is_root(old_root)); + + result = 0; + if (tree->height >= REAL_MAX_ZTREE_HEIGHT) { + warning("nikita-1344", "Tree is too tall: %i", tree->height); + /* ext2 returns -ENOSPC when it runs out of free inodes with a + following comment (fs/ext2/ialloc.c:441): Is it really + ENOSPC? + + -EXFULL? -EINVAL? + */ + result = RETERR(-ENOSPC); + } else { + /* Allocate block for new root. It's not that + important where it will be allocated, as root is + almost always in memory. Moreover, allocate on + flush can be going here. + */ + assert("nikita-1448", znode_is_root(old_root)); + new_root = reiser4_new_node(fake, tree->height + 1); + if (!IS_ERR(new_root) && (result = zload(new_root)) == 0) { + lock_handle rlh; + + init_lh(&rlh); + result = + longterm_lock_znode(&rlh, new_root, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + parent_coord_t *in_parent; + + znode_make_dirty(fake); + + /* new root is a child of "fake" node */ + write_lock_tree(tree); + + ++tree->height; + + /* recalculate max balance overhead */ + tree->estimate_one_insert = + calc_estimate_one_insert(tree->height); + + tree->root_block = *znode_get_block(new_root); + in_parent = &new_root->in_parent; + init_parent_coord(in_parent, fake); + /* manually insert new root into sibling + * list. 
With this all nodes involved into + * balancing are connected after balancing is + * done---useful invariant to check. */ + sibling_list_insert_nolock(new_root, NULL); + write_unlock_tree(tree); + + /* insert into new root pointer to the + @old_root. */ + assert("nikita-1110", + WITH_DATA(new_root, + node_is_empty(new_root))); + write_lock_dk(tree); + znode_set_ld_key(new_root, reiser4_min_key()); + znode_set_rd_key(new_root, reiser4_max_key()); + write_unlock_dk(tree); + if (REISER4_DEBUG) { + ZF_CLR(old_root, JNODE_LEFT_CONNECTED); + ZF_CLR(old_root, JNODE_RIGHT_CONNECTED); + ZF_SET(old_root, JNODE_ORPHAN); + } + result = add_child_ptr(new_root, old_root); + done_lh(&rlh); + } + zrelse(new_root); + } + } + if (result != 0) + new_root = ERR_PTR(result); + return new_root; +} + +/* build &reiser4_item_data for inserting child pointer + + Build &reiser4_item_data that can be later used to insert pointer to @child + in its parent. + +*/ +void build_child_ptr_data(znode * child /* node pointer to which will be + * inserted */ , + reiser4_item_data * data /* where to store result */ ) +{ + assert("nikita-1116", child != NULL); + assert("nikita-1117", data != NULL); + + /* + * NOTE: use address of child's blocknr as address of data to be + * inserted. As result of this data gets into on-disk structure in cpu + * byte order. internal's create_hook converts it to little endian byte + * order. + */ + data->data = (char *)znode_get_block(child); + /* data -> data is kernel space */ + data->user = 0; + data->length = sizeof(reiser4_block_nr); + /* FIXME-VS: hardcoded internal item? */ + + /* AUDIT: Is it possible that "item_plugin_by_id" may find nothing? */ + data->iplug = item_plugin_by_id(NODE_POINTER_ID); +} + +/* add pointer to @child into empty @parent. + + This is used when pointer to old root is inserted into new root which is + empty. 
+*/ +static int add_child_ptr(znode * parent, znode * child) +{ + coord_t coord; + reiser4_item_data data; + int result; + reiser4_key key; + + assert("nikita-1111", parent != NULL); + assert("nikita-1112", child != NULL); + assert("nikita-1115", + znode_get_level(parent) == znode_get_level(child) + 1); + + result = zload(parent); + if (result != 0) + return result; + assert("nikita-1113", node_is_empty(parent)); + coord_init_first_unit(&coord, parent); + + build_child_ptr_data(child, &data); + data.arg = NULL; + + read_lock_dk(znode_get_tree(parent)); + key = *znode_get_ld_key(child); + read_unlock_dk(znode_get_tree(parent)); + + result = node_plugin_by_node(parent)->create_item(&coord, &key, &data, + NULL); + znode_make_dirty(parent); + zrelse(parent); + return result; +} + +/* actually remove tree root */ +static int reiser4_kill_root(reiser4_tree * tree /* tree from which root is + * being removed */, + znode * old_root /* root node that is being + * removed */ , + znode * new_root /* new root---sole child of + * @old_root */, + const reiser4_block_nr * new_root_blk /* disk address of + * @new_root */) +{ + znode *uber; + int result; + lock_handle handle_for_uber; + + assert("umka-265", tree != NULL); + assert("nikita-1198", new_root != NULL); + assert("nikita-1199", + znode_get_level(new_root) + 1 == znode_get_level(old_root)); + + assert("nikita-1201", znode_is_write_locked(old_root)); + + assert("nikita-1203", + disk_addr_eq(new_root_blk, znode_get_block(new_root))); + + init_lh(&handle_for_uber); + /* obtain and lock "fake" znode protecting changes in tree height. */ + result = get_uber_znode(tree, ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI, + &handle_for_uber); + if (result == 0) { + uber = handle_for_uber.node; + + znode_make_dirty(uber); + + /* don't take long term lock a @new_root. Take spinlock. 
*/ + + write_lock_tree(tree); + + tree->root_block = *new_root_blk; + --tree->height; + + /* recalculate max balance overhead */ + tree->estimate_one_insert = + calc_estimate_one_insert(tree->height); + + assert("nikita-1202", + tree->height == znode_get_level(new_root)); + + /* new root is child on "fake" node */ + init_parent_coord(&new_root->in_parent, uber); + ++uber->c_count; + + /* sibling_list_insert_nolock(new_root, NULL); */ + write_unlock_tree(tree); + + /* reinitialise old root. */ + result = init_znode(ZJNODE(old_root)); + znode_make_dirty(old_root); + if (result == 0) { + assert("nikita-1279", node_is_empty(old_root)); + ZF_SET(old_root, JNODE_HEARD_BANSHEE); + old_root->c_count = 0; + } + } + done_lh(&handle_for_uber); + + return result; +} + +/* remove tree root + + This function removes tree root, decreasing tree height by one. Tree root + and its only child (that is going to become new tree root) are write locked + at the entry. + + To remove tree root we need to take lock on special "fake" znode that + protects changes of tree height. See comments in reiser4_add_tree_root() for + more on this. + + Also parent pointers have to be updated in + old and new root. To simplify code, function is split into two parts: outer + reiser4_kill_tree_root() collects all necessary arguments and calls + reiser4_kill_root() to do the actual job. 
+ +*/ +int reiser4_kill_tree_root(znode * old_root /* tree root that we are + removing*/) +{ + int result; + coord_t down_link; + znode *new_root; + reiser4_tree *tree; + + assert("umka-266", current_tree != NULL); + assert("nikita-1194", old_root != NULL); + assert("nikita-1196", znode_is_root(old_root)); + assert("nikita-1200", node_num_items(old_root) == 1); + assert("nikita-1401", znode_is_write_locked(old_root)); + + coord_init_first_unit(&down_link, old_root); + + tree = znode_get_tree(old_root); + new_root = child_znode(&down_link, old_root, 0, 1); + if (!IS_ERR(new_root)) { + result = + reiser4_kill_root(tree, old_root, new_root, + znode_get_block(new_root)); + zput(new_root); + } else + result = PTR_ERR(new_root); + + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_mod.h b/fs/reiser4/tree_mod.h new file mode 100644 index 000000000000..151964117f26 --- /dev/null +++ b/fs/reiser4/tree_mod.h @@ -0,0 +1,29 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Functions to add/delete new nodes to/from the tree. See tree_mod.c for + * comments. */ + +#if !defined( __REISER4_TREE_MOD_H__ ) +#define __REISER4_TREE_MOD_H__ + +#include "forward.h" + +znode *reiser4_new_node(znode * brother, tree_level level); +znode *reiser4_add_tree_root(znode * old_root, znode * fake); +int reiser4_kill_tree_root(znode * old_root); +void build_child_ptr_data(znode * child, reiser4_item_data * data); + +/* __REISER4_TREE_MOD_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_walk.c b/fs/reiser4/tree_walk.c new file mode 100644 index 000000000000..cde4875b4481 --- /dev/null +++ b/fs/reiser4/tree_walk.c @@ -0,0 +1,927 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Routines and macros to: + + get_left_neighbor() + + get_right_neighbor() + + get_parent() + + get_first_child() + + get_last_child() + + various routines to walk the whole tree and do things to it like + repack it, or move it to tertiary storage. Please make them as + generic as is reasonable. + +*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "jnode.h" +#include "znode.h" +#include "tree_walk.h" +#include "tree.h" +#include "super.h" + +/* These macros are used internally in tree_walk.c in attempt to make + lock_neighbor() code usable to build lock_parent(), lock_right_neighbor, + lock_left_neighbor */ +#define GET_NODE_BY_PTR_OFFSET(node, off) (*(znode**)(((unsigned long)(node)) + (off))) +#define FIELD_OFFSET(name) offsetof(znode, name) +#define PARENT_PTR_OFFSET FIELD_OFFSET(in_parent.node) +#define LEFT_PTR_OFFSET FIELD_OFFSET(left) +#define RIGHT_PTR_OFFSET FIELD_OFFSET(right) + +/* This is the generic procedure to get and lock `generic' neighbor (left or + right neighbor or parent). It implements common algorithm for all cases of + getting lock on neighbor node, only znode structure field is different in + each case. This is parameterized by ptr_offset argument, which is byte + offset for the pointer to the desired neighbor within the current node's + znode structure. 
This function should be called with the tree lock held */ +static int lock_neighbor( + /* resulting lock handle */ + lock_handle * result, + /* znode to lock */ + znode * node, + /* pointer to neighbor (or parent) znode field offset, in bytes from + the base address of znode structure */ + int ptr_offset, + /* lock mode for longterm_lock_znode call */ + znode_lock_mode mode, + /* lock request for longterm_lock_znode call */ + znode_lock_request req, + /* GN_* flags */ + int flags, int rlocked) +{ + reiser4_tree *tree = znode_get_tree(node); + znode *neighbor; + int ret; + + assert("umka-236", node != NULL); + assert("umka-237", tree != NULL); + assert_rw_locked(&(tree->tree_lock)); + + if (flags & GN_TRY_LOCK) + req |= ZNODE_LOCK_NONBLOCK; + if (flags & GN_SAME_ATOM) + req |= ZNODE_LOCK_DONT_FUSE; + + /* get neighbor's address by using of sibling link, quit while loop + (and return) if link is not available. */ + while (1) { + neighbor = GET_NODE_BY_PTR_OFFSET(node, ptr_offset); + + /* return -E_NO_NEIGHBOR if parent or side pointer is NULL or if + * node pointed by it is not connected. + * + * However, GN_ALLOW_NOT_CONNECTED option masks "connected" + * check and allows passing reference to not connected znode to + * subsequent longterm_lock_znode() call. This kills possible + * busy loop if we are trying to get longterm lock on locked but + * not yet connected parent node. */ + if (neighbor == NULL || !((flags & GN_ALLOW_NOT_CONNECTED) + || znode_is_connected(neighbor))) { + return RETERR(-E_NO_NEIGHBOR); + } + + /* protect it from deletion. */ + zref(neighbor); + + rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree); + + ret = longterm_lock_znode(result, neighbor, mode, req); + + /* The lock handle obtains its own reference, release the one from above. */ + zput(neighbor); + + rlocked ? read_lock_tree(tree) : write_lock_tree(tree); + + /* restart if node we got reference to is being + invalidated. we should not get reference to this node + again. 
*/ + if (ret == -EINVAL) + continue; + if (ret) + return ret; + + /* check if neighbor link still points to just locked znode; + the link could have been changed while the process slept. */ + if (neighbor == GET_NODE_BY_PTR_OFFSET(node, ptr_offset)) + return 0; + + /* znode was locked by mistake; unlock it and restart locking + process from beginning. */ + rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree); + longterm_unlock_znode(result); + rlocked ? read_lock_tree(tree) : write_lock_tree(tree); + } +} + +/* get parent node with longterm lock, accepts GN* flags. */ +int reiser4_get_parent_flags(lock_handle * lh /* resulting lock handle */ , + znode * node /* child node */ , + znode_lock_mode mode + /* type of lock: read or write */ , + int flags /* GN_* flags */ ) +{ + int result; + + read_lock_tree(znode_get_tree(node)); + result = lock_neighbor(lh, node, PARENT_PTR_OFFSET, mode, + ZNODE_LOCK_HIPRI, flags, 1); + read_unlock_tree(znode_get_tree(node)); + return result; +} + +/* wrapper function to lock right or left neighbor depending on GN_GO_LEFT + bit in @flags parameter */ +/* Audited by: umka (2002.06.14) */ +static inline int +lock_side_neighbor(lock_handle * result, + znode * node, znode_lock_mode mode, int flags, int rlocked) +{ + int ret; + int ptr_offset; + znode_lock_request req; + + if (flags & GN_GO_LEFT) { + ptr_offset = LEFT_PTR_OFFSET; + req = ZNODE_LOCK_LOPRI; + } else { + ptr_offset = RIGHT_PTR_OFFSET; + req = ZNODE_LOCK_HIPRI; + } + + ret = + lock_neighbor(result, node, ptr_offset, mode, req, flags, rlocked); + + if (ret == -E_NO_NEIGHBOR) /* if we walk left or right -E_NO_NEIGHBOR does not + * guarantee that neighbor is absent in the + * tree; in this case we return -ENOENT -- + * means neighbor at least not found in + * cache */ + return RETERR(-ENOENT); + + return ret; +} + +#if REISER4_DEBUG + +int check_sibling_list(znode * node) +{ + znode *scan; + znode *next; + + assert("nikita-3283", LOCK_CNT_GTZ(write_locked_tree)); + + if 
(node == NULL) + return 1; + + if (ZF_ISSET(node, JNODE_RIP)) + return 1; + + assert("nikita-3270", node != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->tree_lock)); + + for (scan = node; znode_is_left_connected(scan); scan = next) { + next = scan->left; + if (next != NULL && !ZF_ISSET(next, JNODE_RIP)) { + assert("nikita-3271", znode_is_right_connected(next)); + assert("nikita-3272", next->right == scan); + } else + break; + } + for (scan = node; znode_is_right_connected(scan); scan = next) { + next = scan->right; + if (next != NULL && !ZF_ISSET(next, JNODE_RIP)) { + assert("nikita-3273", znode_is_left_connected(next)); + assert("nikita-3274", next->left == scan); + } else + break; + } + return 1; +} + +#endif + +/* Znode sibling pointers maintenence. */ + +/* Znode sibling pointers are established between any neighbored nodes which are + in cache. There are two znode state bits (JNODE_LEFT_CONNECTED, + JNODE_RIGHT_CONNECTED), if left or right sibling pointer contains actual + value (even NULL), corresponded JNODE_*_CONNECTED bit is set. + + Reiser4 tree operations which may allocate new znodes (CBK, tree balancing) + take care about searching (hash table lookup may be required) of znode + neighbors, establishing sibling pointers between them and setting + JNODE_*_CONNECTED state bits. */ + +/* adjusting of sibling pointers and `connected' states for two + neighbors; works if one neighbor is NULL (was not found). 
*/ + +/* FIXME-VS: this is unstatic-ed to use in tree.c in prepare_twig_cut */ +void link_left_and_right(znode * left, znode * right) +{ + assert("nikita-3275", check_sibling_list(left)); + assert("nikita-3275", check_sibling_list(right)); + + if (left != NULL) { + if (left->right == NULL) { + left->right = right; + ZF_SET(left, JNODE_RIGHT_CONNECTED); + + ON_DEBUG(left->right_version = + atomic_inc_return(&delim_key_version); + ); + + } else if (ZF_ISSET(left->right, JNODE_HEARD_BANSHEE) + && left->right != right) { + + ON_DEBUG(left->right->left_version = + atomic_inc_return(&delim_key_version); + left->right_version = + atomic_inc_return(&delim_key_version);); + + left->right->left = NULL; + left->right = right; + ZF_SET(left, JNODE_RIGHT_CONNECTED); + } else + /* + * there is a race condition in renew_sibling_link() + * and assertions below check that it is only one + * there. Thread T1 calls renew_sibling_link() without + * GN_NO_ALLOC flag. zlook() doesn't find neighbor + * node, but before T1 gets to the + * link_left_and_right(), another thread T2 creates + * neighbor node and connects it. check for + * left->right == NULL above protects T1 from + * overwriting correct left->right pointer installed + * by T2. 
+ */ + assert("nikita-3302", + right == NULL || left->right == right); + } + if (right != NULL) { + if (right->left == NULL) { + right->left = left; + ZF_SET(right, JNODE_LEFT_CONNECTED); + + ON_DEBUG(right->left_version = + atomic_inc_return(&delim_key_version); + ); + + } else if (ZF_ISSET(right->left, JNODE_HEARD_BANSHEE) + && right->left != left) { + + ON_DEBUG(right->left->right_version = + atomic_inc_return(&delim_key_version); + right->left_version = + atomic_inc_return(&delim_key_version);); + + right->left->right = NULL; + right->left = left; + ZF_SET(right, JNODE_LEFT_CONNECTED); + + } else + assert("nikita-3303", + left == NULL || right->left == left); + } + assert("nikita-3275", check_sibling_list(left)); + assert("nikita-3275", check_sibling_list(right)); +} + +/* Audited by: umka (2002.06.14) */ +static void link_znodes(znode * first, znode * second, int to_left) +{ + if (to_left) + link_left_and_right(second, first); + else + link_left_and_right(first, second); +} + +/* getting of next (to left or to right, depend on gn_to_left bit in flags) + coord's unit position in horizontal direction, even across node + boundary. Should be called under tree lock, it protects nonexistence of + sibling link on parent level, if lock_side_neighbor() fails with + -ENOENT. */ +static int far_next_coord(coord_t * coord, lock_handle * handle, int flags) +{ + int ret; + znode *node; + reiser4_tree *tree; + + assert("umka-243", coord != NULL); + assert("umka-244", handle != NULL); + assert("zam-1069", handle->node == NULL); + + ret = + (flags & GN_GO_LEFT) ? coord_prev_unit(coord) : + coord_next_unit(coord); + if (!ret) + return 0; + + ret = + lock_side_neighbor(handle, coord->node, ZNODE_READ_LOCK, flags, 0); + if (ret) + return ret; + + node = handle->node; + tree = znode_get_tree(node); + write_unlock_tree(tree); + + coord_init_zero(coord); + + /* We avoid synchronous read here if it is specified by flag. 
*/ + if ((flags & GN_ASYNC) && znode_page(handle->node) == NULL) { + ret = jstartio(ZJNODE(handle->node)); + if (!ret) + ret = -E_REPEAT; + goto error_locked; + } + + /* corresponded zrelse() should be called by the clients of + far_next_coord(), in place when this node gets unlocked. */ + ret = zload(handle->node); + if (ret) + goto error_locked; + + if (flags & GN_GO_LEFT) + coord_init_last_unit(coord, node); + else + coord_init_first_unit(coord, node); + + if (0) { + error_locked: + longterm_unlock_znode(handle); + } + write_lock_tree(tree); + return ret; +} + +/* Very significant function which performs a step in horizontal direction + when sibling pointer is not available. Actually, it is only function which + does it. + Note: this function does not restore locking status at exit, + caller should does care about proper unlocking and zrelsing */ +static int +renew_sibling_link(coord_t * coord, lock_handle * handle, znode * child, + tree_level level, int flags, int *nr_locked) +{ + int ret; + int to_left = flags & GN_GO_LEFT; + reiser4_block_nr da; + /* parent of the neighbor node; we set it to parent until not sharing + of one parent between child and neighbor node is detected */ + znode *side_parent = coord->node; + reiser4_tree *tree = znode_get_tree(child); + znode *neighbor = NULL; + + assert("umka-245", coord != NULL); + assert("umka-246", handle != NULL); + assert("umka-247", child != NULL); + assert("umka-303", tree != NULL); + + init_lh(handle); + write_lock_tree(tree); + ret = far_next_coord(coord, handle, flags); + + if (ret) { + if (ret != -ENOENT) { + write_unlock_tree(tree); + return ret; + } + } else { + item_plugin *iplug; + + if (handle->node != NULL) { + (*nr_locked)++; + side_parent = handle->node; + } + + /* does coord object points to internal item? We do not + support sibling pointers between znode for formatted and + unformatted nodes and return -E_NO_NEIGHBOR in that case. 
*/ + iplug = item_plugin_by_coord(coord); + if (!item_is_internal(coord)) { + link_znodes(child, NULL, to_left); + write_unlock_tree(tree); + /* we know there can't be formatted neighbor */ + return RETERR(-E_NO_NEIGHBOR); + } + write_unlock_tree(tree); + + iplug->s.internal.down_link(coord, NULL, &da); + + if (flags & GN_NO_ALLOC) { + neighbor = zlook(tree, &da); + } else { + neighbor = + zget(tree, &da, side_parent, level, + reiser4_ctx_gfp_mask_get()); + } + + if (IS_ERR(neighbor)) { + ret = PTR_ERR(neighbor); + return ret; + } + + if (neighbor) + /* update delimiting keys */ + set_child_delimiting_keys(coord->node, coord, neighbor); + + write_lock_tree(tree); + } + + if (likely(neighbor == NULL || + (znode_get_level(child) == znode_get_level(neighbor) + && child != neighbor))) + link_znodes(child, neighbor, to_left); + else { + warning("nikita-3532", + "Sibling nodes on the different levels: %i != %i\n", + znode_get_level(child), znode_get_level(neighbor)); + ret = RETERR(-EIO); + } + + write_unlock_tree(tree); + + /* if GN_NO_ALLOC isn't set we keep reference to neighbor znode */ + if (neighbor != NULL && (flags & GN_NO_ALLOC)) + /* atomic_dec(&ZJNODE(neighbor)->x_count); */ + zput(neighbor); + + return ret; +} + +/* This function is for establishing of one side relation. 
*/ +/* Audited by: umka (2002.06.14) */ +static int connect_one_side(coord_t * coord, znode * node, int flags) +{ + coord_t local; + lock_handle handle; + int nr_locked; + int ret; + + assert("umka-248", coord != NULL); + assert("umka-249", node != NULL); + + coord_dup_nocheck(&local, coord); + + init_lh(&handle); + + ret = + renew_sibling_link(&local, &handle, node, znode_get_level(node), + flags | GN_NO_ALLOC, &nr_locked); + + if (handle.node != NULL) { + /* complementary operations for zload() and lock() in far_next_coord() */ + zrelse(handle.node); + longterm_unlock_znode(&handle); + } + + /* we catch error codes which are not interesting for us because we + run renew_sibling_link() only for znode connection. */ + if (ret == -ENOENT || ret == -E_NO_NEIGHBOR) + return 0; + + return ret; +} + +/* if @child is not in `connected' state, performs hash searches for left and + right neighbor nodes and establishes horizontal sibling links */ +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +int connect_znode(coord_t * parent_coord, znode * child) +{ + reiser4_tree *tree = znode_get_tree(child); + int ret = 0; + + assert("zam-330", parent_coord != NULL); + assert("zam-331", child != NULL); + assert("zam-332", parent_coord->node != NULL); + assert("umka-305", tree != NULL); + + /* it is trivial to `connect' root znode because it can't have + neighbors */ + if (znode_above_root(parent_coord->node)) { + child->left = NULL; + child->right = NULL; + ZF_SET(child, JNODE_LEFT_CONNECTED); + ZF_SET(child, JNODE_RIGHT_CONNECTED); + + ON_DEBUG(child->left_version = + atomic_inc_return(&delim_key_version); + child->right_version = + atomic_inc_return(&delim_key_version);); + + return 0; + } + + /* load parent node */ + coord_clear_iplug(parent_coord); + ret = zload(parent_coord->node); + + if (ret != 0) + return ret; + + /* protect `connected' state check by tree_lock */ + read_lock_tree(tree); + + if (!znode_is_right_connected(child)) { + read_unlock_tree(tree); + /* 
connect right (default is right) */ + ret = connect_one_side(parent_coord, child, GN_NO_ALLOC); + if (ret) + goto zrelse_and_ret; + + read_lock_tree(tree); + } + + ret = znode_is_left_connected(child); + + read_unlock_tree(tree); + + if (!ret) { + ret = + connect_one_side(parent_coord, child, + GN_NO_ALLOC | GN_GO_LEFT); + } else + ret = 0; + + zrelse_and_ret: + zrelse(parent_coord->node); + + return ret; +} + +/* this function is like renew_sibling_link() but allocates neighbor node if + it doesn't exist and `connects' it. It may require making two steps in + horizontal direction, first one for neighbor node finding/allocation, + second one is for finding neighbor of neighbor to connect freshly allocated + znode. */ +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +static int +renew_neighbor(coord_t * coord, znode * node, tree_level level, int flags) +{ + coord_t local; + lock_handle empty[2]; + reiser4_tree *tree = znode_get_tree(node); + znode *neighbor = NULL; + int nr_locked = 0; + int ret; + + assert("umka-250", coord != NULL); + assert("umka-251", node != NULL); + assert("umka-307", tree != NULL); + assert("umka-308", level <= tree->height); + + /* umka (2002.06.14) + Here probably should be a check for given "level" validness. + Something like assert("xxx-yyy", level < REAL_MAX_ZTREE_HEIGHT); + */ + + coord_dup(&local, coord); + + ret = + renew_sibling_link(&local, &empty[0], node, level, + flags & ~GN_NO_ALLOC, &nr_locked); + if (ret) + goto out; + + /* tree lock is not needed here because we keep parent node(s) locked + and reference to neighbor znode incremented */ + neighbor = (flags & GN_GO_LEFT) ? 
node->left : node->right; + + read_lock_tree(tree); + ret = znode_is_connected(neighbor); + read_unlock_tree(tree); + if (ret) { + ret = 0; + goto out; + } + + ret = + renew_sibling_link(&local, &empty[nr_locked], neighbor, level, + flags | GN_NO_ALLOC, &nr_locked); + /* second renew_sibling_link() call is used for znode connection only, + so we can live with these errors */ + if (-ENOENT == ret || -E_NO_NEIGHBOR == ret) + ret = 0; + + out: + + for (--nr_locked; nr_locked >= 0; --nr_locked) { + zrelse(empty[nr_locked].node); + longterm_unlock_znode(&empty[nr_locked]); + } + + if (neighbor != NULL) + /* decrement znode reference counter without actually + releasing it. */ + atomic_dec(&ZJNODE(neighbor)->x_count); + + return ret; +} + +/* + reiser4_get_neighbor() -- lock node's neighbor. + + reiser4_get_neighbor() locks node's neighbor (left or right one, depends on + given parameter) using sibling link to it. If sibling link is not available + (i.e. neighbor znode is not in cache) and flags allow read blocks, we go one + level up for information about neighbor's disk address. We lock node's + parent, if it is common parent for both 'node' and its neighbor, neighbor's + disk address is in next (to left or to right) down link from link that points + to original node. If not, we need to lock parent's neighbor, read its content + and take first(last) downlink with neighbor's disk address. That locking + could be done by using sibling link and lock_neighbor() function, if sibling + link exists. In another case we have to go level up again until we find + common parent or valid sibling link. Then go down + allocating/connecting/locking/reading nodes until neighbor of first one is + locked. + + @neighbor: result lock handle, + @node: a node which we lock neighbor of, + @lock_mode: lock mode {LM_READ, LM_WRITE}, + @flags: logical OR of {GN_*} (see description above) subset. 
+ + @return: 0 if success, negative value if lock was impossible due to an error + or lack of neighbor node. +*/ + +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +int +reiser4_get_neighbor(lock_handle * neighbor, znode * node, + znode_lock_mode lock_mode, int flags) +{ + reiser4_tree *tree = znode_get_tree(node); + lock_handle path[REAL_MAX_ZTREE_HEIGHT]; + + coord_t coord; + + tree_level base_level; + tree_level h = 0; + int ret; + + assert("umka-252", tree != NULL); + assert("umka-253", neighbor != NULL); + assert("umka-254", node != NULL); + + base_level = znode_get_level(node); + + assert("umka-310", base_level <= tree->height); + + coord_init_zero(&coord); + + again: + /* first, we try to use simple lock_neighbor() which requires sibling + link existence */ + read_lock_tree(tree); + ret = lock_side_neighbor(neighbor, node, lock_mode, flags, 1); + read_unlock_tree(tree); + if (!ret) { + /* load znode content if it was specified */ + if (flags & GN_LOAD_NEIGHBOR) { + ret = zload(node); + if (ret) + longterm_unlock_znode(neighbor); + } + return ret; + } + + /* only -ENOENT means we may look upward and try to connect + @node with its neighbor (if @flags allow us to do it) */ + if (ret != -ENOENT || !(flags & GN_CAN_USE_UPPER_LEVELS)) + return ret; + + /* before establishing of sibling link we lock parent node; it is + required by renew_neighbor() to work. */ + init_lh(&path[0]); + ret = reiser4_get_parent(&path[0], node, ZNODE_READ_LOCK); + if (ret) + return ret; + if (znode_above_root(path[0].node)) { + longterm_unlock_znode(&path[0]); + return RETERR(-E_NO_NEIGHBOR); + } + + while (1) { + znode *child = (h == 0) ? 
node : path[h - 1].node; + znode *parent = path[h].node; + + ret = zload(parent); + if (ret) + break; + + ret = find_child_ptr(parent, child, &coord); + + if (ret) { + zrelse(parent); + break; + } + + /* try to establish missing sibling link */ + ret = renew_neighbor(&coord, child, h + base_level, flags); + + zrelse(parent); + + switch (ret) { + case 0: + /* unlocking of parent znode prevents simple + deadlock situation */ + done_lh(&path[h]); + + /* depend on tree level we stay on we repeat first + locking attempt ... */ + if (h == 0) + goto again; + + /* ... or repeat establishing of sibling link at + one level below. */ + --h; + break; + + case -ENOENT: + /* sibling link is not available -- we go + upward. */ + init_lh(&path[h + 1]); + ret = + reiser4_get_parent(&path[h + 1], parent, + ZNODE_READ_LOCK); + if (ret) + goto fail; + ++h; + if (znode_above_root(path[h].node)) { + ret = RETERR(-E_NO_NEIGHBOR); + goto fail; + } + break; + + case -E_DEADLOCK: + /* there was lock request from hi-pri locker. if + it is possible we unlock last parent node and + re-lock it again. */ + for (; reiser4_check_deadlock(); h--) { + done_lh(&path[h]); + if (h == 0) + goto fail; + } + + break; + + default: /* other errors. 
*/ + goto fail; + } + } + fail: + ON_DEBUG(check_lock_node_data(node)); + ON_DEBUG(check_lock_data()); + + /* unlock path */ + do { + /* FIXME-Zam: when we get here from case -E_DEADLOCK's goto + fail; path[0] is already done_lh-ed, therefore + longterm_unlock_znode(&path[h]); is not applicable */ + done_lh(&path[h]); + --h; + } while (h + 1 != 0); + + return ret; +} + +/* remove node from sibling list */ +/* Audited by: umka (2002.06.14) */ +void sibling_list_remove(znode * node) +{ + reiser4_tree *tree; + + tree = znode_get_tree(node); + assert("umka-255", node != NULL); + assert_rw_write_locked(&(tree->tree_lock)); + assert("nikita-3275", check_sibling_list(node)); + + write_lock_dk(tree); + if (znode_is_right_connected(node) && node->right != NULL && + znode_is_left_connected(node) && node->left != NULL) { + assert("zam-32245", + keyeq(znode_get_rd_key(node), + znode_get_ld_key(node->right))); + znode_set_rd_key(node->left, znode_get_ld_key(node->right)); + } + write_unlock_dk(tree); + + if (znode_is_right_connected(node) && node->right != NULL) { + assert("zam-322", znode_is_left_connected(node->right)); + node->right->left = node->left; + ON_DEBUG(node->right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + if (znode_is_left_connected(node) && node->left != NULL) { + assert("zam-323", znode_is_right_connected(node->left)); + node->left->right = node->right; + ON_DEBUG(node->left->right_version = + atomic_inc_return(&delim_key_version); + ); + } + + ZF_CLR(node, JNODE_LEFT_CONNECTED); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + ON_DEBUG(node->left = node->right = NULL; + node->left_version = atomic_inc_return(&delim_key_version); + node->right_version = atomic_inc_return(&delim_key_version);); + assert("nikita-3276", check_sibling_list(node)); +} + +/* disconnect node from sibling list */ +void sibling_list_drop(znode * node) +{ + znode *right; + znode *left; + + assert("nikita-2464", node != NULL); + assert("nikita-3277", 
check_sibling_list(node)); + + right = node->right; + if (right != NULL) { + assert("nikita-2465", znode_is_left_connected(right)); + right->left = NULL; + ON_DEBUG(right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + left = node->left; + if (left != NULL) { + assert("zam-323", znode_is_right_connected(left)); + left->right = NULL; + ON_DEBUG(left->right_version = + atomic_inc_return(&delim_key_version); + ); + } + ZF_CLR(node, JNODE_LEFT_CONNECTED); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + ON_DEBUG(node->left = node->right = NULL; + node->left_version = atomic_inc_return(&delim_key_version); + node->right_version = atomic_inc_return(&delim_key_version);); +} + +/* Insert new node into sibling list. Regular balancing inserts new node + after (at right side) existing and locked node (@before), except one case + of adding new tree root node. @before should be NULL in that case. */ +void sibling_list_insert_nolock(znode * new, znode * before) +{ + assert("zam-334", new != NULL); + assert("nikita-3298", !znode_is_left_connected(new)); + assert("nikita-3299", !znode_is_right_connected(new)); + assert("nikita-3300", new->left == NULL); + assert("nikita-3301", new->right == NULL); + assert("nikita-3278", check_sibling_list(new)); + assert("nikita-3279", check_sibling_list(before)); + + if (before != NULL) { + assert("zam-333", znode_is_connected(before)); + new->right = before->right; + new->left = before; + ON_DEBUG(new->right_version = + atomic_inc_return(&delim_key_version); + new->left_version = + atomic_inc_return(&delim_key_version);); + if (before->right != NULL) { + before->right->left = new; + ON_DEBUG(before->right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + before->right = new; + ON_DEBUG(before->right_version = + atomic_inc_return(&delim_key_version); + ); + } else { + new->right = NULL; + new->left = NULL; + ON_DEBUG(new->right_version = + atomic_inc_return(&delim_key_version); + new->left_version = + 
atomic_inc_return(&delim_key_version);); + } + ZF_SET(new, JNODE_LEFT_CONNECTED); + ZF_SET(new, JNODE_RIGHT_CONNECTED); + assert("nikita-3280", check_sibling_list(new)); + assert("nikita-3281", check_sibling_list(before)); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/tree_walk.h b/fs/reiser4/tree_walk.h new file mode 100644 index 000000000000..3d5f09f8cb54 --- /dev/null +++ b/fs/reiser4/tree_walk.h @@ -0,0 +1,125 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* definitions of reiser4 tree walk functions */ + +#ifndef __FS_REISER4_TREE_WALK_H__ +#define __FS_REISER4_TREE_WALK_H__ + +#include "debug.h" +#include "forward.h" + +/* establishes horizontal links between cached znodes */ +int connect_znode(coord_t * coord, znode * node); + +/* tree traversal functions (reiser4_get_parent(), reiser4_get_neighbor()) + have the following common arguments: + + return codes: + + @return : 0 - OK, + +ZAM-FIXME-HANS: wrong return code name. Change them all. + -ENOENT - neighbor is not in cache, what is detected by sibling + link absence. + + -E_NO_NEIGHBOR - we are sure that neighbor (or parent) node cannot be + found (because we are left-/right- most node of the + tree, for example). Also, this return code is for + reiser4_get_parent() when we see no parent link -- it + means that our node is root node. + + -E_DEADLOCK - deadlock detected (request from high-priority process + received), other error codes are conformed to + /usr/include/asm/errno.h . +*/ + +int +reiser4_get_parent_flags(lock_handle * result, znode * node, + znode_lock_mode mode, int flags); + +/* bits definition for reiser4_get_neighbor function `flags' arg. 
*/ +typedef enum { + /* If sibling pointer is NULL, this flag allows get_neighbor() to try to + * find not allocated not connected neigbor by going though upper + * levels */ + GN_CAN_USE_UPPER_LEVELS = 0x1, + /* locking left neighbor instead of right one */ + GN_GO_LEFT = 0x2, + /* automatically load neighbor node content */ + GN_LOAD_NEIGHBOR = 0x4, + /* return -E_REPEAT if can't lock */ + GN_TRY_LOCK = 0x8, + /* used internally in tree_walk.c, causes renew_sibling to not + allocate neighbor znode, but only search for it in znode cache */ + GN_NO_ALLOC = 0x10, + /* do not go across atom boundaries */ + GN_SAME_ATOM = 0x20, + /* allow to lock not connected nodes */ + GN_ALLOW_NOT_CONNECTED = 0x40, + /* Avoid synchronous jload, instead, call jstartio() and return -E_REPEAT. */ + GN_ASYNC = 0x80 +} znode_get_neigbor_flags; + +/* A commonly used wrapper for reiser4_get_parent_flags(). */ +static inline int reiser4_get_parent(lock_handle * result, znode * node, + znode_lock_mode mode) +{ + return reiser4_get_parent_flags(result, node, mode, + GN_ALLOW_NOT_CONNECTED); +} + +int reiser4_get_neighbor(lock_handle * neighbor, znode * node, + znode_lock_mode lock_mode, int flags); + +/* there are wrappers for most common usages of reiser4_get_neighbor() */ +static inline int +reiser4_get_left_neighbor(lock_handle * result, znode * node, int lock_mode, + int flags) +{ + return reiser4_get_neighbor(result, node, lock_mode, + flags | GN_GO_LEFT); +} + +static inline int +reiser4_get_right_neighbor(lock_handle * result, znode * node, int lock_mode, + int flags) +{ + ON_DEBUG(check_lock_node_data(node)); + ON_DEBUG(check_lock_data()); + return reiser4_get_neighbor(result, node, lock_mode, + flags & (~GN_GO_LEFT)); +} + +extern void sibling_list_remove(znode * node); +extern void sibling_list_drop(znode * node); +extern void sibling_list_insert_nolock(znode * new, znode * before); +extern void link_left_and_right(znode * left, znode * right); + +/* Functions called by tree_walk() 
when tree_walk() ... */ +struct tree_walk_actor { + /* ... meets a formatted node, */ + int (*process_znode) (tap_t *, void *); + /* ... meets an extent, */ + int (*process_extent) (tap_t *, void *); + /* ... begins tree traversal or repeats it after -E_REPEAT was returned by + * node or extent processing functions. */ + int (*before) (void *); +}; + +#if REISER4_DEBUG +int check_sibling_list(znode * node); +#else +#define check_sibling_list(n) (1) +#endif + +#endif /* __FS_REISER4_TREE_WALK_H__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/txnmgr.c b/fs/reiser4/txnmgr.c new file mode 100644 index 000000000000..d0de1887edae --- /dev/null +++ b/fs/reiser4/txnmgr.c @@ -0,0 +1,3163 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Joshua MacDonald wrote the first draft of this code. */ + +/* ZAM-LONGTERM-FIXME-HANS: The locking in this file is badly designed, and a +filesystem scales only as well as its worst locking design. You need to +substantially restructure this code. Josh was not as experienced a programmer +as you. Particularly review how the locking style differs from what you did +for znodes usingt hi-lo priority locking, and present to me an opinion on +whether the differences are well founded. */ + +/* I cannot help but to disagree with the sentiment above. Locking of + * transaction manager is _not_ badly designed, and, at the very least, is not + * the scaling bottleneck. Scaling bottleneck is _exactly_ hi-lo priority + * locking on znodes, especially on the root node of the tree. --nikita, + * 2003.10.13 */ + +/* The txnmgr is a set of interfaces that keep track of atoms and transcrash handles. 
The + txnmgr processes capture_block requests and manages the relationship between jnodes and + atoms through the various stages of a transcrash, and it also oversees the fusion and + capture-on-copy processes. The main difficulty with this task is maintaining a + deadlock-free lock ordering between atoms and jnodes/handles. The reason for the + difficulty is that jnodes, handles, and atoms contain pointer circles, and the cycle + must be broken. The main requirement is that atom-fusion be deadlock free, so once you + hold the atom_lock you may then wait to acquire any jnode or handle lock. This implies + that any time you check the atom-pointer of a jnode or handle and then try to lock that + atom, you must use trylock() and possibly reverse the order. + + This code implements the design documented at: + + http://namesys.com/txn-doc.html + +ZAM-FIXME-HANS: update v4.html to contain all of the information present in the above (but updated), and then remove the +above document and reference the new. Be sure to provide some credit to Josh. I already have some writings on this +topic in v4.html, but they are lacking in details present in the above. Cure that. Remember to write for the bright 12 +year old --- define all technical terms used. + +*/ + +/* Thoughts on the external transaction interface: + + In the current code, a TRANSCRASH handle is created implicitly by reiser4_init_context() (which + creates state that lasts for the duration of a system call and is called at the start + of ReiserFS methods implementing VFS operations), and closed by reiser4_exit_context(), + occupying the scope of a single system call. We wish to give certain applications an + interface to begin and close (commit) transactions. Since our implementation of + transactions does not yet support isolation, allowing an application to open a + transaction implies trusting it to later close the transaction. 
Part of the + transaction interface will be aimed at enabling that trust, but the interface for + actually using transactions is fairly narrow. + + BEGIN_TRANSCRASH: Returns a transcrash identifier. It should be possible to translate + this identifier into a string that a shell-script could use, allowing you to start a + transaction by issuing a command. Once open, the transcrash should be set in the task + structure, and there should be options (I suppose) to allow it to be carried across + fork/exec. A transcrash has several options: + + - READ_FUSING or WRITE_FUSING: The default policy is for txn-capture to capture only + on writes (WRITE_FUSING) and allow "dirty reads". If the application wishes to + capture on reads as well, it should set READ_FUSING. + + - TIMEOUT: Since a non-isolated transcrash cannot be undone, every transcrash must + eventually close (or else the machine must crash). If the application dies an + unexpected death with an open transcrash, for example, or if it hangs for a long + duration, one solution (to avoid crashing the machine) is to simply close it anyway. + This is a dangerous option, but it is one way to solve the problem until isolated + transcrashes are available for untrusted applications. + + It seems to be what databases do, though it is unclear how one avoids a DoS attack + creating a vulnerability based on resource starvation. Guaranteeing that some + minimum amount of computational resources are made available would seem more correct + than guaranteeing some amount of time. When we again have someone to code the work, + this issue should be considered carefully. -Hans + + RESERVE_BLOCKS: A running transcrash should indicate to the transaction manager how + many dirty blocks it expects. The reserve_blocks interface should be called at a point + where it is safe for the application to fail, because the system may not be able to + grant the allocation and the application must be able to back-out. 
For this reason, + the number of reserve-blocks can also be passed as an argument to BEGIN_TRANSCRASH, but + the application may also wish to extend the allocation after beginning its transcrash. + + CLOSE_TRANSCRASH: The application closes the transcrash when it is finished making + modifications that require transaction protection. When isolated transactions are + supported the CLOSE operation is replaced by either COMMIT or ABORT. For example, if a + RESERVE_BLOCKS call fails for the application, it should "abort" by calling + CLOSE_TRANSCRASH, even though it really commits any changes that were made (which is + why, for safety, the application should call RESERVE_BLOCKS before making any changes). + + For actually implementing these out-of-system-call-scopped transcrashes, the + reiser4_context has a "txn_handle *trans" pointer that may be set to an open + transcrash. Currently there are no dynamically-allocated transcrashes, but there is a + "struct kmem_cache *_txnh_slab" created for that purpose in this file. +*/ + +/* Extending the other system call interfaces for future transaction features: + + Specialized applications may benefit from passing flags to the ordinary system call + interface such as read(), write(), or stat(). For example, the application specifies + WRITE_FUSING by default but wishes to add that a certain read() command should be + treated as READ_FUSING. But which read? Is it the directory-entry read, the stat-data + read, or the file-data read? These issues are straight-forward, but there are a lot of + them and adding the necessary flags-passing code will be tedious. + + When supporting isolated transactions, there is a corresponding READ_MODIFY_WRITE (RMW) + flag, which specifies that although it is a read operation being requested, a + write-lock should be taken. The reason is that read-locks are shared while write-locks + are exclusive, so taking a read-lock when a later-write is known in advance will often + leads to deadlock. 
If a reader knows it will write later, it should issue read + requests with the RMW flag set. +*/ + +/* + The znode/atom deadlock avoidance. + + FIXME(Zam): writing of this comment is in progress. + + The atom's special stage ASTAGE_CAPTURE_WAIT introduces a kind of atom's + long-term locking, which makes reiser4 locking scheme more complex. It had + deadlocks until we implement deadlock avoidance algorithms. That deadlocks + looked as the following: one stopped thread waits for a long-term lock on + znode, the thread who owns that lock waits when fusion with another atom will + be allowed. + + The source of the deadlocks is an optimization of not capturing index nodes + for read. Let's prove it. Suppose we have dumb node capturing scheme which + unconditionally captures each block before locking it. + + That scheme has no deadlocks. Let's begin with the thread which stage is + ASTAGE_CAPTURE_WAIT and it waits for a znode lock. The thread can't wait for + a capture because it's stage allows fusion with any atom except which are + being committed currently. A process of atom commit can't deadlock because + atom commit procedure does not acquire locks and does not fuse with other + atoms. Reiser4 does capturing right before going to sleep inside the + longtertm_lock_znode() function, it means the znode which we want to lock is + already captured and its atom is in ASTAGE_CAPTURE_WAIT stage. If we + continue the analysis we understand that no one process in the sequence may + waits atom fusion. Thereby there are no deadlocks of described kind. + + The capturing optimization makes the deadlocks possible. A thread can wait a + lock which owner did not captured that node. The lock owner's current atom + is not fused with the first atom and it does not get a ASTAGE_CAPTURE_WAIT + state. A deadlock is possible when that atom meets another one which is in + ASTAGE_CAPTURE_WAIT already. 
+ + The deadlock avoidance scheme includes two algorithms: + + First algorithm is used when a thread captures a node which is locked but not + captured by another thread. Those nodes are marked MISSED_IN_CAPTURE at the + moment we skip their capturing. If such a node (marked MISSED_IN_CAPTURE) is + being captured by a thread with current atom is in ASTAGE_CAPTURE_WAIT, the + routine which forces all lock owners to join with current atom is executed. + + Second algorithm does not allow to skip capturing of already captured nodes. + + Both algorithms together prevent waiting a longterm lock without atom fusion + with atoms of all lock owners, which is a key thing for getting atom/znode + locking deadlocks. +*/ + +/* + * Transactions and mmap(2). + * + * 1. Transactions are not supported for accesses through mmap(2), because + * this would effectively amount to user-level transactions whose duration + * is beyond control of the kernel. + * + * 2. That said, we still want to preserve some decency with regard to + * mmap(2). During normal write(2) call, following sequence of events + * happens: + * + * 1. page is created; + * + * 2. jnode is created, dirtied and captured into current atom. + * + * 3. extent is inserted and modified. + * + * Steps (2) and (3) take place under long term lock on the twig node. + * + * When file is accessed through mmap(2) page is always created during + * page fault. + * After this (in reiser4_readpage_dispatch()->reiser4_readpage_extent()): + * + * 1. if access is made to non-hole page new jnode is created, (if + * necessary) + * + * 2. if access is made to the hole page, jnode is not created (XXX + * not clear why). + * + * Also, even if page is created by write page fault it is not marked + * dirty immediately by handle_mm_fault(). Probably this is to avoid races + * with page write-out. 
+ * + * Dirty bit installed by hardware is only transferred to the struct page + * later, when page is unmapped (in zap_pte_range(), or + * try_to_unmap_one()). + * + * So, with mmap(2) we have to handle following irksome situations: + * + * 1. there exists modified page (clean or dirty) without jnode + * + * 2. there exists modified page (clean or dirty) with clean jnode + * + * 3. clean page which is a part of atom can be transparently modified + * at any moment through mapping without becoming dirty. + * + * (1) and (2) can lead to the out-of-memory situation: ->writepage() + * doesn't know what to do with such pages and ->sync_sb()/->writepages() + * don't see them, because these methods operate on atoms. + * + * (3) can lead to the loss of data: suppose we have dirty page with dirty + * captured jnode captured by some atom. As part of early flush (for + * example) page was written out. Dirty bit was cleared on both page and + * jnode. After this page is modified through mapping, but kernel doesn't + * notice and just discards page and jnode as part of commit. (XXX + * actually it doesn't, because to reclaim page ->releasepage() has to be + * called and before this dirty bit will be transferred to the struct + * page). 
+ * + */ + +#include "debug.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "wander.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "page_cache.h" +#include "reiser4.h" +#include "vfs_ops.h" +#include "inode.h" +#include "flush.h" +#include "discard.h" + +#include +#include +#include +#include +#include +#include +#include +#include /* for totalram_pages */ + +static void atom_free(txn_atom * atom); + +static int commit_txnh(txn_handle * txnh); + +static void wakeup_atom_waitfor_list(txn_atom * atom); +static void wakeup_atom_waiting_list(txn_atom * atom); + +static void capture_assign_txnh_nolock(txn_atom * atom, txn_handle * txnh); + +static void capture_assign_block_nolock(txn_atom * atom, jnode * node); + +static void fuse_not_fused_lock_owners(txn_handle * txnh, znode * node); + +static int capture_init_fusion(jnode * node, txn_handle * txnh, + txn_capture mode); + +static int capture_fuse_wait(txn_handle *, txn_atom *, txn_atom *, txn_capture); + +static void capture_fuse_into(txn_atom * small, txn_atom * large); + +void reiser4_invalidate_list(struct list_head *); + +/* GENERIC STRUCTURES */ + +typedef struct _txn_wait_links txn_wait_links; + +struct _txn_wait_links { + lock_stack *_lock_stack; + struct list_head _fwaitfor_link; + struct list_head _fwaiting_link; + int (*waitfor_cb) (txn_atom * atom, struct _txn_wait_links * wlinks); + int (*waiting_cb) (txn_atom * atom, struct _txn_wait_links * wlinks); +}; + +/* FIXME: In theory, we should be using the slab cache init & destructor + methods instead of, e.g., jnode_init, etc. */ +static struct kmem_cache *_atom_slab = NULL; +/* this is for user-visible, cross system-call transactions. */ +static struct kmem_cache *_txnh_slab = NULL; + +/** + * init_txnmgr_static - create transaction manager slab caches + * + * Initializes caches of txn-atoms and txn_handle. It is part of reiser4 module + * initialization. 
+ */ +int init_txnmgr_static(void) +{ + assert("jmacd-600", _atom_slab == NULL); + assert("jmacd-601", _txnh_slab == NULL); + + ON_DEBUG(atomic_set(&flush_cnt, 0)); + + _atom_slab = kmem_cache_create("txn_atom", sizeof(txn_atom), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (_atom_slab == NULL) + return RETERR(-ENOMEM); + + _txnh_slab = kmem_cache_create("txn_handle", sizeof(txn_handle), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (_txnh_slab == NULL) { + kmem_cache_destroy(_atom_slab); + _atom_slab = NULL; + return RETERR(-ENOMEM); + } + + return 0; +} + +/** + * done_txnmgr_static - delete txn_atom and txn_handle caches + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_txnmgr_static(void) +{ + destroy_reiser4_cache(&_atom_slab); + destroy_reiser4_cache(&_txnh_slab); +} + +/** + * init_txnmgr - initialize a new transaction manager + * @mgr: pointer to transaction manager embedded in reiser4 super block + * + * This is called on mount. Makes necessary initializations. + */ +void reiser4_init_txnmgr(txn_mgr *mgr) +{ + assert("umka-169", mgr != NULL); + + mgr->atom_count = 0; + mgr->id_count = 1; + INIT_LIST_HEAD(&mgr->atoms_list); + spin_lock_init(&mgr->tmgr_lock); + mutex_init(&mgr->commit_mutex); +} + +/** + * reiser4_done_txnmgr - stop transaction manager + * @mgr: pointer to transaction manager embedded in reiser4 super block + * + * This is called on umount. Does sanity checks. + */ +void reiser4_done_txnmgr(txn_mgr *mgr) +{ + assert("umka-170", mgr != NULL); + assert("umka-1701", list_empty_careful(&mgr->atoms_list)); + assert("umka-1702", mgr->atom_count == 0); +} + +/* Initialize a transaction handle. 
*/ +/* Audited by: umka (2002.06.13) */ +static void txnh_init(txn_handle * txnh, txn_mode mode) +{ + assert("umka-171", txnh != NULL); + + txnh->mode = mode; + txnh->atom = NULL; + reiser4_ctx_gfp_mask_set(); + txnh->flags = 0; + spin_lock_init(&txnh->hlock); + INIT_LIST_HEAD(&txnh->txnh_link); +} + +#if REISER4_DEBUG +/* Check if a transaction handle is clean. */ +static int txnh_isclean(txn_handle * txnh) +{ + assert("umka-172", txnh != NULL); + return txnh->atom == NULL && + LOCK_CNT_NIL(spin_locked_txnh); +} +#endif + +/* Initialize an atom. */ +static void atom_init(txn_atom * atom) +{ + int level; + + assert("umka-173", atom != NULL); + + memset(atom, 0, sizeof(txn_atom)); + + atom->stage = ASTAGE_FREE; + atom->start_time = jiffies; + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) + INIT_LIST_HEAD(ATOM_DIRTY_LIST(atom, level)); + + INIT_LIST_HEAD(ATOM_CLEAN_LIST(atom)); + INIT_LIST_HEAD(ATOM_OVRWR_LIST(atom)); + INIT_LIST_HEAD(ATOM_WB_LIST(atom)); + INIT_LIST_HEAD(&atom->inodes); + spin_lock_init(&(atom->alock)); + /* list of transaction handles */ + INIT_LIST_HEAD(&atom->txnh_list); + /* link to transaction manager's list of atoms */ + INIT_LIST_HEAD(&atom->atom_link); + INIT_LIST_HEAD(&atom->fwaitfor_list); + INIT_LIST_HEAD(&atom->fwaiting_list); + blocknr_set_init(&atom->wandered_map); + + atom_dset_init(atom); + + init_atom_fq_parts(atom); +} + +#if REISER4_DEBUG +/* Check if an atom is clean. 
*/ +static int atom_isclean(txn_atom * atom) +{ + int level; + + assert("umka-174", atom != NULL); + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + if (!list_empty_careful(ATOM_DIRTY_LIST(atom, level))) { + return 0; + } + } + + return atom->stage == ASTAGE_FREE && + atom->txnh_count == 0 && + atom->capture_count == 0 && + atomic_read(&atom->refcount) == 0 && + (&atom->atom_link == atom->atom_link.next && + &atom->atom_link == atom->atom_link.prev) && + list_empty_careful(&atom->txnh_list) && + list_empty_careful(ATOM_CLEAN_LIST(atom)) && + list_empty_careful(ATOM_OVRWR_LIST(atom)) && + list_empty_careful(ATOM_WB_LIST(atom)) && + list_empty_careful(&atom->fwaitfor_list) && + list_empty_careful(&atom->fwaiting_list) && + atom_fq_parts_are_clean(atom); +} +#endif + +/* Begin a transaction in this context. Currently this uses the reiser4_context's + trans_in_ctx, which means that transaction handles are stack-allocated. Eventually + this will be extended to allow transaction handles to span several contexts. */ +/* Audited by: umka (2002.06.13) */ +void reiser4_txn_begin(reiser4_context * context) +{ + assert("jmacd-544", context->trans == NULL); + + context->trans = &context->trans_in_ctx; + + /* FIXME_LATER_JMACD Currently there's no way to begin a TXN_READ_FUSING + transcrash. Default should be TXN_WRITE_FUSING. Also, the _trans variable is + stack allocated right now, but we would like to allow for dynamically allocated + transcrashes that span multiple system calls. + */ + txnh_init(context->trans, TXN_WRITE_FUSING); +} + +/* Finish a transaction handle context. 
 */
/* Close the context's transaction handle: commit through the handle if it is
   attached to an atom, verify the handle is clean, and detach it from the
   context. Returns the result of commit_txnh() (0 on success). */
int reiser4_txn_end(reiser4_context * context)
{
	long ret = 0;
	txn_handle *txnh;

	assert("umka-283", context != NULL);
	assert("nikita-3012", reiser4_schedulable());
	assert("vs-24", context == get_current_context());
	assert("nikita-2967", lock_stack_isclean(get_current_lock_stack()));

	txnh = context->trans;
	if (txnh != NULL) {
		if (txnh->atom != NULL)
			ret = commit_txnh(txnh);
		assert("jmacd-633", txnh_isclean(txnh));
		context->trans = NULL;
	}
	return ret;
}

/* End the current transaction handle and immediately open a fresh one,
   with a preemption point in between. */
void reiser4_txn_restart(reiser4_context * context)
{
	reiser4_txn_end(context);
	reiser4_preempt_point();
	reiser4_txn_begin(context);
}

/* Convenience wrapper: restart the transaction of the current context. */
void reiser4_txn_restart_current(void)
{
	reiser4_txn_restart(get_current_context());
}

/* TXN_ATOM */

/* Get the atom belonging to a txnh, which is not locked. Return txnh locked. Locks atom, if atom
   is not NULL. This performs the necessary spin_trylock to break the lock-ordering cycle. May
   return NULL. */
static txn_atom *txnh_get_atom(txn_handle * txnh)
{
	txn_atom *atom;

	assert("umka-180", txnh != NULL);
	assert_spin_not_locked(&(txnh->hlock));

	while (1) {
		spin_lock_txnh(txnh);
		atom = txnh->atom;

		if (atom == NULL)
			break;

		/* fast path: atom lock obtained without violating ordering */
		if (spin_trylock_atom(atom))
			break;

		/* slow path: pin the atom with a reference, drop the txnh
		 * lock, take the locks in atom-then-txnh order, and re-check
		 * that the handle still points to the same atom. */
		atomic_inc(&atom->refcount);

		spin_unlock_txnh(txnh);
		spin_lock_atom(atom);
		spin_lock_txnh(txnh);

		if (txnh->atom == atom) {
			atomic_dec(&atom->refcount);
			break;
		}

		/* handle migrated to another atom meanwhile; drop our
		 * reference (possibly freeing the atom) and retry */
		spin_unlock_txnh(txnh);
		atom_dec_and_unlock(atom);
	}

	return atom;
}

/* Get the current atom and spinlock it if current atom present. May return NULL */
txn_atom *get_current_atom_locked_nocheck(void)
{
	reiser4_context *cx;
	txn_atom *atom;
	txn_handle *txnh;

	cx = get_current_context();
	assert("zam-437", cx != NULL);

	txnh = cx->trans;
	assert("zam-435", txnh != NULL);

	atom = txnh_get_atom(txnh);

	/* txnh_get_atom() returned with txnh locked; only the atom lock is
	 * handed to the caller */
	spin_unlock_txnh(txnh);
	return atom;
}

/* Get the atom belonging to a jnode, which is initially locked.
   Return with
   both jnode and atom locked. This performs the necessary spin_trylock to
   break the lock-ordering cycle. Assumes the jnode is already locked, and
   returns NULL if atom is not set. */
txn_atom *jnode_get_atom(jnode * node)
{
	txn_atom *atom;

	assert("umka-181", node != NULL);

	while (1) {
		assert_spin_locked(&(node->guard));

		atom = node->atom;
		/* node is not in any atom */
		if (atom == NULL)
			break;

		/* If atom is not locked, grab the lock and return */
		if (spin_trylock_atom(atom))
			break;

		/* At least one jnode belongs to this atom it guarantees that
		 * atom->refcount > 0, we can safely increment refcount. */
		atomic_inc(&atom->refcount);
		spin_unlock_jnode(node);

		/* re-acquire spin locks in the right order */
		spin_lock_atom(atom);
		spin_lock_jnode(node);

		/* check if node still points to the same atom. */
		if (node->atom == atom) {
			atomic_dec(&atom->refcount);
			break;
		}

		/* releasing of atom lock and reference requires not holding
		 * locks on jnodes. */
		spin_unlock_jnode(node);

		/* We are not sure that this atom has extra references except
		 * our one, so we should call the proper function which may
		 * free the atom if the last reference is released. */
		atom_dec_and_unlock(atom);

		/* lock jnode again for getting valid node->atom pointer
		 * value. */
		spin_lock_jnode(node);
	}

	return atom;
}

/* Returns true if @node is dirty and part of the same atom as one of its neighbors. Used
   by flush code to indicate whether the next node (in some direction) is suitable for
   flushing. */
int
same_slum_check(jnode * node, jnode * check, int alloc_check, int alloc_value)
{
	int compat;
	txn_atom *atom;

	assert("umka-182", node != NULL);
	assert("umka-183", check != NULL);

	/* Not sure what this function is supposed to do if supplied with @check that is
	   neither formatted nor unformatted (bitmap or so).
	 */
	assert("nikita-2373", jnode_is_znode(check)
	       || jnode_is_unformatted(check));

	/* Need a lock on CHECK to get its atom and to check various state bits.
	   Don't need a lock on NODE once we get the atom lock. */
	/* It is not enough to lock two nodes and check (node->atom ==
	   check->atom) because atom could be locked and being fused at that
	   moment, jnodes of the atom of that state (being fused) can point to
	   different objects, but the atom is the same. */
	spin_lock_jnode(check);

	atom = jnode_get_atom(check);

	if (atom == NULL) {
		compat = 0;
	} else {
		compat = (node->atom == atom && JF_ISSET(check, JNODE_DIRTY));

		if (compat && jnode_is_znode(check)) {
			compat &= znode_is_connected(JZNODE(check));
		}

		if (compat && alloc_check) {
			compat &= (alloc_value == jnode_is_flushprepped(check));
		}

		spin_unlock_atom(atom);
	}

	spin_unlock_jnode(check);

	return compat;
}

/* Decrement the atom's reference count and if it falls to zero, free it.
   Called with the atom lock held; always releases it. Freeing requires the
   txnmgr lock, acquired here with the usual trylock/re-reference dance to
   preserve txnmgr-before-atom lock ordering. */
void atom_dec_and_unlock(txn_atom * atom)
{
	txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr;

	assert("umka-186", atom != NULL);
	assert_spin_locked(&(atom->alock));
	assert("zam-1039", atomic_read(&atom->refcount) > 0);

	if (atomic_dec_and_test(&atom->refcount)) {
		/* take txnmgr lock and atom lock in proper order. */
		if (!spin_trylock_txnmgr(mgr)) {
			/* This atom should exist after we re-acquire its
			 * spinlock, so we increment its reference counter. */
			atomic_inc(&atom->refcount);
			spin_unlock_atom(atom);
			spin_lock_txnmgr(mgr);
			spin_lock_atom(atom);

			if (!atomic_dec_and_test(&atom->refcount)) {
				spin_unlock_atom(atom);
				spin_unlock_txnmgr(mgr);
				return;
			}
		}
		assert_spin_locked(&(mgr->tmgr_lock));
		atom_free(atom);
		spin_unlock_txnmgr(mgr);
	} else
		spin_unlock_atom(atom);
}

/* Create new atom and connect it to given transaction handle.
   This adds the
   atom to the transaction manager's list and sets its reference count to 1, an
   artificial reference which is kept until it commits. We play strange games
   to avoid allocation under jnode & txnh spinlocks.

   Always returns -E_REPEAT (or -ENOMEM): either the handle already had an
   atom by the time we got the locks, or a new atom was created and assigned —
   in both cases the caller is expected to retry its capture. */

static int atom_begin_and_assign_to_txnh(txn_atom ** atom_alloc, txn_handle * txnh)
{
	txn_atom *atom;
	txn_mgr *mgr;

	if (REISER4_DEBUG && rofs_tree(current_tree)) {
		warning("nikita-3366", "Creating atom on rofs");
		dump_stack();
	}

	/* allocate (or reuse a caller-preallocated) atom before taking any
	 * spinlocks */
	if (*atom_alloc == NULL) {
		(*atom_alloc) = kmem_cache_alloc(_atom_slab,
						 reiser4_ctx_gfp_mask_get());

		if (*atom_alloc == NULL)
			return RETERR(-ENOMEM);
	}

	/* and, also, txnmgr spin lock should be taken before jnode and txnh
	   locks. */
	mgr = &get_super_private(reiser4_get_current_sb())->tmgr;
	spin_lock_txnmgr(mgr);
	spin_lock_txnh(txnh);

	/* Check whether new atom still needed */
	if (txnh->atom != NULL) {
		/* NOTE-NIKITA probably it is rather better to free
		 * atom_alloc here than thread it up to reiser4_try_capture() */

		spin_unlock_txnh(txnh);
		spin_unlock_txnmgr(mgr);

		return -E_REPEAT;
	}

	atom = *atom_alloc;
	*atom_alloc = NULL;

	atom_init(atom);

	assert("jmacd-17", atom_isclean(atom));

	/*
	 * lock ordering is broken here. It is ok, as long as @atom is new
	 * and inaccessible for others. We can't use spin_lock_atom or
	 * spin_lock(&atom->alock) because they care about locking
	 * dependencies. spin_trylock_lock doesn't.
	 */
	check_me("", spin_trylock_atom(atom));

	/* add atom to the end of transaction manager's list of atoms */
	list_add_tail(&atom->atom_link, &mgr->atoms_list);
	atom->atom_id = mgr->id_count++;
	mgr->atom_count += 1;

	/* Release txnmgr lock */
	spin_unlock_txnmgr(mgr);

	/* One reference until it commits.
	 */
	atomic_inc(&atom->refcount);
	atom->stage = ASTAGE_CAPTURE_FUSE;
	atom->super = reiser4_get_current_sb();
	capture_assign_txnh_nolock(atom, txnh);

	spin_unlock_atom(atom);
	spin_unlock_txnh(txnh);

	return -E_REPEAT;
}

/* Return true if an atom is currently "open": past FREE but not yet
   committing. */
static int atom_isopen(const txn_atom * atom)
{
	assert("umka-185", atom != NULL);

	return atom->stage > 0 && atom->stage < ASTAGE_PRE_COMMIT;
}

/* Return the number of pointers to this atom that must be updated during fusion. This
   approximates the amount of work to be done. Fusion chooses the atom with fewer
   pointers to fuse into the atom with more pointers. */
static int atom_pointer_count(const txn_atom * atom)
{
	assert("umka-187", atom != NULL);

	/* This is a measure of the amount of work needed to fuse this atom
	 * into another. */
	return atom->txnh_count + atom->capture_count;
}

/* Called holding the atom lock, this removes the atom from the transaction manager list
   and frees it.
 */
/* Caller must hold both the atom lock and the txnmgr lock; the atom lock is
   released here, and the atom returned to its slab cache. */
static void atom_free(txn_atom * atom)
{
	txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr;

	assert("umka-188", atom != NULL);
	assert_spin_locked(&(atom->alock));

	/* Remove from the txn_mgr's atom list */
	assert_spin_locked(&(mgr->tmgr_lock));
	mgr->atom_count -= 1;
	list_del_init(&atom->atom_link);

	/* Clean the atom */
	assert("jmacd-16",
	       (atom->stage == ASTAGE_INVALID || atom->stage == ASTAGE_DONE));
	atom->stage = ASTAGE_FREE;

	blocknr_set_destroy(&atom->wandered_map);

	atom_dset_destroy(atom);

	assert("jmacd-16", atom_isclean(atom));

	spin_unlock_atom(atom);

	kmem_cache_free(_atom_slab, atom);
}

/* Return true if the atom has outlived the configured maximum age. */
static int atom_is_dotard(const txn_atom * atom)
{
	return time_after(jiffies, atom->start_time +
			  get_current_super_private()->tmgr.atom_max_age);
}

/* An atom can be committed when every open handle but one (the committer's)
   is parked in nr_waiters. Caller holds the atom lock. */
static int atom_can_be_committed(txn_atom * atom)
{
	assert_spin_locked(&(atom->alock));
	assert("zam-885", atom->txnh_count > atom->nr_waiters);
	return atom->txnh_count == atom->nr_waiters + 1;
}

/* Return true if an atom should commit now. This is determined by aging, atom
   size or atom flags. */
static int atom_should_commit(const txn_atom * atom)
{
	assert("umka-189", atom != NULL);
	return
	    (atom->flags & ATOM_FORCE_COMMIT) ||
	    ((unsigned)atom_pointer_count(atom) >
	     get_current_super_private()->tmgr.atom_max_size)
	    || atom_is_dotard(atom);
}

/* return 1 if current atom exists and requires commit.
 */
int current_atom_should_commit(void)
{
	txn_atom *atom;
	int result = 0;

	atom = get_current_atom_locked_nocheck();
	if (atom) {
		result = atom_should_commit(atom);
		spin_unlock_atom(atom);
	}
	return result;
}

/* Heuristic: commit as soon as possible when the atom pins a large fraction
   of RAM or has already been flushed many times. */
static int atom_should_commit_asap(const txn_atom * atom)
{
	unsigned int captured;
	unsigned int pinnedpages;

	assert("nikita-3309", atom != NULL);

	captured = (unsigned)atom->capture_count;
	/* NOTE(review): the estimate (capture_count >> PAGE_SHIFT) *
	 * sizeof(znode) looks dimensionally odd for a "pinned pages" count —
	 * verify the intended formula against the znode/page relationship. */
	pinnedpages = (captured >> PAGE_SHIFT) * sizeof(znode);

	return (pinnedpages > (totalram_pages >> 3)) || (atom->flushed > 100);
}

/* Return the first flushable jnode on @head, or NULL. Unless flushing for
   commit, nodes that "heard banshee" (are being deleted) or have I/O in
   flight are skipped. */
static jnode *find_first_dirty_in_list(struct list_head *head, int flags)
{
	jnode *first_dirty;

	list_for_each_entry(first_dirty, head, capture_link) {
		if (!(flags & JNODE_FLUSH_COMMIT)) {
			/*
			 * skip jnodes which "heard banshee" or having active
			 * I/O
			 */
			if (JF_ISSET(first_dirty, JNODE_HEARD_BANSHEE) ||
			    JF_ISSET(first_dirty, JNODE_WRITEBACK))
				continue;
		}
		return first_dirty;
	}
	return NULL;
}

/* Get first dirty node from the atom's dirty_nodes[n] lists; return NULL if atom has no dirty
   nodes on atom's lists */
jnode *find_first_dirty_jnode(txn_atom * atom, int flags)
{
	jnode *first_dirty;
	tree_level level;

	assert_spin_locked(&(atom->alock));

	/* The flush starts from LEAF_LEVEL (=1). */
	for (level = 1; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) {
		if (list_empty_careful(ATOM_DIRTY_LIST(atom, level)))
			continue;

		first_dirty =
		    find_first_dirty_in_list(ATOM_DIRTY_LIST(atom, level),
					     flags);
		if (first_dirty)
			return first_dirty;
	}

	/* znode-above-root is on the list #0.
	 */
	return find_first_dirty_in_list(ATOM_DIRTY_LIST(atom, 0), flags);
}

/* Walk the atom's writeback list: re-queue nodes that became dirty again and
   have no I/O in flight, move the rest of the non-writeback nodes to the
   clean list. Caller must have the atom protected (see atom_is_protected). */
static void dispatch_wb_list(txn_atom * atom, flush_queue_t * fq)
{
	jnode *cur;

	assert("zam-905", atom_is_protected(atom));

	cur = list_entry(ATOM_WB_LIST(atom)->next, jnode, capture_link);
	while (ATOM_WB_LIST(atom) != &cur->capture_link) {
		/* grab the successor first: cur may be moved off this list */
		jnode *next = list_entry(cur->capture_link.next, jnode, capture_link);

		spin_lock_jnode(cur);
		if (!JF_ISSET(cur, JNODE_WRITEBACK)) {
			if (JF_ISSET(cur, JNODE_DIRTY)) {
				queue_jnode(fq, cur);
			} else {
				/* move jnode to atom's clean list */
				list_move_tail(&cur->capture_link,
					       ATOM_CLEAN_LIST(atom));
			}
		}
		spin_unlock_jnode(cur);

		cur = next;
	}
}

/* Scan current atom->writeback_nodes list, re-submit dirty and !writeback
 * jnodes to disk. */
static int submit_wb_list(void)
{
	int ret;
	flush_queue_t *fq;

	fq = get_fq_for_current_atom();
	if (IS_ERR(fq))
		return PTR_ERR(fq);

	dispatch_wb_list(fq->atom, fq);
	spin_unlock_atom(fq->atom);

	ret = reiser4_write_fq(fq, NULL, 1);
	reiser4_fq_put(fq);

	return ret;
}

/* Wait completion of all writes, re-submit atom writeback list if needed. */
static int current_atom_complete_writes(void)
{
	int ret;

	/* Each jnode from that list was modified and dirtied when it had i/o
	 * request running already.
	 * After i/o completion we have to resubmit
	 * them to disk again.*/
	ret = submit_wb_list();
	if (ret < 0)
		return ret;

	/* Wait all i/o completion */
	ret = current_atom_finish_all_fq();
	if (ret)
		return ret;

	/* Scan wb list again; all i/o should be completed, we re-submit dirty
	 * nodes to disk */
	ret = submit_wb_list();
	if (ret < 0)
		return ret;

	/* Wait all nodes we just submitted */
	return current_atom_finish_all_fq();
}

#if REISER4_DEBUG

/* Dump an atom's counters and stage to the kernel log (debug builds only). */
static void reiser4_info_atom(const char *prefix, const txn_atom * atom)
{
	if (atom == NULL) {
		printk("%s: no atom\n", prefix);
		return;
	}

	printk("%s: refcount: %i id: %i flags: %x txnh_count: %i"
	       " capture_count: %i stage: %x start: %lu, flushed: %i\n", prefix,
	       atomic_read(&atom->refcount), atom->atom_id, atom->flags,
	       atom->txnh_count, atom->capture_count, atom->stage,
	       atom->start_time, atom->flushed);
}

#else /* REISER4_DEBUG */

static inline void reiser4_info_atom(const char *prefix, const txn_atom * atom) {}

#endif /* REISER4_DEBUG */

/* flush-iteration count above which "flushing like mad" warnings start */
#define TOOMANYFLUSHES (1 << 13)

/* Called with the atom locked and no open "active" transaction handlers except
   ours, this function calls flush_current_atom() until all dirty nodes are
   processed. Then it initiates commit processing.

   Called by the single remaining open "active" txnh, which is closing. Other
   open txnhs belong to processes which wait atom commit in commit_txnh()
   routine. They are counted as "waiters" in atom->nr_waiters. Therefore as
   long as we hold the atom lock none of the jnodes can be captured and/or
   locked.

   Returns with the atom lock re-held on success (stage set to ASTAGE_DONE);
   return value is an error code if commit fails.
*/
static int commit_current_atom(long *nr_submitted, txn_atom ** atom)
{
	reiser4_super_info_data *sbinfo = get_current_super_private();
	long ret = 0;
	/* how many times jnode_flush() was called as a part of attempt to
	 * commit this atom.
	 */
	int flushiters;

	assert("zam-888", atom != NULL && *atom != NULL);
	assert_spin_locked(&((*atom)->alock));
	assert("zam-887", get_current_context()->trans->atom == *atom);
	assert("jmacd-151", atom_isopen(*atom));

	assert("nikita-3184",
	       get_current_super_private()->delete_mutex_owner != current);

	for (flushiters = 0;; ++flushiters) {
		ret =
		    flush_current_atom(JNODE_FLUSH_WRITE_BLOCKS |
				       JNODE_FLUSH_COMMIT,
				       LONG_MAX /* nr_to_write */ ,
				       nr_submitted, atom, NULL);
		if (ret != -E_REPEAT)
			break;

		/* if atom's dirty list contains one znode which is
		   HEARD_BANSHEE and is locked we have to allow lock owner to
		   continue and uncapture that znode */
		reiser4_preempt_point();

		*atom = get_current_atom_locked();
		if (flushiters > TOOMANYFLUSHES && IS_POW(flushiters)) {
			warning("nikita-3176",
				"Flushing like mad: %i", flushiters);
			reiser4_info_atom("atom", *atom);
			DEBUGON(flushiters > (1 << 20));
		}
	}

	if (ret)
		return ret;

	assert_spin_locked(&((*atom)->alock));

	if (!atom_can_be_committed(*atom)) {
		spin_unlock_atom(*atom);
		return RETERR(-E_REPEAT);
	}

	/* an empty atom can jump straight to DONE */
	if ((*atom)->capture_count == 0)
		goto done;

	/* Up to this point we have been flushing and after flush is called we
	   return -E_REPEAT. Now we can commit. We cannot return -E_REPEAT
	   at this point, commit should be successful.
	 */
	reiser4_atom_set_stage(*atom, ASTAGE_PRE_COMMIT);
	ON_DEBUG(((*atom)->committer = current));
	spin_unlock_atom(*atom);

	ret = current_atom_complete_writes();
	if (ret)
		return ret;

	assert("zam-906", list_empty(ATOM_WB_LIST(*atom)));

	/* isolate critical code path which should be executed by only one
	 * thread using tmgr mutex */
	mutex_lock(&sbinfo->tmgr.commit_mutex);

	ret = reiser4_write_logs(nr_submitted);
	if (ret < 0)
		reiser4_panic("zam-597", "write log failed (%ld)\n", ret);

	/* The atom->ovrwr_nodes list is processed under commit mutex held
	   because of bitmap nodes which are captured by special way in
	   reiser4_pre_commit_hook_bitmap(), that way does not include
	   capture_fuse_wait() as a capturing of other nodes does -- the commit
	   mutex is used for transaction isolation instead. */
	reiser4_invalidate_list(ATOM_OVRWR_LIST(*atom));
	mutex_unlock(&sbinfo->tmgr.commit_mutex);

	reiser4_invalidate_list(ATOM_CLEAN_LIST(*atom));
	reiser4_invalidate_list(ATOM_WB_LIST(*atom));
	assert("zam-927", list_empty(&(*atom)->inodes));

	spin_lock_atom(*atom);
 done:
	reiser4_atom_set_stage(*atom, ASTAGE_DONE);
	ON_DEBUG((*atom)->committer = NULL);

	/* Atom's state changes, so wake up everybody waiting for this
	   event. */
	wakeup_atom_waiting_list(*atom);

	/* Decrement the "until commit" reference, at least one txnh (the caller) is
	   still open. */
	atomic_dec(&(*atom)->refcount);

	assert("jmacd-1070", atomic_read(&(*atom)->refcount) > 0);
	assert("jmacd-1062", (*atom)->capture_count == 0);
	BUG_ON((*atom)->capture_count != 0);
	assert_spin_locked(&((*atom)->alock));

	return ret;
}

/* TXN_TXNH */

/**
 * force_commit_atom - commit current atom and wait commit completion
 * @txnh:
 *
 * Commits current atom and wait commit completion; current atom and @txnh have
 * to be spinlocked before call, this function unlocks them on exit.
 */
int force_commit_atom(txn_handle *txnh)
{
	txn_atom *atom;

	assert("zam-837", txnh != NULL);
	assert_spin_locked(&(txnh->hlock));
	assert("nikita-2966", lock_stack_isclean(get_current_lock_stack()));

	atom = txnh->atom;

	assert("zam-834", atom != NULL);
	assert_spin_locked(&(atom->alock));

	/*
	 * Set flags for atom and txnh: forcing atom commit and waiting for
	 * commit completion
	 */
	txnh->flags |= TXNH_WAIT_COMMIT;
	atom->flags |= ATOM_FORCE_COMMIT;

	spin_unlock_txnh(txnh);
	spin_unlock_atom(atom);

	/* commit is here: closing the handle in the restart drives the
	 * actual commit and, because of TXNH_WAIT_COMMIT, waits for it */
	reiser4_txn_restart_current();
	return 0;
}

/* Called to force commit of any outstanding atoms. @commit_all_atoms controls
 * should we commit all atoms including new ones which are created after this
 * functions is called. */
int txnmgr_force_commit_all(struct super_block *super, int commit_all_atoms)
{
	int ret;
	txn_atom *atom;
	txn_mgr *mgr;
	txn_handle *txnh;
	unsigned long start_time = jiffies;
	reiser4_context *ctx = get_current_context();

	assert("nikita-2965", lock_stack_isclean(get_current_lock_stack()));
	assert("nikita-3058", reiser4_commit_check_locks());

	reiser4_txn_restart_current();

	mgr = &get_super_private(super)->tmgr;

	txnh = ctx->trans;

      again:

	spin_lock_txnmgr(mgr);

	list_for_each_entry(atom, &mgr->atoms_list, atom_link) {
		spin_lock_atom(atom);

		/* Commit any atom which can be committed. If @commit_new_atoms
		 * is not set we commit only atoms which were created before
		 * this call is started.
		 */
		if (commit_all_atoms
		    || time_before_eq(atom->start_time, start_time)) {
			if (atom->stage <= ASTAGE_POST_COMMIT) {
				spin_unlock_txnmgr(mgr);

				if (atom->stage < ASTAGE_PRE_COMMIT) {
					spin_lock_txnh(txnh);
					/* Add force-context txnh */
					capture_assign_txnh_nolock(atom, txnh);
					ret = force_commit_atom(txnh);
					if (ret)
						return ret;
				} else
					/* wait atom commit */
					reiser4_atom_wait_event(atom);

				/* list may have changed while we dropped the
				 * txnmgr lock; rescan from the start */
				goto again;
			}
		}

		spin_unlock_atom(atom);
	}

#if REISER4_DEBUG
	if (commit_all_atoms) {
		reiser4_super_info_data *sbinfo = get_super_private(super);
		spin_lock_reiser4_super(sbinfo);
		assert("zam-813",
		       sbinfo->blocks_fake_allocated_unformatted == 0);
		assert("zam-812", sbinfo->blocks_fake_allocated == 0);
		spin_unlock_reiser4_super(sbinfo);
	}
#endif

	spin_unlock_txnmgr(mgr);

	return 0;
}

/* check whether commit_some_atoms() can commit @atom. Locking is up to the
 * caller */
static int atom_is_committable(txn_atom * atom)
{
	return
	    atom->stage < ASTAGE_PRE_COMMIT &&
	    atom->txnh_count == atom->nr_waiters && atom_should_commit(atom);
}

/* called periodically from ktxnmgrd to commit old atoms.
Releases ktxnmgrd spin + * lock at exit */ +int commit_some_atoms(txn_mgr * mgr) +{ + int ret = 0; + txn_atom *atom; + txn_handle *txnh; + reiser4_context *ctx; + struct list_head *pos, *tmp; + + ctx = get_current_context(); + assert("nikita-2444", ctx != NULL); + + txnh = ctx->trans; + spin_lock_txnmgr(mgr); + + /* + * this is to avoid gcc complain that atom might be used + * uninitialized + */ + atom = NULL; + + /* look for atom to commit */ + list_for_each_safe(pos, tmp, &mgr->atoms_list) { + atom = list_entry(pos, txn_atom, atom_link); + /* + * first test without taking atom spin lock, whether it is + * eligible for committing at all + */ + if (atom_is_committable(atom)) { + /* now, take spin lock and re-check */ + spin_lock_atom(atom); + if (atom_is_committable(atom)) + break; + spin_unlock_atom(atom); + } + } + + ret = (&mgr->atoms_list == pos); + spin_unlock_txnmgr(mgr); + + if (ret) { + /* nothing found */ + spin_unlock(&mgr->daemon->guard); + return 0; + } + + spin_lock_txnh(txnh); + + BUG_ON(atom == NULL); + /* Set the atom to force committing */ + atom->flags |= ATOM_FORCE_COMMIT; + + /* Add force-context txnh */ + capture_assign_txnh_nolock(atom, txnh); + + spin_unlock_txnh(txnh); + spin_unlock_atom(atom); + + /* we are about to release daemon spin lock, notify daemon it + has to rescan atoms */ + mgr->daemon->rescan = 1; + spin_unlock(&mgr->daemon->guard); + reiser4_txn_restart_current(); + return 0; +} + +static int txn_try_to_fuse_small_atom(txn_mgr * tmgr, txn_atom * atom) +{ + int atom_stage; + txn_atom *atom_2; + int repeat; + + assert("zam-1051", atom->stage < ASTAGE_PRE_COMMIT); + + atom_stage = atom->stage; + repeat = 0; + + if (!spin_trylock_txnmgr(tmgr)) { + atomic_inc(&atom->refcount); + spin_unlock_atom(atom); + spin_lock_txnmgr(tmgr); + spin_lock_atom(atom); + repeat = 1; + if (atom->stage != atom_stage) { + spin_unlock_txnmgr(tmgr); + atom_dec_and_unlock(atom); + return -E_REPEAT; + } + atomic_dec(&atom->refcount); + } + + 
list_for_each_entry(atom_2, &tmgr->atoms_list, atom_link) { + if (atom == atom_2) + continue; + /* + * if trylock does not succeed we just do not fuse with that + * atom. + */ + if (spin_trylock_atom(atom_2)) { + if (atom_2->stage < ASTAGE_PRE_COMMIT) { + spin_unlock_txnmgr(tmgr); + capture_fuse_into(atom_2, atom); + /* all locks are lost we can only repeat here */ + return -E_REPEAT; + } + spin_unlock_atom(atom_2); + } + } + atom->flags |= ATOM_CANCEL_FUSION; + spin_unlock_txnmgr(tmgr); + if (repeat) { + spin_unlock_atom(atom); + return -E_REPEAT; + } + return 0; +} + +/* Calls jnode_flush for current atom if it exists; if not, just take another + atom and call jnode_flush() for him. If current transaction handle has + already assigned atom (current atom) we have to close current transaction + prior to switch to another atom or do something with current atom. This + code tries to flush current atom. + + flush_some_atom() is called as part of memory clearing process. It is + invoked from balance_dirty_pages(), pdflushd, and entd. + + If we can flush no nodes, atom is committed, because this frees memory. + + If atom is too large or too old it is committed also. 
*/
int
flush_some_atom(jnode * start, long *nr_submitted, const struct writeback_control *wbc,
		int flags)
{
	reiser4_context *ctx = get_current_context();
	txn_mgr *tmgr = &get_super_private(ctx->super)->tmgr;
	txn_handle *txnh = ctx->trans;
	txn_atom *atom;
	int ret;

	BUG_ON(wbc->nr_to_write == 0);
	BUG_ON(*nr_submitted != 0);
	assert("zam-1042", txnh != NULL);
      repeat:
	if (txnh->atom == NULL) {
		/* current atom is not available, take first from txnmgr */
		spin_lock_txnmgr(tmgr);

		/* traverse the list of all atoms */
		list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
			/* lock atom before checking its state */
			spin_lock_atom(atom);

			/*
			 * we need an atom which is not being committed and
			 * which has no flushers (jnode_flush() add one flusher
			 * at the beginning and subtract one at the end).
			 */
			if (atom->stage < ASTAGE_PRE_COMMIT &&
			    atom->nr_flushers == 0) {
				spin_lock_txnh(txnh);
				capture_assign_txnh_nolock(atom, txnh);
				spin_unlock_txnh(txnh);

				goto found;
			}

			spin_unlock_atom(atom);
		}

		/*
		 * Write throttling is case of no one atom can be
		 * flushed/committed.
		 */
		if (!current_is_flush_bd_task()) {
			list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
				spin_lock_atom(atom);
				/* Repeat the check from the above.
				 */
				if (atom->stage < ASTAGE_PRE_COMMIT
				    && atom->nr_flushers == 0) {
					spin_lock_txnh(txnh);
					capture_assign_txnh_nolock(atom, txnh);
					spin_unlock_txnh(txnh);

					goto found;
				}
				if (atom->stage <= ASTAGE_POST_COMMIT) {
					spin_unlock_txnmgr(tmgr);
					/*
					 * we just wait until atom's flusher
					 * makes a progress in flushing or
					 * committing the atom
					 */
					reiser4_atom_wait_event(atom);
					goto repeat;
				}
				spin_unlock_atom(atom);
			}
		}
		spin_unlock_txnmgr(tmgr);
		return 0;
	      found:
		spin_unlock_txnmgr(tmgr);
	} else
		atom = get_current_atom_locked();

	BUG_ON(atom->super != ctx->super);
	assert("vs-35", atom->super == ctx->super);
	if (start) {
		/* only honor @start if it still belongs to our atom */
		spin_lock_jnode(start);
		ret = (atom == start->atom) ? 1 : 0;
		spin_unlock_jnode(start);
		if (ret == 0)
			start = NULL;
	}
	ret = flush_current_atom(flags, wbc->nr_to_write, nr_submitted, &atom, start);
	if (ret == 0) {
		/* flush_current_atom returns 0 only if it submitted for write
		   nothing */
		BUG_ON(*nr_submitted != 0);
		if (*nr_submitted == 0 || atom_should_commit_asap(atom)) {
			if (atom->capture_count < tmgr->atom_min_size &&
			    !(atom->flags & ATOM_CANCEL_FUSION)) {
				ret = txn_try_to_fuse_small_atom(tmgr, atom);
				if (ret == -E_REPEAT) {
					reiser4_preempt_point();
					goto repeat;
				}
			}
			/* if early flushing could not make more nodes clean,
			 * or atom is too old/large,
			 * we force current atom to commit */
			/* wait for commit completion but only if this
			 * wouldn't stall pdflushd and ent thread.
			 */
			if (!ctx->entd)
				txnh->flags |= TXNH_WAIT_COMMIT;
			atom->flags |= ATOM_FORCE_COMMIT;
		}
		spin_unlock_atom(atom);
	} else if (ret == -E_REPEAT) {
		if (*nr_submitted == 0) {
			/* let others who hampers flushing (hold longterm locks,
			   for instance) to free the way for flush */
			reiser4_preempt_point();
			goto repeat;
		}
		ret = 0;
	}
/*
	if (*nr_submitted > wbc->nr_to_write)
		warning("", "asked for %ld, written %ld\n", wbc->nr_to_write, *nr_submitted);
*/
	reiser4_txn_restart(ctx);

	return ret;
}

/* Remove processed nodes from atom's clean list (thereby remove them from transaction). */
void reiser4_invalidate_list(struct list_head *head)
{
	while (!list_empty(head)) {
		jnode *node;

		node = list_entry(head->next, jnode, capture_link);
		spin_lock_jnode(node);
		reiser4_uncapture_block(node);
		jput(node);
	}
}

/* Initialize wait links for the current thread's lock stack; callbacks unset. */
static void init_wlinks(txn_wait_links * wlinks)
{
	wlinks->_lock_stack = get_current_lock_stack();
	INIT_LIST_HEAD(&wlinks->_fwaitfor_link);
	INIT_LIST_HEAD(&wlinks->_fwaiting_link);
	wlinks->waitfor_cb = NULL;
	wlinks->waiting_cb = NULL;
}

/* Add atom to the atom's waitfor list and wait for somebody to wake us up;
   called with the atom lock held, drops it while sleeping, re-takes it and
   releases it (together with our temporary reference) before returning. */
void reiser4_atom_wait_event(txn_atom * atom)
{
	txn_wait_links _wlinks;

	assert_spin_locked(&(atom->alock));
	assert("nikita-3156",
	       lock_stack_isclean(get_current_lock_stack()) ||
	       atom->nr_running_queues > 0);

	init_wlinks(&_wlinks);
	list_add_tail(&_wlinks._fwaitfor_link, &atom->fwaitfor_list);
	/* pin the atom so it cannot be freed while we sleep */
	atomic_inc(&atom->refcount);
	spin_unlock_atom(atom);

	reiser4_prepare_to_sleep(_wlinks._lock_stack);
	reiser4_go_to_sleep(_wlinks._lock_stack);

	spin_lock_atom(atom);
	list_del(&_wlinks._fwaitfor_link);
	atom_dec_and_unlock(atom);
}

void reiser4_atom_set_stage(txn_atom * atom, txn_stage stage)
{
	assert("nikita-3535", atom != NULL);
	assert_spin_locked(&(atom->alock));
	assert("nikita-3536", stage <= ASTAGE_INVALID);
	/* Excelsior!
	 */
	/* atom stage may only increase monotonically */
	assert("nikita-3537", stage >= atom->stage);
	if (atom->stage != stage) {
		atom->stage = stage;
		reiser4_atom_send_event(atom);
	}
}

/* wake all threads which wait for an event */
void reiser4_atom_send_event(txn_atom * atom)
{
	assert_spin_locked(&(atom->alock));
	wakeup_atom_waitfor_list(atom);
}

/* Informs txn manager code that owner of this txn_handle should wait atom commit completion (for
   example, because it does fsync(2)) */
static int should_wait_commit(txn_handle * h)
{
	return h->flags & TXNH_WAIT_COMMIT;
}

/* State threaded through the try_commit_txnh() retry loop. */
typedef struct commit_data {
	txn_atom *atom;
	txn_handle *txnh;
	long nr_written;
	/* as an optimization we start committing atom by first trying to
	 * flush it few times without switching into ASTAGE_CAPTURE_WAIT. This
	 * allows to reduce stalls due to other threads waiting for atom in
	 * ASTAGE_CAPTURE_WAIT stage. ->preflush is counter of these
	 * preliminary flushes. */
	int preflush;
	/* have we waited on atom. */
	int wait;
	int failed;
	int wake_ktxnmgrd_up;
} commit_data;

/*
 * Called from commit_txnh() repeatedly, until either error happens, or atom
 * commits successfully. Returns 0 when done (cd->atom left locked),
 * -E_REPEAT when the loop must be restarted.
 */
static int try_commit_txnh(commit_data * cd)
{
	int result;

	assert("nikita-2968", lock_stack_isclean(get_current_lock_stack()));

	/* Get the atom and txnh locked. */
	cd->atom = txnh_get_atom(cd->txnh);
	assert("jmacd-309", cd->atom != NULL);
	spin_unlock_txnh(cd->txnh);

	/* if the previous iteration parked us as a waiter, un-count us */
	if (cd->wait) {
		cd->atom->nr_waiters--;
		cd->wait = 0;
	}

	if (cd->atom->stage == ASTAGE_DONE)
		return 0;

	if (cd->failed)
		return 0;

	if (atom_should_commit(cd->atom)) {
		/* if atom is _very_ large schedule it for commit as soon as
		 * possible. */
		if (atom_should_commit_asap(cd->atom)) {
			/*
			 * When atom is in PRE_COMMIT or later stage following
			 * invariant (encoded in atom_can_be_committed())
			 * holds: there is exactly one non-waiter transaction
			 * handle opened on this atom. When thread wants to
			 * wait until atom commits (for example sync()) it
			 * waits on atom event after increasing
			 * atom->nr_waiters (see below in this function). It
			 * cannot be guaranteed that atom is already committed
			 * after receiving event, so loop has to be
			 * re-started. But if atom switched into PRE_COMMIT
			 * stage and became too large, we cannot change its
			 * state back to CAPTURE_WAIT (atom stage can only
			 * increase monotonically), hence this check.
			 */
			if (cd->atom->stage < ASTAGE_CAPTURE_WAIT)
				reiser4_atom_set_stage(cd->atom,
						       ASTAGE_CAPTURE_WAIT);
			cd->atom->flags |= ATOM_FORCE_COMMIT;
		}
		if (cd->txnh->flags & TXNH_DONT_COMMIT) {
			/*
			 * this thread (transaction handle that is) doesn't
			 * want to commit atom. Notify waiters that handle is
			 * closed. This can happen, for example, when we are
			 * under VFS directory lock and don't want to commit
			 * atom right now to avoid stalling other threads
			 * working in the same directory.
			 */

			/* Wake the ktxnmgrd up if the ktxnmgrd is needed to
			 * commit this atom: no atom waiters and only one
			 * (our) open transaction handle. */
			cd->wake_ktxnmgrd_up =
			    cd->atom->txnh_count == 1 &&
			    cd->atom->nr_waiters == 0;
			reiser4_atom_send_event(cd->atom);
			result = 0;
		} else if (!atom_can_be_committed(cd->atom)) {
			if (should_wait_commit(cd->txnh)) {
				/* sync(): wait for commit */
				cd->atom->nr_waiters++;
				cd->wait = 1;
				reiser4_atom_wait_event(cd->atom);
				result = RETERR(-E_REPEAT);
			} else {
				result = 0;
			}
		} else if (cd->preflush > 0 && !is_current_ktxnmgrd()) {
			/*
			 * optimization: flush atom without switching it into
			 * ASTAGE_CAPTURE_WAIT.
			 *
			 * But don't do this for ktxnmgrd, because ktxnmgrd
			 * should never block on atom fusion.
			 */
			result = flush_current_atom(JNODE_FLUSH_WRITE_BLOCKS,
						    LONG_MAX, &cd->nr_written,
						    &cd->atom, NULL);
			if (result == 0) {
				spin_unlock_atom(cd->atom);
				cd->preflush = 0;
				result = RETERR(-E_REPEAT);
			} else	/* Atom wasn't flushed
				 * completely. Rinse. Repeat. */
				--cd->preflush;
		} else {
			/* We change atom state to ASTAGE_CAPTURE_WAIT to
			   prevent atom fusion and count ourself as an active
			   flusher */
			reiser4_atom_set_stage(cd->atom, ASTAGE_CAPTURE_WAIT);
			cd->atom->flags |= ATOM_FORCE_COMMIT;

			result =
			    commit_current_atom(&cd->nr_written, &cd->atom);
			if (result != 0 && result != -E_REPEAT)
				cd->failed = 1;
		}
	} else
		result = 0;

#if REISER4_DEBUG
	if (result == 0)
		assert_spin_locked(&(cd->atom->alock));
#endif

	/* perfectly valid assertion, except that when atom/txnh is not locked
	 * fusion can take place, and cd->atom points nowhere. */
	/*
	   assert("jmacd-1028", ergo(result != 0, spin_atom_is_not_locked(cd->atom)));
	 */
	return result;
}

/* Called to commit a transaction handle. This decrements the atom's number of open
   handles and if it is the last handle to commit and the atom should commit, initiates
   atom commit. if commit does not fail, return number of written blocks */
static int commit_txnh(txn_handle * txnh)
{
	commit_data cd;
	assert("umka-192", txnh != NULL);

	memset(&cd, 0, sizeof cd);
	cd.txnh = txnh;
	cd.preflush = 10;

	/* calls try_commit_txnh() until either atom commits, or error
	 * happens */
	while (try_commit_txnh(&cd) != 0)
		reiser4_preempt_point();

	/* detach the handle from the (now committed or failed) atom */
	spin_lock_txnh(txnh);

	cd.atom->txnh_count -= 1;
	txnh->atom = NULL;
	/* remove transaction handle from atom's list of transaction handles */
	list_del_init(&txnh->txnh_link);

	spin_unlock_txnh(txnh);
	atom_dec_and_unlock(cd.atom);
	/* if we don't want to do a commit (TXNH_DONT_COMMIT is set, probably
	 * because it takes time) by current thread, we do that work
	 * asynchronously by ktxnmgrd daemon.
*/ + if (cd.wake_ktxnmgrd_up) + ktxnmgrd_kick(&get_current_super_private()->tmgr); + + return 0; +} + +/* TRY_CAPTURE */ + +/* This routine attempts a single block-capture request. It may return -E_REPEAT if some + condition indicates that the request should be retried, and it may block if the + txn_capture mode does not include the TXN_CAPTURE_NONBLOCKING request flag. + + This routine encodes the basic logic of block capturing described by: + + http://namesys.com/v4/v4.html + + Our goal here is to ensure that any two blocks that contain dependent modifications + should commit at the same time. This function enforces this discipline by initiating + fusion whenever a transaction handle belonging to one atom requests to read or write a + block belonging to another atom (TXN_CAPTURE_WRITE or TXN_CAPTURE_READ_ATOMIC). + + In addition, this routine handles the initial assignment of atoms to blocks and + transaction handles. These are possible outcomes of this function: + + 1. The block and handle are already part of the same atom: return immediate success + + 2. The block is assigned but the handle is not: call capture_assign_txnh to assign + the handle to the block's atom. + + 3. The handle is assigned but the block is not: call capture_assign_block to assign + the block to the handle's atom. + + 4. Both handle and block are assigned, but to different atoms: call capture_init_fusion + to fuse atoms. + + 5. Neither block nor handle are assigned: create a new atom and assign them both. + + 6. A read request for a non-captured block: return immediate success. + + This function acquires and releases the handle's spinlock. This function is called + under the jnode lock and if the return value is 0, it returns with the jnode lock still + held. If the return is -E_REPEAT or some other error condition, the jnode lock is + released. The external interface (reiser4_try_capture) manages re-aquiring the jnode + lock in the failure case. 
+*/ +static int try_capture_block( + txn_handle * txnh, jnode * node, txn_capture mode, + txn_atom ** atom_alloc) +{ + txn_atom *block_atom; + txn_atom *txnh_atom; + + /* Should not call capture for READ_NONCOM requests, handled in reiser4_try_capture. */ + assert("jmacd-567", CAPTURE_TYPE(mode) != TXN_CAPTURE_READ_NONCOM); + + /* FIXME-ZAM-HANS: FIXME_LATER_JMACD Should assert that atom->tree == + * node->tree somewhere. */ + assert("umka-194", txnh != NULL); + assert("umka-195", node != NULL); + + /* The jnode is already locked! Being called from reiser4_try_capture(). */ + assert_spin_locked(&(node->guard)); + block_atom = node->atom; + + /* Get txnh spinlock, this allows us to compare txn_atom pointers but it doesn't + let us touch the atoms themselves. */ + spin_lock_txnh(txnh); + txnh_atom = txnh->atom; + /* Process of capturing continues into one of four branches depends on + which atoms from (block atom (node->atom), current atom (txnh->atom)) + exist. */ + if (txnh_atom == NULL) { + if (block_atom == NULL) { + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + /* assign empty atom to the txnh and repeat */ + return atom_begin_and_assign_to_txnh(atom_alloc, txnh); + } else { + atomic_inc(&block_atom->refcount); + /* node spin-lock isn't needed anymore */ + spin_unlock_jnode(node); + if (!spin_trylock_atom(block_atom)) { + spin_unlock_txnh(txnh); + spin_lock_atom(block_atom); + spin_lock_txnh(txnh); + } + /* re-check state after getting txnh and the node + * atom spin-locked */ + if (node->atom != block_atom || txnh->atom != NULL) { + spin_unlock_txnh(txnh); + atom_dec_and_unlock(block_atom); + return RETERR(-E_REPEAT); + } + atomic_dec(&block_atom->refcount); + if (block_atom->stage > ASTAGE_CAPTURE_WAIT || + (block_atom->stage == ASTAGE_CAPTURE_WAIT && + block_atom->txnh_count != 0)) + return capture_fuse_wait(txnh, block_atom, NULL, mode); + capture_assign_txnh_nolock(block_atom, txnh); + spin_unlock_txnh(txnh); + spin_unlock_atom(block_atom); + return 
RETERR(-E_REPEAT); + } + } else { + /* It is time to perform deadlock prevention check over the + node we want to capture. It is possible this node was locked + for read without capturing it. The optimization which allows + to do it helps us in keeping atoms independent as long as + possible but it may cause lock/fuse deadlock problems. + + A number of similar deadlock situations with locked but not + captured nodes were found. In each situation there are two + or more threads: one of them does flushing while another one + does routine balancing or tree lookup. The flushing thread + (F) sleeps in long term locking request for node (N), another + thread (A) sleeps in trying to capture some node already + belonging the atom F, F has a state which prevents + immediately fusion . + + Deadlocks of this kind cannot happen if node N was properly + captured by thread A. The F thread fuse atoms before locking + therefore current atom of thread F and current atom of thread + A became the same atom and thread A may proceed. This does + not work if node N was not captured because the fusion of + atom does not happens. + + The following scheme solves the deadlock: If + longterm_lock_znode locks and does not capture a znode, that + znode is marked as MISSED_IN_CAPTURE. A node marked this way + is processed by the code below which restores the missed + capture and fuses current atoms of all the node lock owners + by calling the fuse_not_fused_lock_owners() function. 
*/ + if (JF_ISSET(node, JNODE_MISSED_IN_CAPTURE)) { + JF_CLR(node, JNODE_MISSED_IN_CAPTURE); + if (jnode_is_znode(node) && znode_is_locked(JZNODE(node))) { + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + fuse_not_fused_lock_owners(txnh, JZNODE(node)); + return RETERR(-E_REPEAT); + } + } + if (block_atom == NULL) { + atomic_inc(&txnh_atom->refcount); + spin_unlock_txnh(txnh); + if (!spin_trylock_atom(txnh_atom)) { + spin_unlock_jnode(node); + spin_lock_atom(txnh_atom); + spin_lock_jnode(node); + } + if (txnh->atom != txnh_atom || node->atom != NULL + || JF_ISSET(node, JNODE_IS_DYING)) { + spin_unlock_jnode(node); + atom_dec_and_unlock(txnh_atom); + return RETERR(-E_REPEAT); + } + atomic_dec(&txnh_atom->refcount); + capture_assign_block_nolock(txnh_atom, node); + spin_unlock_atom(txnh_atom); + } else { + if (txnh_atom != block_atom) { + if (mode & TXN_CAPTURE_DONT_FUSE) { + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + /* we are in a "no-fusion" mode and @node is + * already part of transaction. */ + return RETERR(-E_NO_NEIGHBOR); + } + return capture_init_fusion(node, txnh, mode); + } + spin_unlock_txnh(txnh); + } + } + return 0; +} + +static txn_capture +build_capture_mode(jnode * node, znode_lock_mode lock_mode, txn_capture flags) +{ + txn_capture cap_mode; + + assert_spin_locked(&(node->guard)); + + /* FIXME_JMACD No way to set TXN_CAPTURE_READ_MODIFY yet. */ + + if (lock_mode == ZNODE_WRITE_LOCK) { + cap_mode = TXN_CAPTURE_WRITE; + } else if (node->atom != NULL) { + cap_mode = TXN_CAPTURE_WRITE; + } else if (0 && /* txnh->mode == TXN_READ_FUSING && */ + jnode_get_level(node) == LEAF_LEVEL) { + /* NOTE-NIKITA TXN_READ_FUSING is not currently used */ + /* We only need a READ_FUSING capture at the leaf level. This + is because the internal levels of the tree (twigs included) + are redundant from the point of the user that asked for a + read-fusing transcrash. 
The user only wants to read-fuse + atoms due to reading uncommitted data that another user has + written. It is the file system that reads/writes the + internal tree levels, the user only reads/writes leaves. */ + cap_mode = TXN_CAPTURE_READ_ATOMIC; + } else { + /* In this case (read lock at a non-leaf) there's no reason to + * capture. */ + /* cap_mode = TXN_CAPTURE_READ_NONCOM; */ + return 0; + } + + cap_mode |= (flags & (TXN_CAPTURE_NONBLOCKING | TXN_CAPTURE_DONT_FUSE)); + assert("nikita-3186", cap_mode != 0); + return cap_mode; +} + +/* This is an external interface to try_capture_block(), it calls + try_capture_block() repeatedly as long as -E_REPEAT is returned. + + @node: node to capture, + @lock_mode: read or write lock is used in capture mode calculation, + @flags: see txn_capture flags enumeration, + @can_coc : can copy-on-capture + + @return: 0 - node was successfully captured, -E_REPEAT - capture request + cannot be processed immediately as it was requested in flags, + < 0 - other errors. +*/ +int reiser4_try_capture(jnode *node, znode_lock_mode lock_mode, + txn_capture flags) +{ + txn_atom *atom_alloc = NULL; + txn_capture cap_mode; + txn_handle *txnh = get_current_context()->trans; + int ret; + + assert_spin_locked(&(node->guard)); + + repeat: + if (JF_ISSET(node, JNODE_IS_DYING)) + return RETERR(-EINVAL); + if (node->atom != NULL && txnh->atom == node->atom) + return 0; + cap_mode = build_capture_mode(node, lock_mode, flags); + if (cap_mode == 0 || + (!(cap_mode & TXN_CAPTURE_WTYPES) && node->atom == NULL)) { + /* Mark this node as "MISSED". It helps in further deadlock + * analysis */ + if (jnode_is_znode(node)) + JF_SET(node, JNODE_MISSED_IN_CAPTURE); + return 0; + } + /* Repeat try_capture as long as -E_REPEAT is returned. */ + ret = try_capture_block(txnh, node, cap_mode, &atom_alloc); + /* Regardless of non_blocking: + + If ret == 0 then jnode is still locked. + If ret != 0 then jnode is unlocked. 
+ */ +#if REISER4_DEBUG + if (ret == 0) + assert_spin_locked(&(node->guard)); + else + assert_spin_not_locked(&(node->guard)); +#endif + assert_spin_not_locked(&(txnh->guard)); + + if (ret == -E_REPEAT) { + /* E_REPEAT implies all locks were released, therefore we need + to take the jnode's lock again. */ + spin_lock_jnode(node); + + /* Although this may appear to be a busy loop, it is not. + There are several conditions that cause E_REPEAT to be + returned by the call to try_capture_block, all cases + indicating some kind of state change that means you should + retry the request and will get a different result. In some + cases this could be avoided with some extra code, but + generally it is done because the necessary locks were + released as a result of the operation and repeating is the + simplest thing to do (less bug potential). The cases are: + atom fusion returns E_REPEAT after it completes (jnode and + txnh were unlocked); race conditions in assign_block, + assign_txnh, and init_fusion return E_REPEAT (trylock + failure); after going to sleep in capture_fuse_wait + (request was blocked but may now succeed). I'm not quite + sure how capture_copy works yet, but it may also return + E_REPEAT. When the request is legitimately blocked, the + requestor goes to sleep in fuse_wait, so this is not a busy + loop. */ + /* NOTE-NIKITA: still don't understand: + + try_capture_block->capture_assign_txnh->spin_trylock_atom->E_REPEAT + + looks like busy loop? + */ + goto repeat; + } + + /* free extra atom object that was possibly allocated by + try_capture_block(). + + Do this before acquiring jnode spin lock to + minimize time spent under lock. --nikita */ + if (atom_alloc != NULL) { + kmem_cache_free(_atom_slab, atom_alloc); + } + + if (ret != 0) { + if (ret == -E_BLOCK) { + assert("nikita-3360", + cap_mode & TXN_CAPTURE_NONBLOCKING); + ret = -E_REPEAT; + } + + /* Failure means jnode is not locked. 
FIXME_LATER_JMACD May + want to fix the above code to avoid releasing the lock and + re-acquiring it, but there are cases were failure occurs + when the lock is not held, and those cases would need to be + modified to re-take the lock. */ + spin_lock_jnode(node); + } + + /* Jnode is still locked. */ + assert_spin_locked(&(node->guard)); + return ret; +} + +static void release_two_atoms(txn_atom *one, txn_atom *two) +{ + spin_unlock_atom(one); + atom_dec_and_unlock(two); + spin_lock_atom(one); + atom_dec_and_unlock(one); +} + +/* This function sets up a call to try_capture_block and repeats as long as -E_REPEAT is + returned by that routine. The txn_capture request mode is computed here depending on + the transaction handle's type and the lock request. This is called from the depths of + the lock manager with the jnode lock held and it always returns with the jnode lock + held. +*/ + +/* fuse all 'active' atoms of lock owners of given node. */ +static void fuse_not_fused_lock_owners(txn_handle * txnh, znode * node) +{ + lock_handle *lh; + int repeat; + txn_atom *atomh, *atomf; + reiser4_context *me = get_current_context(); + reiser4_context *ctx = NULL; + + assert_spin_not_locked(&(ZJNODE(node)->guard)); + assert_spin_not_locked(&(txnh->hlock)); + + repeat: + repeat = 0; + atomh = txnh_get_atom(txnh); + spin_unlock_txnh(txnh); + assert("zam-692", atomh != NULL); + + spin_lock_zlock(&node->lock); + /* inspect list of lock owners */ + list_for_each_entry(lh, &node->lock.owners, owners_link) { + ctx = get_context_by_lock_stack(lh->owner); + if (ctx == me) + continue; + /* below we use two assumptions to avoid addition spin-locks + for checking the condition : + + 1) if the lock stack has lock, the transaction should be + opened, i.e. ctx->trans != NULL; + + 2) reading of well-aligned ctx->trans->atom is atomic, if it + equals to the address of spin-locked atomh, we take that + the atoms are the same, nothing has to be captured. 
*/ + if (atomh != ctx->trans->atom) { + reiser4_wake_up(lh->owner); + repeat = 1; + break; + } + } + if (repeat) { + if (!spin_trylock_txnh(ctx->trans)) { + spin_unlock_zlock(&node->lock); + spin_unlock_atom(atomh); + goto repeat; + } + atomf = ctx->trans->atom; + if (atomf == NULL) { + capture_assign_txnh_nolock(atomh, ctx->trans); + /* release zlock lock _after_ assigning the atom to the + * transaction handle, otherwise the lock owner thread + * may unlock all znodes, exit kernel context and here + * we would access an invalid transaction handle. */ + spin_unlock_zlock(&node->lock); + spin_unlock_atom(atomh); + spin_unlock_txnh(ctx->trans); + goto repeat; + } + assert("zam-1059", atomf != atomh); + spin_unlock_zlock(&node->lock); + atomic_inc(&atomh->refcount); + atomic_inc(&atomf->refcount); + spin_unlock_txnh(ctx->trans); + if (atomf > atomh) { + spin_lock_atom_nested(atomf); + } else { + spin_unlock_atom(atomh); + spin_lock_atom(atomf); + spin_lock_atom_nested(atomh); + } + if (atomh == atomf || !atom_isopen(atomh) || !atom_isopen(atomf)) { + release_two_atoms(atomf, atomh); + goto repeat; + } + atomic_dec(&atomh->refcount); + atomic_dec(&atomf->refcount); + capture_fuse_into(atomf, atomh); + goto repeat; + } + spin_unlock_zlock(&node->lock); + spin_unlock_atom(atomh); +} + +/* This is the interface to capture unformatted nodes via their struct page + reference. Currently it is only used in reiser4_invalidatepage */ +int try_capture_page_to_invalidate(struct page *pg) +{ + int ret; + jnode *node; + + assert("umka-292", pg != NULL); + assert("nikita-2597", PageLocked(pg)); + + if (IS_ERR(node = jnode_of_page(pg))) { + return PTR_ERR(node); + } + + spin_lock_jnode(node); + unlock_page(pg); + + ret = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + spin_unlock_jnode(node); + jput(node); + lock_page(pg); + return ret; +} + +/* This informs the transaction manager when a node is deleted. Add the block to the + atom's delete set and uncapture the block. 
+ +VS-FIXME-HANS: this E_REPEAT paradigm clutters the code and creates a need for +explanations. find all the functions that use it, and unless there is some very +good reason to use it (I have not noticed one so far and I doubt it exists, but maybe somewhere somehow....), +move the loop to inside the function. + +VS-FIXME-HANS: can this code be at all streamlined? In particular, can you lock and unlock the jnode fewer times? + */ +void reiser4_uncapture_page(struct page *pg) +{ + jnode *node; + txn_atom *atom; + + assert("umka-199", pg != NULL); + assert("nikita-3155", PageLocked(pg)); + + clear_page_dirty_for_io(pg); + + reiser4_wait_page_writeback(pg); + + node = jprivate(pg); + BUG_ON(node == NULL); + + spin_lock_jnode(node); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + + /* We can remove jnode from transaction even if it is on flush queue + * prepped list, we only need to be sure that flush queue is not being + * written by reiser4_write_fq(). reiser4_write_fq() does not use atom + * spin lock for protection of the prepped nodes list, instead + * write_fq() increments atom's nr_running_queues counters for the time + * when prepped list is not protected by spin lock. Here we check this + * counter if we want to remove jnode from flush queue and, if the + * counter is not zero, wait all reiser4_write_fq() for this atom to + * complete. This is not significant overhead. */ + while (JF_ISSET(node, JNODE_FLUSH_QUEUED) && atom->nr_running_queues) { + spin_unlock_jnode(node); + /* + * at this moment we want to wait for "atom event", viz. wait + * until @node can be removed from flush queue. But + * reiser4_atom_wait_event() cannot be called with page locked, + * because it deadlocks with jnode_extent_write(). Unlock page, + * after making sure (through get_page()) that it cannot + * be released from memory. 
+ */ + get_page(pg); + unlock_page(pg); + reiser4_atom_wait_event(atom); + lock_page(pg); + /* + * page may has been detached by ->writepage()->releasepage(). + */ + reiser4_wait_page_writeback(pg); + spin_lock_jnode(node); + put_page(pg); + atom = jnode_get_atom(node); +/* VS-FIXME-HANS: improve the commenting in this function */ + if (atom == NULL) { + spin_unlock_jnode(node); + return; + } + } + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +/* this is used in extent's kill hook to uncapture and unhash jnodes attached to + * inode's tree of jnodes */ +void reiser4_uncapture_jnode(jnode * node) +{ + txn_atom *atom; + + assert_spin_locked(&(node->guard)); + assert("", node->pg == 0); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +/* No-locking version of assign_txnh. Sets the transaction handle's atom pointer, + increases atom refcount and txnh_count, adds to txnh_list. */ +static void capture_assign_txnh_nolock(txn_atom *atom, txn_handle *txnh) +{ + assert("umka-200", atom != NULL); + assert("umka-201", txnh != NULL); + + assert_spin_locked(&(txnh->hlock)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-824", txnh->atom == NULL); + assert("nikita-3540", atom_isopen(atom)); + BUG_ON(txnh->atom != NULL); + + atomic_inc(&atom->refcount); + txnh->atom = atom; + reiser4_ctx_gfp_mask_set(); + list_add_tail(&txnh->txnh_link, &atom->txnh_list); + atom->txnh_count += 1; +} + +/* No-locking version of assign_block. Sets the block's atom pointer, references the + block, adds it to the clean or dirty capture_jnode list, increments capture_count. 
*/ +static void capture_assign_block_nolock(txn_atom *atom, jnode *node) +{ + assert("umka-202", atom != NULL); + assert("umka-203", node != NULL); + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-323", node->atom == NULL); + BUG_ON(!list_empty_careful(&node->capture_link)); + assert("nikita-3470", !JF_ISSET(node, JNODE_DIRTY)); + + /* Pointer from jnode to atom is not counted in atom->refcount. */ + node->atom = atom; + + list_add_tail(&node->capture_link, ATOM_CLEAN_LIST(atom)); + atom->capture_count += 1; + /* reference to jnode is acquired by atom. */ + jref(node); + + ON_DEBUG(count_jnode(atom, node, NOT_CAPTURED, CLEAN_LIST, 1)); + + LOCK_CNT_INC(t_refs); +} + +/* common code for dirtying both unformatted jnodes and formatted znodes. */ +static void do_jnode_make_dirty(jnode * node, txn_atom * atom) +{ + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-3981", !JF_ISSET(node, JNODE_DIRTY)); + + JF_SET(node, JNODE_DIRTY); + + if (!JF_ISSET(node, JNODE_CLUSTER_PAGE)) + get_current_context()->nr_marked_dirty++; + + /* We grab2flush_reserve one additional block only if node was + not CREATED and jnode_flush did not sort it into neither + relocate set nor overwrite one. If node is in overwrite or + relocate set we assume that atom's flush reserved counter was + already adjusted. */ + if (!JF_ISSET(node, JNODE_CREATED) && !JF_ISSET(node, JNODE_RELOC) + && !JF_ISSET(node, JNODE_OVRWR) && jnode_is_leaf(node) + && !jnode_is_cluster_page(node)) { + assert("vs-1093", !reiser4_blocknr_is_fake(&node->blocknr)); + assert("vs-1506", *jnode_get_block(node) != 0); + grabbed2flush_reserved_nolock(atom, (__u64) 1); + JF_SET(node, JNODE_FLUSH_RESERVED); + } + + if (!JF_ISSET(node, JNODE_FLUSH_QUEUED)) { + /* If the atom is not set yet, it will be added to the appropriate list in + capture_assign_block_nolock. */ + /* Sometimes a node is set dirty before being captured -- the case for new + jnodes. 
In that case the jnode will be added to the appropriate list + in capture_assign_block_nolock. Another reason not to re-link jnode is + that jnode is on a flush queue (see flush.c for details) */ + + int level = jnode_get_level(node); + + assert("nikita-3152", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-654", atom->stage < ASTAGE_PRE_COMMIT); + assert("nikita-2607", 0 <= level); + assert("nikita-2606", level <= REAL_MAX_ZTREE_HEIGHT); + + /* move node to atom's dirty list */ + list_move_tail(&node->capture_link, ATOM_DIRTY_LIST(atom, level)); + ON_DEBUG(count_jnode + (atom, node, NODE_LIST(node), DIRTY_LIST, 1)); + } +} + +/* Set the dirty status for this (spin locked) jnode. */ +void jnode_make_dirty_locked(jnode * node) +{ + assert("umka-204", node != NULL); + assert_spin_locked(&(node->guard)); + + if (REISER4_DEBUG && rofs_jnode(node)) { + warning("nikita-3365", "Dirtying jnode on rofs"); + dump_stack(); + } + + /* Fast check for already dirty node */ + if (!JF_ISSET(node, JNODE_DIRTY)) { + txn_atom *atom; + + atom = jnode_get_atom(node); + assert("vs-1094", atom); + /* Check jnode dirty status again because node spin lock might + * be released inside jnode_get_atom(). */ + if (likely(!JF_ISSET(node, JNODE_DIRTY))) + do_jnode_make_dirty(node, atom); + spin_unlock_atom(atom); + } +} + +/* Set the dirty status for this znode. */ +void znode_make_dirty(znode * z) +{ + jnode *node; + struct page *page; + + assert("umka-204", z != NULL); + assert("nikita-3290", znode_above_root(z) || znode_is_loaded(z)); + assert("nikita-3560", znode_is_write_locked(z)); + + node = ZJNODE(z); + /* znode is longterm locked, we can check dirty bit without spinlock */ + if (JF_ISSET(node, JNODE_DIRTY)) { + /* znode is dirty already. 
All we have to do is to change znode version */ + z->version = znode_build_version(jnode_get_tree(node)); + return; + } + + spin_lock_jnode(node); + jnode_make_dirty_locked(node); + page = jnode_page(node); + if (page != NULL) { + /* this is useful assertion (allows one to check that no + * modifications are lost due to update of in-flight page), + * but it requires locking on page to check PG_writeback + * bit. */ + /* assert("nikita-3292", + !PageWriteback(page) || ZF_ISSET(z, JNODE_WRITEBACK)); */ + get_page(page); + + /* jnode lock is not needed for the rest of + * znode_set_dirty(). */ + spin_unlock_jnode(node); + /* reiser4 file write code calls set_page_dirty for + * unformatted nodes, for formatted nodes we do it here. */ + set_page_dirty_notag(page); + put_page(page); + /* bump version counter in znode */ + z->version = znode_build_version(jnode_get_tree(node)); + } else { + assert("zam-596", znode_above_root(JZNODE(node))); + spin_unlock_jnode(node); + } + + assert("nikita-1900", znode_is_write_locked(z)); + assert("jmacd-9777", node->atom != NULL); +} + +int reiser4_sync_atom(txn_atom * atom) +{ + int result; + txn_handle *txnh; + + txnh = get_current_context()->trans; + + result = 0; + if (atom != NULL) { + if (atom->stage < ASTAGE_PRE_COMMIT) { + spin_lock_txnh(txnh); + capture_assign_txnh_nolock(atom, txnh); + result = force_commit_atom(txnh); + } else if (atom->stage < ASTAGE_POST_COMMIT) { + /* wait atom commit */ + reiser4_atom_wait_event(atom); + /* try once more */ + result = RETERR(-E_REPEAT); + } else + spin_unlock_atom(atom); + } + return result; +} + +#if REISER4_DEBUG + +/* move jnode form one list to another + call this after atom->capture_count is updated */ +void +count_jnode(txn_atom * atom, jnode * node, atom_list old_list, + atom_list new_list, int check_lists) +{ + struct list_head *pos; + + assert("zam-1018", atom_is_protected(atom)); + assert_spin_locked(&(node->guard)); + assert("", NODE_LIST(node) == old_list); + + switch 
(NODE_LIST(node)) { + case NOT_CAPTURED: + break; + case DIRTY_LIST: + assert("", atom->dirty > 0); + atom->dirty--; + break; + case CLEAN_LIST: + assert("", atom->clean > 0); + atom->clean--; + break; + case FQ_LIST: + assert("", atom->fq > 0); + atom->fq--; + break; + case WB_LIST: + assert("", atom->wb > 0); + atom->wb--; + break; + case OVRWR_LIST: + assert("", atom->ovrwr > 0); + atom->ovrwr--; + break; + default: + impossible("", ""); + } + + switch (new_list) { + case NOT_CAPTURED: + break; + case DIRTY_LIST: + atom->dirty++; + break; + case CLEAN_LIST: + atom->clean++; + break; + case FQ_LIST: + atom->fq++; + break; + case WB_LIST: + atom->wb++; + break; + case OVRWR_LIST: + atom->ovrwr++; + break; + default: + impossible("", ""); + } + ASSIGN_NODE_LIST(node, new_list); + if (0 && check_lists) { + int count; + tree_level level; + + count = 0; + + /* flush queue list */ + /* reiser4_check_fq(atom); */ + + /* dirty list */ + count = 0; + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + list_for_each(pos, ATOM_DIRTY_LIST(atom, level)) + count++; + } + if (count != atom->dirty) + warning("", "dirty counter %d, real %d\n", atom->dirty, + count); + + /* clean list */ + count = 0; + list_for_each(pos, ATOM_CLEAN_LIST(atom)) + count++; + if (count != atom->clean) + warning("", "clean counter %d, real %d\n", atom->clean, + count); + + /* wb list */ + count = 0; + list_for_each(pos, ATOM_WB_LIST(atom)) + count++; + if (count != atom->wb) + warning("", "wb counter %d, real %d\n", atom->wb, + count); + + /* overwrite list */ + count = 0; + list_for_each(pos, ATOM_OVRWR_LIST(atom)) + count++; + + if (count != atom->ovrwr) + warning("", "ovrwr counter %d, real %d\n", atom->ovrwr, + count); + } + assert("vs-1624", atom->num_queued == atom->fq); + if (atom->capture_count != + atom->dirty + atom->clean + atom->ovrwr + atom->wb + atom->fq) { + printk + ("count %d, dirty %d clean %d ovrwr %d wb %d fq %d\n", + atom->capture_count, atom->dirty, atom->clean, 
atom->ovrwr, + atom->wb, atom->fq); + assert("vs-1622", + atom->capture_count == + atom->dirty + atom->clean + atom->ovrwr + atom->wb + + atom->fq); + } +} + +#endif + +int reiser4_capture_super_block(struct super_block *s) +{ + int result; + znode *uber; + lock_handle lh; + + init_lh(&lh); + result = get_uber_znode(reiser4_get_tree(s), + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI, &lh); + if (result) + return result; + + uber = lh.node; + /* Grabbing one block for superblock */ + result = reiser4_grab_space_force((__u64) 1, BA_RESERVED); + if (result != 0) + return result; + + znode_make_dirty(uber); + + done_lh(&lh); + return 0; +} + +/* Wakeup every handle on the atom's WAITFOR list */ +static void wakeup_atom_waitfor_list(txn_atom * atom) +{ + txn_wait_links *wlinks; + + assert("umka-210", atom != NULL); + + /* atom is locked */ + list_for_each_entry(wlinks, &atom->fwaitfor_list, _fwaitfor_link) { + if (wlinks->waitfor_cb == NULL || + wlinks->waitfor_cb(atom, wlinks)) + /* Wake up. */ + reiser4_wake_up(wlinks->_lock_stack); + } +} + +/* Wakeup every handle on the atom's WAITING list */ +static void wakeup_atom_waiting_list(txn_atom * atom) +{ + txn_wait_links *wlinks; + + assert("umka-211", atom != NULL); + + /* atom is locked */ + list_for_each_entry(wlinks, &atom->fwaiting_list, _fwaiting_link) { + if (wlinks->waiting_cb == NULL || + wlinks->waiting_cb(atom, wlinks)) + /* Wake up. */ + reiser4_wake_up(wlinks->_lock_stack); + } +} + +/* helper function used by capture_fuse_wait() to avoid "spurious wake-ups" */ +static int wait_for_fusion(txn_atom * atom, txn_wait_links * wlinks) +{ + assert("nikita-3330", atom != NULL); + assert_spin_locked(&(atom->alock)); + + /* atom->txnh_count == 1 is for waking waiters up if we are releasing + * last transaction handle. */ + return atom->stage != ASTAGE_CAPTURE_WAIT || atom->txnh_count == 1; +} + +/* The general purpose of this function is to wait on the first of two possible events. 
+ The situation is that a handle (and its atom atomh) is blocked trying to capture a + block (i.e., node) but the node's atom (atomf) is in the CAPTURE_WAIT state. The + handle's atom (atomh) is not in the CAPTURE_WAIT state. However, atomh could fuse with + another atom or, due to age, enter the CAPTURE_WAIT state itself, at which point it + needs to unblock the handle to avoid deadlock. When the txnh is unblocked it will + proceed and fuse the two atoms in the CAPTURE_WAIT state. + + In other words, if either atomh or atomf change state, the handle will be awakened, + thus there are two lists per atom: WAITING and WAITFOR. + + This is also called by capture_assign_txnh with (atomh == NULL) to wait for atomf to + close but it is not assigned to an atom of its own. + + Lock ordering in this method: all four locks are held: JNODE_LOCK, TXNH_LOCK, + BOTH_ATOM_LOCKS. Result: all four locks are released. +*/ +static int capture_fuse_wait(txn_handle * txnh, txn_atom * atomf, + txn_atom * atomh, txn_capture mode) +{ + int ret; + txn_wait_links wlinks; + + assert("umka-213", txnh != NULL); + assert("umka-214", atomf != NULL); + + if ((mode & TXN_CAPTURE_NONBLOCKING) != 0) { + spin_unlock_txnh(txnh); + spin_unlock_atom(atomf); + + if (atomh) { + spin_unlock_atom(atomh); + } + + return RETERR(-E_BLOCK); + } + + /* Initialize the waiting list links. */ + init_wlinks(&wlinks); + + /* Add txnh to atomf's waitfor list, unlock atomf. */ + list_add_tail(&wlinks._fwaitfor_link, &atomf->fwaitfor_list); + wlinks.waitfor_cb = wait_for_fusion; + atomic_inc(&atomf->refcount); + spin_unlock_atom(atomf); + + if (atomh) { + /* Add txnh to atomh's waiting list, unlock atomh. */ + list_add_tail(&wlinks._fwaiting_link, &atomh->fwaiting_list); + atomic_inc(&atomh->refcount); + spin_unlock_atom(atomh); + } + + /* Go to sleep. 
*/ + spin_unlock_txnh(txnh); + + ret = reiser4_prepare_to_sleep(wlinks._lock_stack); + if (ret == 0) { + reiser4_go_to_sleep(wlinks._lock_stack); + ret = RETERR(-E_REPEAT); + } + + /* Remove from the waitfor list. */ + spin_lock_atom(atomf); + + list_del(&wlinks._fwaitfor_link); + atom_dec_and_unlock(atomf); + + if (atomh) { + /* Remove from the waiting list. */ + spin_lock_atom(atomh); + list_del(&wlinks._fwaiting_link); + atom_dec_and_unlock(atomh); + } + return ret; +} + +static void lock_two_atoms(txn_atom * one, txn_atom * two) +{ + assert("zam-1067", one != two); + + /* lock the atom with lesser address first */ + if (one < two) { + spin_lock_atom(one); + spin_lock_atom_nested(two); + } else { + spin_lock_atom(two); + spin_lock_atom_nested(one); + } +} + +/* Perform the necessary work to prepare for fusing two atoms, which involves + * acquiring two atom locks in the proper order. If one of the node's atom is + * blocking fusion (i.e., it is in the CAPTURE_WAIT stage) and the handle's + * atom is not then the handle's request is put to sleep. If the node's atom + * is committing, then the node can be copy-on-captured. Otherwise, pick the + * atom with fewer pointers to be fused into the atom with more pointer and + * call capture_fuse_into. 
+ */ +static int capture_init_fusion(jnode *node, txn_handle *txnh, txn_capture mode) +{ + txn_atom * txnh_atom = txnh->atom; + txn_atom * block_atom = node->atom; + + atomic_inc(&txnh_atom->refcount); + atomic_inc(&block_atom->refcount); + + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + + lock_two_atoms(txnh_atom, block_atom); + + if (txnh->atom != txnh_atom || node->atom != block_atom ) { + release_two_atoms(txnh_atom, block_atom); + return RETERR(-E_REPEAT); + } + + atomic_dec(&txnh_atom->refcount); + atomic_dec(&block_atom->refcount); + + assert ("zam-1066", atom_isopen(txnh_atom)); + + if (txnh_atom->stage >= block_atom->stage || + (block_atom->stage == ASTAGE_CAPTURE_WAIT && block_atom->txnh_count == 0)) { + capture_fuse_into(txnh_atom, block_atom); + return RETERR(-E_REPEAT); + } + spin_lock_txnh(txnh); + return capture_fuse_wait(txnh, block_atom, txnh_atom, mode); +} + +/* This function splices together two jnode lists (small and large) and sets all jnodes in + the small list to point to the large atom. Returns the length of the list. */ +static int +capture_fuse_jnode_lists(txn_atom *large, struct list_head *large_head, + struct list_head *small_head) +{ + int count = 0; + jnode *node; + + assert("umka-218", large != NULL); + assert("umka-219", large_head != NULL); + assert("umka-220", small_head != NULL); + /* small atom should be locked also. */ + assert_spin_locked(&(large->alock)); + + /* For every jnode on small's capture list... */ + list_for_each_entry(node, small_head, capture_link) { + count += 1; + + /* With the jnode lock held, update atom pointer. */ + spin_lock_jnode(node); + node->atom = large; + spin_unlock_jnode(node); + } + + /* Splice the lists. */ + list_splice_init(small_head, large_head->prev); + + return count; +} + +/* This function splices together two txnh lists (small and large) and sets all txn handles in + the small list to point to the large atom. Returns the length of the list. 
*/ +static int +capture_fuse_txnh_lists(txn_atom *large, struct list_head *large_head, + struct list_head *small_head) +{ + int count = 0; + txn_handle *txnh; + + assert("umka-221", large != NULL); + assert("umka-222", large_head != NULL); + assert("umka-223", small_head != NULL); + + /* Adjust every txnh to the new atom. */ + list_for_each_entry(txnh, small_head, txnh_link) { + count += 1; + + /* With the txnh lock held, update atom pointer. */ + spin_lock_txnh(txnh); + txnh->atom = large; + spin_unlock_txnh(txnh); + } + + /* Splice the txn_handle list. */ + list_splice_init(small_head, large_head->prev); + + return count; +} + +/* This function fuses two atoms. The captured nodes and handles belonging to SMALL are + added to LARGE and their ->atom pointers are all updated. The associated counts are + updated as well, and any waiting handles belonging to either are awakened. Finally the + smaller atom's refcount is decremented. +*/ +static void capture_fuse_into(txn_atom * small, txn_atom * large) +{ + int level; + unsigned zcount = 0; + unsigned tcount = 0; + + assert("umka-224", small != NULL); + assert("umka-225", small != NULL); + + assert_spin_locked(&(large->alock)); + assert_spin_locked(&(small->alock)); + + assert("jmacd-201", atom_isopen(small)); + assert("jmacd-202", atom_isopen(large)); + + /* Splice and update the per-level dirty jnode lists */ + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + zcount += + capture_fuse_jnode_lists(large, + ATOM_DIRTY_LIST(large, level), + ATOM_DIRTY_LIST(small, level)); + } + + /* Splice and update the [clean,dirty] jnode and txnh lists */ + zcount += + capture_fuse_jnode_lists(large, ATOM_CLEAN_LIST(large), + ATOM_CLEAN_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, ATOM_OVRWR_LIST(large), + ATOM_OVRWR_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, ATOM_WB_LIST(large), + ATOM_WB_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, &large->inodes, &small->inodes); + 
tcount += + capture_fuse_txnh_lists(large, &large->txnh_list, + &small->txnh_list); + + /* Check our accounting. */ + assert("jmacd-1063", + zcount + small->num_queued == small->capture_count); + assert("jmacd-1065", tcount == small->txnh_count); + + /* sum numbers of waiters threads */ + large->nr_waiters += small->nr_waiters; + small->nr_waiters = 0; + + /* splice flush queues */ + reiser4_fuse_fq(large, small); + + /* update counter of jnode on every atom' list */ + ON_DEBUG(large->dirty += small->dirty; + small->dirty = 0; + large->clean += small->clean; + small->clean = 0; + large->ovrwr += small->ovrwr; + small->ovrwr = 0; + large->wb += small->wb; + small->wb = 0; + large->fq += small->fq; + small->fq = 0;); + + /* count flushers in result atom */ + large->nr_flushers += small->nr_flushers; + small->nr_flushers = 0; + + /* update counts of flushed nodes */ + large->flushed += small->flushed; + small->flushed = 0; + + /* Transfer list counts to large. */ + large->txnh_count += small->txnh_count; + large->capture_count += small->capture_count; + + /* Add all txnh references to large. */ + atomic_add(small->txnh_count, &large->refcount); + atomic_sub(small->txnh_count, &small->refcount); + + /* Reset small counts */ + small->txnh_count = 0; + small->capture_count = 0; + + /* Assign the oldest start_time, merge flags. */ + large->start_time = min(large->start_time, small->start_time); + large->flags |= small->flags; + + /* Merge blocknr sets. */ + blocknr_set_merge(&small->wandered_map, &large->wandered_map); + + /* Merge delete sets. 
*/ + atom_dset_merge(small, large); + + /* Merge allocated/deleted file counts */ + large->nr_objects_deleted += small->nr_objects_deleted; + large->nr_objects_created += small->nr_objects_created; + + small->nr_objects_deleted = 0; + small->nr_objects_created = 0; + + /* Merge allocated blocks counts */ + large->nr_blocks_allocated += small->nr_blocks_allocated; + + large->nr_running_queues += small->nr_running_queues; + small->nr_running_queues = 0; + + /* Merge blocks reserved for overwrite set. */ + large->flush_reserved += small->flush_reserved; + small->flush_reserved = 0; + + if (large->stage < small->stage) { + /* Large only needs to notify if it has changed state. */ + reiser4_atom_set_stage(large, small->stage); + wakeup_atom_waiting_list(large); + } + + reiser4_atom_set_stage(small, ASTAGE_INVALID); + + /* Notify any waiters--small needs to unload its wait lists. Waiters + actually remove themselves from the list before returning from the + fuse_wait function. */ + wakeup_atom_waiting_list(small); + + /* Unlock atoms */ + spin_unlock_atom(large); + atom_dec_and_unlock(small); +} + +/* TXNMGR STUFF */ + +/* Release a block from the atom, reversing the effects of being captured, + do not release atom's reference to jnode due to holding spin-locks. + Currently this is only called when the atom commits. + + NOTE: this function does not release a (journal) reference to jnode + due to locking optimizations, you should call jput() somewhere after + calling reiser4_uncapture_block(). 
*/ +void reiser4_uncapture_block(jnode * node) +{ + txn_atom *atom; + + assert("umka-226", node != NULL); + atom = node->atom; + assert("umka-228", atom != NULL); + + assert("jmacd-1021", node->atom == atom); + assert_spin_locked(&(node->guard)); + assert("jmacd-1023", atom_is_protected(atom)); + + JF_CLR(node, JNODE_DIRTY); + JF_CLR(node, JNODE_RELOC); + JF_CLR(node, JNODE_OVRWR); + JF_CLR(node, JNODE_CREATED); + JF_CLR(node, JNODE_WRITEBACK); + JF_CLR(node, JNODE_REPACK); + + list_del_init(&node->capture_link); + if (JF_ISSET(node, JNODE_FLUSH_QUEUED)) { + assert("zam-925", atom_isopen(atom)); + assert("vs-1623", NODE_LIST(node) == FQ_LIST); + ON_DEBUG(atom->num_queued--); + JF_CLR(node, JNODE_FLUSH_QUEUED); + } + atom->capture_count -= 1; + ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), NOT_CAPTURED, 1)); + node->atom = NULL; + + spin_unlock_jnode(node); + LOCK_CNT_DEC(t_refs); +} + +/* Unconditional insert of jnode into atom's overwrite list. Currently used in + bitmap-based allocator code for adding modified bitmap blocks the + transaction. 
@atom and @node are spin locked */ +void insert_into_atom_ovrwr_list(txn_atom * atom, jnode * node) +{ + assert("zam-538", atom_is_protected(atom)); + assert_spin_locked(&(node->guard)); + assert("zam-899", JF_ISSET(node, JNODE_OVRWR)); + assert("zam-543", node->atom == NULL); + assert("vs-1433", !jnode_is_unformatted(node) && !jnode_is_znode(node)); + + list_add(&node->capture_link, ATOM_OVRWR_LIST(atom)); + jref(node); + node->atom = atom; + atom->capture_count++; + ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), OVRWR_LIST, 1)); +} + +static int count_deleted_blocks_actor(txn_atom * atom, + const reiser4_block_nr * a, + const reiser4_block_nr * b, void *data) +{ + reiser4_block_nr *counter = data; + + assert("zam-995", data != NULL); + assert("zam-996", a != NULL); + if (b == NULL) + *counter += 1; + else + *counter += *b; + return 0; +} + +reiser4_block_nr txnmgr_count_deleted_blocks(void) +{ + reiser4_block_nr result; + txn_mgr *tmgr = &get_super_private(reiser4_get_current_sb())->tmgr; + txn_atom *atom; + + result = 0; + + spin_lock_txnmgr(tmgr); + list_for_each_entry(atom, &tmgr->atoms_list, atom_link) { + spin_lock_atom(atom); + if (atom_isopen(atom)) + atom_dset_deferred_apply(atom, count_deleted_blocks_actor, &result, 0); + spin_unlock_atom(atom); + } + spin_unlock_txnmgr(tmgr); + + return result; +} + +void atom_dset_init(txn_atom *atom) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_init(&atom->discard.delete_set); + } else { + blocknr_set_init(&atom->nodiscard.delete_set); + } +} + +void atom_dset_destroy(txn_atom *atom) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_destroy(&atom->discard.delete_set); + } else { + blocknr_set_destroy(&atom->nodiscard.delete_set); + } +} + +void atom_dset_merge(txn_atom *from, txn_atom *to) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_merge(&from->discard.delete_set, &to->discard.delete_set); + } else { 
+ blocknr_set_merge(&from->nodiscard.delete_set, &to->nodiscard.delete_set); + } +} + +int atom_dset_deferred_apply(txn_atom* atom, + blocknr_set_actor_f actor, + void *data, + int delete) +{ + int ret; + + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + ret = blocknr_list_iterator(atom, + &atom->discard.delete_set, + actor, + data, + delete); + } else { + ret = blocknr_set_iterator(atom, + &atom->nodiscard.delete_set, + actor, + data, + delete); + } + + return ret; +} + +extern int atom_dset_deferred_add_extent(txn_atom *atom, + void **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len) +{ + int ret; + + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + ret = blocknr_list_add_extent(atom, + &atom->discard.delete_set, + (blocknr_list_entry**)new_entry, + start, + len); + } else { + ret = blocknr_set_add_extent(atom, + &atom->nodiscard.delete_set, + (blocknr_set_entry**)new_entry, + start, + len); + } + + return ret; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/txnmgr.h b/fs/reiser4/txnmgr.h new file mode 100644 index 000000000000..72b84a26ff92 --- /dev/null +++ b/fs/reiser4/txnmgr.h @@ -0,0 +1,755 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* data-types and function declarations for transaction manager. See txnmgr.c + * for details. */ + +#ifndef __REISER4_TXNMGR_H__ +#define __REISER4_TXNMGR_H__ + +#include "forward.h" +#include "dformat.h" + +#include +#include +#include +#include +#include +#include + +/* TYPE DECLARATIONS */ + +/* This enumeration describes the possible types of a capture request (reiser4_try_capture). + A capture request dynamically assigns a block to the calling thread's transaction + handle. 
*/ +typedef enum { + /* A READ_ATOMIC request indicates that a block will be read and that the caller's + atom should fuse in order to ensure that the block commits atomically with the + caller. */ + TXN_CAPTURE_READ_ATOMIC = (1 << 0), + + /* A READ_NONCOM request indicates that a block will be read and that the caller is + willing to read a non-committed block without causing atoms to fuse. */ + TXN_CAPTURE_READ_NONCOM = (1 << 1), + + /* A READ_MODIFY request indicates that a block will be read but that the caller + wishes for the block to be captured as it will be written. This capture request + mode is not currently used, but eventually it will be useful for preventing + deadlock in read-modify-write cycles. */ + TXN_CAPTURE_READ_MODIFY = (1 << 2), + + /* A WRITE capture request indicates that a block will be modified and that atoms + should fuse to make the commit atomic. */ + TXN_CAPTURE_WRITE = (1 << 3), + + /* CAPTURE_TYPES is a mask of the four above capture types, used to separate the + exclusive type designation from extra bits that may be supplied -- see + below. */ + TXN_CAPTURE_TYPES = (TXN_CAPTURE_READ_ATOMIC | + TXN_CAPTURE_READ_NONCOM | TXN_CAPTURE_READ_MODIFY | + TXN_CAPTURE_WRITE), + + /* A subset of CAPTURE_TYPES, CAPTURE_WTYPES is a mask of request types that + indicate modification will occur. */ + TXN_CAPTURE_WTYPES = (TXN_CAPTURE_READ_MODIFY | TXN_CAPTURE_WRITE), + + /* An option to reiser4_try_capture, NONBLOCKING indicates that the caller would + prefer not to sleep waiting for an aging atom to commit. */ + TXN_CAPTURE_NONBLOCKING = (1 << 4), + + /* An option to reiser4_try_capture to prevent atom fusion, just simple + capturing is allowed */ + TXN_CAPTURE_DONT_FUSE = (1 << 5) + + /* This macro selects only the exclusive capture request types, stripping out any + options that were supplied (i.e., NONBLOCKING). 
*/ +#define CAPTURE_TYPE(x) ((x) & TXN_CAPTURE_TYPES) +} txn_capture; + +/* There are two kinds of transaction handle: WRITE_FUSING and READ_FUSING, the only + difference is in the handling of read requests. A WRITE_FUSING transaction handle + defaults read capture requests to TXN_CAPTURE_READ_NONCOM whereas a READ_FUSIONG + transaction handle defaults to TXN_CAPTURE_READ_ATOMIC. */ +typedef enum { + TXN_WRITE_FUSING = (1 << 0), + TXN_READ_FUSING = (1 << 1) | TXN_WRITE_FUSING, /* READ implies WRITE */ +} txn_mode; + +/* Every atom has a stage, which is one of these exclusive values: */ +typedef enum { + /* Initially an atom is free. */ + ASTAGE_FREE = 0, + + /* An atom begins by entering the CAPTURE_FUSE stage, where it proceeds to capture + blocks and fuse with other atoms. */ + ASTAGE_CAPTURE_FUSE = 1, + + /* We need to have a ASTAGE_CAPTURE_SLOW in which an atom fuses with one node for every X nodes it flushes to disk where X > 1. */ + + /* When an atom reaches a certain age it must do all it can to commit. An atom in + the CAPTURE_WAIT stage refuses new transaction handles and prevents fusion from + atoms in the CAPTURE_FUSE stage. */ + ASTAGE_CAPTURE_WAIT = 2, + + /* Waiting for I/O before commit. Copy-on-capture (see + http://namesys.com/v4/v4.html). */ + ASTAGE_PRE_COMMIT = 3, + + /* Post-commit overwrite I/O. Steal-on-capture. */ + ASTAGE_POST_COMMIT = 4, + + /* Atom which waits for the removal of the last reference to (it? ) to + * be deleted from memory */ + ASTAGE_DONE = 5, + + /* invalid atom. */ + ASTAGE_INVALID = 6, + +} txn_stage; + +/* Certain flags may be set in the txn_atom->flags field. */ +typedef enum { + /* Indicates that the atom should commit as soon as possible. */ + ATOM_FORCE_COMMIT = (1 << 0), + /* to avoid endless loop, mark the atom (which was considered as too + * small) after failed attempt to fuse it. 
*/ + ATOM_CANCEL_FUSION = (1 << 1) +} txn_flags; + +/* Flags for controlling commit_txnh */ +typedef enum { + /* Wait commit atom completion in commit_txnh */ + TXNH_WAIT_COMMIT = 0x2, + /* Don't commit atom when this handle is closed */ + TXNH_DONT_COMMIT = 0x4 +} txn_handle_flags_t; + +/* TYPE DEFINITIONS */ + +/* A note on lock ordering: the handle & jnode spinlock protects reading of their ->atom + fields, so typically an operation on the atom through either of these objects must (1) + lock the object, (2) read the atom pointer, (3) lock the atom. + + During atom fusion, the process holds locks on both atoms at once. Then, it iterates + through the list of handles and pages held by the smaller of the two atoms. For each + handle and page referencing the smaller atom, the fusing process must: (1) lock the + object, and (2) update the atom pointer. + + You can see that there is a conflict of lock ordering here, so the more-complex + procedure should have priority, i.e., the fusing process has priority so that it is + guaranteed to make progress and to avoid restarts. + + This decision, however, means additional complexity for aquiring the atom lock in the + first place. + + The general original procedure followed in the code was: + + TXN_OBJECT *obj = ...; + TXN_ATOM *atom; + + spin_lock (& obj->_lock); + + atom = obj->_atom; + + if (! spin_trylock_atom (atom)) + { + spin_unlock (& obj->_lock); + RESTART OPERATION, THERE WAS A RACE; + } + + ELSE YOU HAVE BOTH ATOM AND OBJ LOCKED + + It has however been found that this wastes CPU a lot in a manner that is + hard to profile. So, proper refcounting was added to atoms, and new + standard locking sequence is like following: + + TXN_OBJECT *obj = ...; + TXN_ATOM *atom; + + spin_lock (& obj->_lock); + + atom = obj->_atom; + + if (! 
spin_trylock_atom (atom)) + { + atomic_inc (& atom->refcount); + spin_unlock (& obj->_lock); + spin_lock (&atom->_lock); + atomic_dec (& atom->refcount); + // HERE atom is locked + spin_unlock (&atom->_lock); + RESTART OPERATION, THERE WAS A RACE; + } + + ELSE YOU HAVE BOTH ATOM AND OBJ LOCKED + + (core of this is implemented in trylock_throttle() function) + + See the jnode_get_atom() function for a common case. + + As an additional (and important) optimization allowing to avoid restarts, + it is possible to re-check required pre-conditions at the HERE point in + code above and proceed without restarting if they are still satisfied. +*/ + +/* An atomic transaction: this is the underlying system representation + of a transaction, not the one seen by clients. + + Invariants involving this data-type: + + [sb-fake-allocated] +*/ +struct txn_atom { + /* The spinlock protecting the atom, held during fusion and various other state + changes. */ + spinlock_t alock; + + /* The atom's reference counter, increasing (in case of a duplication + of an existing reference or when we are sure that some other + reference exists) may be done without taking spinlock, decrementing + of the ref. counter requires a spinlock to be held. + + Each transaction handle counts in ->refcount. All jnodes count as + one reference acquired in atom_begin_andlock(), released in + commit_current_atom(). + */ + atomic_t refcount; + + /* The atom_id identifies the atom in persistent records such as the log. */ + __u32 atom_id; + + /* Flags holding any of the txn_flags enumerated values (e.g., + ATOM_FORCE_COMMIT). */ + __u32 flags; + + /* Number of open handles. */ + __u32 txnh_count; + + /* The number of znodes captured by this atom. Equal to the sum of lengths of the + dirty_nodes[level] and clean_nodes lists. */ + __u32 capture_count; + +#if REISER4_DEBUG + int clean; + int dirty; + int ovrwr; + int wb; + int fq; +#endif + + __u32 flushed; + + /* Current transaction stage. 
*/ + txn_stage stage; + + /* Start time. */ + unsigned long start_time; + + /* The atom's delete sets. + "simple" are blocknr_set instances and are used when discard is disabled. + "discard" are blocknr_list instances and are used when discard is enabled. */ + union { + struct { + /* The atom's delete set. It collects block numbers of the nodes + which were deleted during the transaction. */ + struct list_head delete_set; + } nodiscard; + + struct { + /* The atom's delete set. It collects all blocks that have been + deallocated (both immediate and deferred) during the transaction. + These blocks are considered for discarding at commit time. + For details see discard.c */ + struct list_head delete_set; + } discard; + }; + + /* The atom's wandered_block mapping. */ + struct list_head wandered_map; + + /* The transaction's list of dirty captured nodes--per level. Index + by (level). dirty_nodes[0] is for znode-above-root */ + struct list_head dirty_nodes[REAL_MAX_ZTREE_HEIGHT + 1]; + + /* The transaction's list of clean captured nodes. */ + struct list_head clean_nodes; + + /* The atom's overwrite set */ + struct list_head ovrwr_nodes; + + /* nodes which are being written to disk */ + struct list_head writeback_nodes; + + /* list of inodes */ + struct list_head inodes; + + /* List of handles associated with this atom. */ + struct list_head txnh_list; + + /* Transaction list link: list of atoms in the transaction manager. */ + struct list_head atom_link; + + /* List of handles waiting FOR this atom: see 'capture_fuse_wait' comment. */ + struct list_head fwaitfor_list; + + /* List of this atom's handles that are waiting: see 'capture_fuse_wait' comment. */ + struct list_head fwaiting_list; + + /* Numbers of objects which were deleted/created in this transaction + thereby numbers of objects IDs which were released/deallocated. 
*/ + int nr_objects_deleted; + int nr_objects_created; + /* number of blocks allocated during the transaction */ + __u64 nr_blocks_allocated; + /* All atom's flush queue objects are on this list */ + struct list_head flush_queues; +#if REISER4_DEBUG + /* number of flush queues for this atom. */ + int nr_flush_queues; + /* Number of jnodes which were removed from atom's lists and put + on flush_queue */ + int num_queued; +#endif + /* number of threads who wait for this atom to complete commit */ + int nr_waiters; + /* number of threads which do jnode_flush() over this atom */ + int nr_flushers; + /* number of flush queues which are IN_USE and jnodes from fq->prepped + are submitted to disk by the reiser4_write_fq() routine. */ + int nr_running_queues; + /* A counter of grabbed unformatted nodes, see a description of the + * reiser4 space reservation scheme at block_alloc.c */ + reiser4_block_nr flush_reserved; +#if REISER4_DEBUG + void *committer; +#endif + struct super_block *super; +}; + +#define ATOM_DIRTY_LIST(atom, level) (&(atom)->dirty_nodes[level]) +#define ATOM_CLEAN_LIST(atom) (&(atom)->clean_nodes) +#define ATOM_OVRWR_LIST(atom) (&(atom)->ovrwr_nodes) +#define ATOM_WB_LIST(atom) (&(atom)->writeback_nodes) +#define ATOM_FQ_LIST(fq) (&(fq)->prepped) + +#define NODE_LIST(node) (node)->list +#define ASSIGN_NODE_LIST(node, list) ON_DEBUG(NODE_LIST(node) = list) +ON_DEBUG(void + count_jnode(txn_atom *, jnode *, atom_list old_list, + atom_list new_list, int check_lists)); + +/* A transaction handle: the client obtains and commits this handle which is assigned by + the system to a txn_atom. */ +struct txn_handle { + /* Spinlock protecting ->atom pointer */ + spinlock_t hlock; + + /* Flags for controlling commit_txnh() behavior */ + /* from txn_handle_flags_t */ + txn_handle_flags_t flags; + + /* Whether it is READ_FUSING or WRITE_FUSING. */ + txn_mode mode; + + /* If assigned, the atom it is part of. */ + txn_atom *atom; + + /* Transaction list link. 
Head is in txn_atom. */ + struct list_head txnh_link; +}; + +/* The transaction manager: one is contained in the reiser4_super_info_data */ +struct txn_mgr { + /* A spinlock protecting the atom list, id_count, flush_control */ + spinlock_t tmgr_lock; + + /* List of atoms. */ + struct list_head atoms_list; + + /* Number of atoms. */ + int atom_count; + + /* A counter used to assign atom->atom_id values. */ + __u32 id_count; + + /* a mutex object for commit serialization */ + struct mutex commit_mutex; + + /* a list of all txnmrgs served by particular daemon. */ + struct list_head linkage; + + /* description of daemon for this txnmgr */ + ktxnmgrd_context *daemon; + + /* parameters. Adjustable through mount options. */ + unsigned int atom_max_size; + unsigned int atom_max_age; + unsigned int atom_min_size; + /* max number of concurrent flushers for one atom, 0 - unlimited. */ + unsigned int atom_max_flushers; + struct dentry *debugfs_atom_count; + struct dentry *debugfs_id_count; +}; + +/* FUNCTION DECLARATIONS */ + +/* These are the externally (within Reiser4) visible transaction functions, therefore they + are prefixed with "txn_". For comments, see txnmgr.c. 
*/ + +extern int init_txnmgr_static(void); +extern void done_txnmgr_static(void); + +extern void reiser4_init_txnmgr(txn_mgr *); +extern void reiser4_done_txnmgr(txn_mgr *); + +extern int reiser4_txn_reserve(int reserved); + +extern void reiser4_txn_begin(reiser4_context * context); +extern int reiser4_txn_end(reiser4_context * context); + +extern void reiser4_txn_restart(reiser4_context * context); +extern void reiser4_txn_restart_current(void); + +extern int txnmgr_force_commit_all(struct super_block *, int); +extern int current_atom_should_commit(void); + +extern jnode *find_first_dirty_jnode(txn_atom *, int); + +extern int commit_some_atoms(txn_mgr *); +extern int force_commit_atom(txn_handle *); +extern int flush_current_atom(int, long, long *, txn_atom **, jnode *); + +extern int flush_some_atom(jnode *, long *, const struct writeback_control *, int); + +extern void reiser4_atom_set_stage(txn_atom * atom, txn_stage stage); + +extern int same_slum_check(jnode * base, jnode * check, int alloc_check, + int alloc_value); +extern void atom_dec_and_unlock(txn_atom * atom); + +extern int reiser4_try_capture(jnode * node, znode_lock_mode mode, txn_capture flags); +extern int try_capture_page_to_invalidate(struct page *pg); + +extern void reiser4_uncapture_page(struct page *pg); +extern void reiser4_uncapture_block(jnode *); +extern void reiser4_uncapture_jnode(jnode *); + +extern int reiser4_capture_inode(struct inode *); +extern int reiser4_uncapture_inode(struct inode *); + +extern txn_atom *get_current_atom_locked_nocheck(void); + +#if REISER4_DEBUG + +/** + * atom_is_protected - make sure that nobody but us can do anything with atom + * @atom: atom to be checked + * + * This is used to assert that atom either entered commit stages or is spin + * locked. 
+ */ +static inline int atom_is_protected(txn_atom *atom) +{ + if (atom->stage >= ASTAGE_PRE_COMMIT) + return 1; + assert_spin_locked(&(atom->alock)); + return 1; +} + +#endif + +/* Get the current atom and spinlock it if current atom present. May not return NULL */ +static inline txn_atom *get_current_atom_locked(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked_nocheck(); + assert("zam-761", atom != NULL); + + return atom; +} + +extern txn_atom *jnode_get_atom(jnode *); + +extern void reiser4_atom_wait_event(txn_atom *); +extern void reiser4_atom_send_event(txn_atom *); + +extern void insert_into_atom_ovrwr_list(txn_atom * atom, jnode * node); +extern int reiser4_capture_super_block(struct super_block *s); +int capture_bulk(jnode **, int count); + +/* See the comment on the function blocknrset.c:blocknr_set_add for the + calling convention of these three routines. */ +extern int blocknr_set_init_static(void); +extern void blocknr_set_done_static(void); +extern void blocknr_set_init(struct list_head * bset); +extern void blocknr_set_destroy(struct list_head * bset); +extern void blocknr_set_merge(struct list_head * from, struct list_head * into); +extern int blocknr_set_add_extent(txn_atom * atom, + struct list_head * bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * start, + const reiser4_block_nr * len); +extern int blocknr_set_add_pair(txn_atom * atom, struct list_head * bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * a, + const reiser4_block_nr * b); + +typedef int (*blocknr_set_actor_f) (txn_atom *, const reiser4_block_nr *, + const reiser4_block_nr *, void *); + +extern int blocknr_set_iterator(txn_atom * atom, struct list_head * bset, + blocknr_set_actor_f actor, void *data, + int delete); + +/* This is the block list interface (see blocknrlist.c) */ +extern int blocknr_list_init_static(void); +extern void blocknr_list_done_static(void); +extern void blocknr_list_init(struct list_head *blist); +extern void 
blocknr_list_destroy(struct list_head *blist); +extern void blocknr_list_merge(struct list_head *from, struct list_head *to); +extern void blocknr_list_sort_and_join(struct list_head *blist); +/** + * The @atom should be locked. + */ +extern int blocknr_list_add_extent(txn_atom *atom, + struct list_head *blist, + blocknr_list_entry **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len); +extern int blocknr_list_iterator(txn_atom *atom, + struct list_head *blist, + blocknr_set_actor_f actor, + void *data, + int delete); + +/* These are wrappers for accessing and modifying atom's delete lists, + depending on whether discard is enabled or not. + If it is enabled, (less memory efficient) blocknr_list is used for delete + list storage. Otherwise, blocknr_set is used for this purpose. */ +extern void atom_dset_init(txn_atom *atom); +extern void atom_dset_destroy(txn_atom *atom); +extern void atom_dset_merge(txn_atom *from, txn_atom *to); +extern int atom_dset_deferred_apply(txn_atom* atom, + blocknr_set_actor_f actor, + void *data, + int delete); +extern int atom_dset_deferred_add_extent(txn_atom *atom, + void **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len); + +/* flush code takes care about how to fuse flush queues */ +extern void flush_init_atom(txn_atom * atom); +extern void flush_fuse_queues(txn_atom * large, txn_atom * small); + +static inline void spin_lock_atom(txn_atom *atom) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_atom) && + LOCK_CNT_NIL(spin_locked_jnode) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(atom->alock)); + + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_lock_atom_nested(txn_atom *atom) +{ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_jnode) && + 
LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock_nested(&(atom->alock), SINGLE_DEPTH_NESTING); + + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_atom(txn_atom *atom) +{ + if (spin_trylock(&(atom->alock))) { + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_atom(txn_atom *atom) +{ + assert_spin_locked(&(atom->alock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_atom)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_atom); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(atom->alock)); +} + +static inline void spin_lock_txnh(txn_handle *txnh) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(txnh->hlock)); + + LOCK_CNT_INC(spin_locked_txnh); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_txnh(txn_handle *txnh) +{ + if (spin_trylock(&(txnh->hlock))) { + LOCK_CNT_INC(spin_locked_txnh); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_txnh(txn_handle *txnh) +{ + assert_spin_locked(&(txnh->hlock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnh)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_txnh); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(txnh->hlock)); +} + +#define spin_ordering_pred_txnmgr(tmgr) \ + ( LOCK_CNT_NIL(spin_locked_atom) && \ + LOCK_CNT_NIL(spin_locked_txnh) && \ + LOCK_CNT_NIL(spin_locked_jnode) && \ + LOCK_CNT_NIL(rw_locked_zlock) && \ + LOCK_CNT_NIL(rw_locked_dk) && \ + LOCK_CNT_NIL(rw_locked_tree) ) + +static inline void spin_lock_txnmgr(txn_mgr *mgr) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_atom) && + 
LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_jnode) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(mgr->tmgr_lock)); + + LOCK_CNT_INC(spin_locked_txnmgr); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_txnmgr(txn_mgr *mgr) +{ + if (spin_trylock(&(mgr->tmgr_lock))) { + LOCK_CNT_INC(spin_locked_txnmgr); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_txnmgr(txn_mgr *mgr) +{ + assert_spin_locked(&(mgr->tmgr_lock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnmgr)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_txnmgr); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(mgr->tmgr_lock)); +} + +typedef enum { + FQ_IN_USE = 0x1 +} flush_queue_state_t; + +typedef struct flush_queue flush_queue_t; + +/* This is an accumulator for jnodes prepared for writing to disk. A flush queue + is filled by the jnode_flush() routine, and written to disk under memory + pressure or at atom commit time. */ +/* LOCKING: fq state and fq->atom are protected by guard spinlock, fq->nr_queued + field and fq->prepped list can be modified if atom is spin-locked and fq + object is "in-use" state. For read-only traversal of the fq->prepped list + and reading of the fq->nr_queued field it is enough to keep fq "in-use" or + only have atom spin-locked. */ +struct flush_queue { + /* linkage element is the first in this structure to make debugging + easier. See field in atom struct for description of list. */ + struct list_head alink; + /* A spinlock to protect changes of fq state and fq->atom pointer */ + spinlock_t guard; + /* flush_queue state: [in_use | ready] */ + flush_queue_state_t state; + /* A list which contains queued nodes, queued nodes are removed from any + * atom's list and put on this ->prepped one. 
*/ + struct list_head prepped; + /* number of submitted i/o requests */ + atomic_t nr_submitted; + /* number of i/o errors */ + atomic_t nr_errors; + /* An atom this flush queue is attached to */ + txn_atom *atom; + /* A wait queue head to wait on i/o completion */ + wait_queue_head_t wait; +#if REISER4_DEBUG + /* A thread which took this fq in exclusive use, NULL if fq is free, + * used for debugging. */ + struct task_struct *owner; +#endif +}; + +extern int reiser4_fq_by_atom(txn_atom *, flush_queue_t **); +extern void reiser4_fq_put_nolock(flush_queue_t *); +extern void reiser4_fq_put(flush_queue_t *); +extern void reiser4_fuse_fq(txn_atom * to, txn_atom * from); +extern void queue_jnode(flush_queue_t *, jnode *); + +extern int reiser4_write_fq(flush_queue_t *, long *, int); +extern int current_atom_finish_all_fq(void); +extern void init_atom_fq_parts(txn_atom *); + +extern reiser4_block_nr txnmgr_count_deleted_blocks(void); + +extern void znode_make_dirty(znode * node); +extern void jnode_make_dirty_locked(jnode * node); + +extern int reiser4_sync_atom(txn_atom * atom); + +#if REISER4_DEBUG +extern int atom_fq_parts_are_clean(txn_atom *); +#endif + +extern void add_fq_to_bio(flush_queue_t *, struct bio *); +extern flush_queue_t *get_fq_for_current_atom(void); + +void reiser4_invalidate_list(struct list_head * head); + +# endif /* __REISER4_TXNMGR_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/type_safe_hash.h b/fs/reiser4/type_safe_hash.h new file mode 100644 index 000000000000..b2fdacdf00f4 --- /dev/null +++ b/fs/reiser4/type_safe_hash.h @@ -0,0 +1,320 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* A hash table class that uses hash chains (singly-linked) and is + parametrized to provide type safety. 
*/ + +#ifndef __REISER4_TYPE_SAFE_HASH_H__ +#define __REISER4_TYPE_SAFE_HASH_H__ + +#include "debug.h" + +#include +/* Step 1: Use TYPE_SAFE_HASH_DECLARE() to define the TABLE and LINK objects + based on the object type. You need to declare the item type before + this definition, define it after this definition. */ +#define TYPE_SAFE_HASH_DECLARE(PREFIX,ITEM_TYPE) \ + \ +typedef struct PREFIX##_hash_table_ PREFIX##_hash_table; \ +typedef struct PREFIX##_hash_link_ PREFIX##_hash_link; \ + \ +struct PREFIX##_hash_table_ \ +{ \ + ITEM_TYPE **_table; \ + __u32 _buckets; \ +}; \ + \ +struct PREFIX##_hash_link_ \ +{ \ + ITEM_TYPE *_next; \ +} + +/* Step 2: Define the object type of the hash: give it field of type + PREFIX_hash_link. */ + +/* Step 3: Use TYPE_SAFE_HASH_DEFINE to define the hash table interface using + the type and field name used in step 3. The arguments are: + + ITEM_TYPE The item type being hashed + KEY_TYPE The type of key being hashed + KEY_NAME The name of the key field within the item + LINK_NAME The name of the link field within the item, which you must make type PREFIX_hash_link) + HASH_FUNC The name of the hash function (or macro, takes const pointer to key) + EQ_FUNC The name of the equality function (or macro, takes const pointer to two keys) + + It implements these functions: + + prefix_hash_init Initialize the table given its size. + prefix_hash_insert Insert an item + prefix_hash_insert_index Insert an item w/ precomputed hash_index + prefix_hash_find Find an item by key + prefix_hash_find_index Find an item w/ precomputed hash_index + prefix_hash_remove Remove an item, returns 1 if found, 0 if not found + prefix_hash_remove_index Remove an item w/ precomputed hash_index + + If you'd like something to be done differently, feel free to ask me + for modifications. 
Additional features that could be added but + have not been: + + prefix_hash_remove_key Find and remove an item by key + prefix_hash_remove_key_index Find and remove an item by key w/ precomputed hash_index + + The hash_function currently receives only the key as an argument, + meaning it must somehow know the number of buckets. If this is a + problem let me know. + + This hash table uses a single-linked hash chain. This means + insertion is fast but deletion requires searching the chain. + + There is also the doubly-linked hash chain approach, under which + deletion requires no search but the code is longer and it takes two + pointers per item. + + The circularly-linked approach has the shortest code but requires + two pointers per bucket, doubling the size of the bucket array (in + addition to two pointers per item). +*/ +#define TYPE_SAFE_HASH_DEFINE(PREFIX,ITEM_TYPE,KEY_TYPE,KEY_NAME,LINK_NAME,HASH_FUNC,EQ_FUNC) \ + \ +static __inline__ void \ +PREFIX##_check_hash (PREFIX##_hash_table *table UNUSED_ARG, \ + __u32 hash UNUSED_ARG) \ +{ \ + assert("nikita-2780", hash < table->_buckets); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_init (PREFIX##_hash_table *hash, \ + __u32 buckets) \ +{ \ + hash->_table = (ITEM_TYPE**) KMALLOC (sizeof (ITEM_TYPE*) * buckets); \ + hash->_buckets = buckets; \ + if (hash->_table == NULL) \ + { \ + return RETERR(-ENOMEM); \ + } \ + memset (hash->_table, 0, sizeof (ITEM_TYPE*) * buckets); \ + ON_DEBUG(printk(#PREFIX "_hash_table: %i buckets\n", buckets)); \ + return 0; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_done (PREFIX##_hash_table *hash) \ +{ \ + if (REISER4_DEBUG && hash->_table != NULL) { \ + __u32 i; \ + for (i = 0 ; i < hash->_buckets ; ++ i) \ + assert("nikita-2905", hash->_table[i] == NULL); \ + } \ + if (hash->_table != NULL) \ + KFREE (hash->_table, sizeof (ITEM_TYPE*) * hash->_buckets); \ + hash->_table = NULL; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_prefetch_next (ITEM_TYPE *item) \ +{ \ + 
prefetch(item->LINK_NAME._next); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_prefetch_bucket (PREFIX##_hash_table *hash, \ + __u32 index) \ +{ \ + prefetch(hash->_table[index]); \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + KEY_TYPE const *find_key) \ +{ \ + ITEM_TYPE *item; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + for (item = hash->_table[hash_index]; \ + item != NULL; \ + item = item->LINK_NAME._next) \ + { \ + prefetch(item->LINK_NAME._next); \ + prefetch(item->LINK_NAME._next + offsetof(ITEM_TYPE, KEY_NAME)); \ + if (EQ_FUNC (& item->KEY_NAME, find_key)) \ + { \ + return item; \ + } \ + } \ + \ + return NULL; \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_index_lru (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + KEY_TYPE const *find_key) \ +{ \ + ITEM_TYPE ** item = &hash->_table[hash_index]; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + while (*item != NULL) { \ + prefetch(&(*item)->LINK_NAME._next); \ + if (EQ_FUNC (&(*item)->KEY_NAME, find_key)) { \ + ITEM_TYPE *found; \ + \ + found = *item; \ + *item = found->LINK_NAME._next; \ + found->LINK_NAME._next = hash->_table[hash_index]; \ + hash->_table[hash_index] = found; \ + return found; \ + } \ + item = &(*item)->LINK_NAME._next; \ + } \ + return NULL; \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + ITEM_TYPE *del_item) \ +{ \ + ITEM_TYPE ** hash_item_p = &hash->_table[hash_index]; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + while (*hash_item_p != NULL) { \ + prefetch(&(*hash_item_p)->LINK_NAME._next); \ + if (*hash_item_p == del_item) { \ + *hash_item_p = (*hash_item_p)->LINK_NAME._next; \ + return 1; \ + } \ + hash_item_p = &(*hash_item_p)->LINK_NAME._next; \ + } \ + return 0; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + 
ITEM_TYPE *ins_item) \ +{ \ + PREFIX##_check_hash(hash, hash_index); \ + \ + ins_item->LINK_NAME._next = hash->_table[hash_index]; \ + hash->_table[hash_index] = ins_item; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_index_rcu (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + ITEM_TYPE *ins_item) \ +{ \ + PREFIX##_check_hash(hash, hash_index); \ + \ + ins_item->LINK_NAME._next = hash->_table[hash_index]; \ + smp_wmb(); \ + hash->_table[hash_index] = ins_item; \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find (PREFIX##_hash_table *hash, \ + KEY_TYPE const *find_key) \ +{ \ + return PREFIX##_hash_find_index (hash, HASH_FUNC(hash, find_key), find_key); \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_lru (PREFIX##_hash_table *hash, \ + KEY_TYPE const *find_key) \ +{ \ + return PREFIX##_hash_find_index_lru (hash, HASH_FUNC(hash, find_key), find_key); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove (PREFIX##_hash_table *hash, \ + ITEM_TYPE *del_item) \ +{ \ + return PREFIX##_hash_remove_index (hash, \ + HASH_FUNC(hash, &del_item->KEY_NAME), del_item); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove_rcu (PREFIX##_hash_table *hash, \ + ITEM_TYPE *del_item) \ +{ \ + return PREFIX##_hash_remove (hash, del_item); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert (PREFIX##_hash_table *hash, \ + ITEM_TYPE *ins_item) \ +{ \ + return PREFIX##_hash_insert_index (hash, \ + HASH_FUNC(hash, &ins_item->KEY_NAME), ins_item); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_rcu (PREFIX##_hash_table *hash, \ + ITEM_TYPE *ins_item) \ +{ \ + return PREFIX##_hash_insert_index_rcu (hash, HASH_FUNC(hash, &ins_item->KEY_NAME), \ + ins_item); \ +} \ + \ +static __inline__ ITEM_TYPE * \ +PREFIX##_hash_first (PREFIX##_hash_table *hash, __u32 ind) \ +{ \ + ITEM_TYPE *first; \ + \ + for (first = NULL; ind < hash->_buckets; ++ ind) { \ + first = hash->_table[ind]; \ + if (first != NULL) \ + break; \ + } \ + return 
first; \ +} \ + \ +static __inline__ ITEM_TYPE * \ +PREFIX##_hash_next (PREFIX##_hash_table *hash, \ + ITEM_TYPE *item) \ +{ \ + ITEM_TYPE *next; \ + \ + if (item == NULL) \ + return NULL; \ + next = item->LINK_NAME._next; \ + if (next == NULL) \ + next = PREFIX##_hash_first (hash, HASH_FUNC(hash, &item->KEY_NAME) + 1); \ + return next; \ +} \ + \ +typedef struct {} PREFIX##_hash_dummy + +#define for_all_ht_buckets(table, head) \ +for ((head) = &(table) -> _table[ 0 ] ; \ + (head) != &(table) -> _table[ (table) -> _buckets ] ; ++ (head)) + +#define for_all_in_bucket(bucket, item, next, field) \ +for ((item) = *(bucket), (next) = (item) ? (item) -> field._next : NULL ; \ + (item) != NULL ; \ + (item) = (next), (next) = (item) ? (item) -> field._next : NULL ) + +#define for_all_in_htable(table, prefix, item, next) \ +for ((item) = prefix ## _hash_first ((table), 0), \ + (next) = prefix ## _hash_next ((table), (item)) ; \ + (item) != NULL ; \ + (item) = (next), \ + (next) = prefix ## _hash_next ((table), (item))) + +/* __REISER4_TYPE_SAFE_HASH_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/vfs_ops.c b/fs/reiser4/vfs_ops.c new file mode 100644 index 000000000000..68a3bcd70bd7 --- /dev/null +++ b/fs/reiser4/vfs_ops.c @@ -0,0 +1,260 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Interface to VFS. Reiser4 {super|export|dentry}_operations are defined + here. 
*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/file/file.h" +#include "plugin/security/perm.h" +#include "plugin/disk_format/disk_format.h" +#include "plugin/plugin.h" +#include "plugin/plugin_set.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" +#include "entd.h" +#include "status_flags.h" +#include "flush.h" +#include "dscale.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* update inode stat-data by calling plugin */ +int reiser4_update_sd(struct inode *object) +{ + file_plugin *fplug; + + assert("nikita-2338", object != NULL); + /* check for read-only file system. */ + if (IS_RDONLY(object)) + return 0; + + fplug = inode_file_plugin(object); + assert("nikita-2339", fplug != NULL); + return fplug->write_sd_by_inode(object); +} + +/* helper function: increase inode nlink count and call plugin method to save + updated stat-data. 
+ + Used by link/create and during creation of dot and dotdot in mkdir +*/ +int reiser4_add_nlink(struct inode *object /* object to which link is added */ , + struct inode *parent /* parent where new entry will be */ + , + int write_sd_p /* true if stat-data has to be + * updated */ ) +{ + file_plugin *fplug; + int result; + + assert("nikita-1351", object != NULL); + + fplug = inode_file_plugin(object); + assert("nikita-1445", fplug != NULL); + + /* ask plugin whether it can add yet another link to this + object */ + if (!fplug->can_add_link(object)) + return RETERR(-EMLINK); + + assert("nikita-2211", fplug->add_link != NULL); + /* call plugin to do actual addition of link */ + result = fplug->add_link(object, parent); + + /* optionally update stat data */ + if (result == 0 && write_sd_p) + result = fplug->write_sd_by_inode(object); + return result; +} + +/* helper function: decrease inode nlink count and call plugin method to save + updated stat-data. + + Used by unlink/create +*/ +int reiser4_del_nlink(struct inode *object /* object from which link is + * removed */ , + struct inode *parent /* parent where entry was */ , + int write_sd_p /* true is stat-data has to be + * updated */ ) +{ + file_plugin *fplug; + int result; + + assert("nikita-1349", object != NULL); + + fplug = inode_file_plugin(object); + assert("nikita-1350", fplug != NULL); + assert("nikita-1446", object->i_nlink > 0); + assert("nikita-2210", fplug->rem_link != NULL); + + /* call plugin to do actual deletion of link */ + result = fplug->rem_link(object, parent); + + /* optionally update stat data */ + if (result == 0 && write_sd_p) + result = fplug->write_sd_by_inode(object); + return result; +} + +/* Release reiser4 dentry. This is d_op->d_release() method. 
*/
+static void reiser4_d_release(struct dentry *dentry /* dentry released */ )
+{
+	reiser4_free_dentry_fsdata(dentry);
+}
+
+/*
+ * Called by reiser4_sync_inodes(), during speculative write-back (through
+ * pdflush, or balance_dirty_pages()).
+ */
+void reiser4_writeout(struct super_block *sb, struct writeback_control *wbc)
+{
+	long written = 0;
+	int repeats = 0;
+	int result;
+
+	/*
+	 * Performs early flushing, trying to free some memory. If there
+	 * is nothing to flush, commits some atoms.
+	 *
+	 * Commit all atoms if reiser4_writepages_dispatch() is called
+	 * from sys_sync() or sys_fsync()
+	 */
+	if (wbc->sync_mode != WB_SYNC_NONE) {
+		txnmgr_force_commit_all(sb, 0);
+		return;
+	}
+
+	BUG_ON(reiser4_get_super_fake(sb) == NULL);
+	do {
+		long nr_submitted = 0;
+		jnode *node = NULL;
+
+		/* do not put more requests to overload write queue */
+		if (bdi_write_congested(inode_to_bdi(reiser4_get_super_fake(sb)))) {
+			//blk_flush_plug(current);
+			break;
+		}
+		repeats++;
+		BUG_ON(wbc->nr_to_write <= 0);
+
+		if (get_current_context()->entd) {
+			entd_context *ent = get_entd_context(sb);
+
+			if (ent->cur_request->node)
+				/*
+				 * this is ent thread and it managed to capture
+				 * requested page itself - start flush from
+				 * that page
+				 */
+				node = ent->cur_request->node;
+		}
+
+		result = flush_some_atom(node, &nr_submitted, wbc,
+					 JNODE_FLUSH_WRITE_BLOCKS);
+		if (result != 0)
+			warning("nikita-31001", "Flush failed: %i", result);
+		if (node)
+			/* drop the reference acquired
+			   in find_or_create_extent() */
+			jput(node);
+		if (!nr_submitted)
+			break;
+
+		wbc->nr_to_write -= nr_submitted;
+		written += nr_submitted;
+	} while (wbc->nr_to_write > 0);
+}
+
+/* tell VM how many pages were dirtied */
+void reiser4_throttle_write(struct inode *inode)
+{
+	reiser4_context *ctx;
+
+	ctx = get_current_context();
+	reiser4_txn_restart(ctx);
+	current->journal_info = NULL;
+	balance_dirty_pages_ratelimited(inode->i_mapping);
+	current->journal_info = ctx;
+}
+
+const int
REISER4_MAGIC_OFFSET = 16 * 4096; /* offset to magic string from the + * beginning of device */ + +/* + * Reiser4 initialization/shutdown. + * + * Code below performs global reiser4 initialization that is done either as + * part of kernel initialization (when reiser4 is statically built-in), or + * during reiser4 module load (when compiled as module). + */ + +void reiser4_handle_error(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + + if (!sb) + return; + reiser4_status_write(REISER4_STATUS_DAMAGED, 0, + "Filesystem error occured"); + switch (get_super_private(sb)->onerror) { + case 1: + reiser4_panic("foobar-42", "Filesystem error occured\n"); + default: + if (sb->s_flags & MS_RDONLY) + return; + sb->s_flags |= MS_RDONLY; + break; + } +} + +struct dentry_operations reiser4_dentry_operations = { + .d_revalidate = NULL, + .d_hash = NULL, + .d_compare = NULL, + .d_delete = NULL, + .d_release = reiser4_d_release, + .d_iput = NULL, +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/vfs_ops.h b/fs/reiser4/vfs_ops.h new file mode 100644 index 000000000000..9c8819fc726f --- /dev/null +++ b/fs/reiser4/vfs_ops.h @@ -0,0 +1,60 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* vfs_ops.c's exported symbols */ + +#if !defined( __FS_REISER4_VFS_OPS_H__ ) +#define __FS_REISER4_VFS_OPS_H__ + +#include "forward.h" +#include "coord.h" +#include "seal.h" +#include "plugin/file/file.h" +#include "super.h" +#include "readahead.h" + +#include /* for loff_t */ +#include /* for struct address_space */ +#include /* for struct dentry */ +#include +#include + +/* address space operations */ +int reiser4_writepage(struct page *, struct writeback_control *); +int reiser4_set_page_dirty(struct page *); +void reiser4_invalidatepage(struct page *, unsigned int offset, unsigned int length); +int 
reiser4_releasepage(struct page *, gfp_t); + +#ifdef CONFIG_MIGRATION +int reiser4_migratepage(struct address_space *, struct page *, + struct page *, enum migrate_mode); +#else +#define reiser4_migratepage NULL +#endif /* CONFIG_MIGRATION */ + +extern int reiser4_update_sd(struct inode *); +extern int reiser4_add_nlink(struct inode *, struct inode *, int); +extern int reiser4_del_nlink(struct inode *, struct inode *, int); + +extern int reiser4_start_up_io(struct page *page); +extern void reiser4_throttle_write(struct inode *); +extern int jnode_is_releasable(jnode *); + +#define CAPTURE_APAGE_BURST (1024l) +void reiser4_writeout(struct super_block *, struct writeback_control *); + +extern void reiser4_handle_error(void); + +/* __FS_REISER4_VFS_OPS_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/wander.c b/fs/reiser4/wander.c new file mode 100644 index 000000000000..5dfb30ae3a46 --- /dev/null +++ b/fs/reiser4/wander.c @@ -0,0 +1,1757 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 Wandering Log */ + +/* You should read http://www.namesys.com/txn-doc.html + + That describes how filesystem operations are performed as atomic + transactions, and how we try to arrange it so that we can write most of the + data only once while performing the operation atomically. + + For the purposes of this code, it is enough for it to understand that it + has been told a given block should be written either once, or twice (if + twice then once to the wandered location and once to the real location). + + This code guarantees that those blocks that are defined to be part of an + atom either all take effect or none of them take effect. 
+ + The "relocate set" of nodes are submitted to write by the jnode_flush() + routine, and the "overwrite set" is submitted by reiser4_write_log(). + This is because with the overwrite set we seek to optimize writes, and + with the relocate set we seek to cause disk order to correlate with the + "parent first order" (preorder). + + reiser4_write_log() allocates and writes wandered blocks and maintains + additional on-disk structures of the atom as wander records (each wander + record occupies one block) for storing of the "wandered map" (a table which + contains a relation between wandered and real block numbers) and other + information which might be needed at transaction recovery time. + + The wander records are unidirectionally linked into a circle: each wander + record contains a block number of the next wander record, the last wander + record points to the first one. + + One wander record (named "tx head" in this file) has a format which is + different from the other wander records. The "tx head" has a reference to the + "tx head" block of the previously committed atom. Also, "tx head" contains + fs information (the free blocks counter, and the oid allocator state) which + is logged in a special way . + + There are two journal control blocks, named journal header and journal + footer which have fixed on-disk locations. The journal header has a + reference to the "tx head" block of the last committed atom. The journal + footer points to the "tx head" of the last flushed atom. The atom is + "played" when all blocks from its overwrite set are written to disk the + second time (i.e. written to their real locations). + + NOTE: People who know reiserfs internals and its journal structure might be + confused with these terms journal footer and journal header. 
There is a table + with terms of similar semantics in reiserfs (reiser3) and reiser4: + + REISER3 TERM | REISER4 TERM | DESCRIPTION + --------------------+-----------------------+---------------------------- + commit record | journal header | atomic write of this record + | | ends transaction commit + --------------------+-----------------------+---------------------------- + journal header | journal footer | atomic write of this record + | | ends post-commit writes. + | | After successful + | | writing of this journal + | | blocks (in reiser3) or + | | wandered blocks/records are + | | free for re-use. + --------------------+-----------------------+---------------------------- + + The atom commit process is the following: + + 1. The overwrite set is taken from atom's clean list, and its size is + counted. + + 2. The number of necessary wander records (including tx head) is calculated, + and the wander record blocks are allocated. + + 3. Allocate wandered blocks and populate wander records by wandered map. + + 4. submit write requests for wander records and wandered blocks. + + 5. wait until submitted write requests complete. + + 6. update journal header: change the pointer to the block number of just + written tx head, submit an i/o for modified journal header block and wait + for i/o completion. + + NOTE: The special logging for bitmap blocks and some reiser4 super block + fields makes processes of atom commit, flush and recovering a bit more + complex (see comments in the source code for details). + + The atom playing process is the following: + + 1. Write atom's overwrite set in-place. + + 2. Wait on i/o. + + 3. Update journal footer: change the pointer to block number of tx head + block of the atom we currently flushing, submit an i/o, wait on i/o + completion. + + 4. Free disk space which was used for wandered blocks and wander records. 
+
+   After the freeing of wandered blocks and wander records we have that journal
+   footer points to the on-disk structure which might be overwritten soon.
+   Neither the log writer nor the journal recovery procedure use that pointer
+   for accessing the data. When the journal recovery procedure finds the oldest
+   transaction it compares the journal footer pointer value with the "prev_tx"
+   pointer value in tx head, if values are equal the oldest not flushed
+   transaction is found.
+
+   NOTE on disk space leakage: the information about what blocks and how many
+   blocks are allocated for wandered blocks and wander records is not written
+   to the disk because of special logging for bitmaps and some super block
+   counters. After a system crash reiser4 does not remember those
+   objects' allocation, thus we have no such kind of disk space leakage.
+*/
+
+/* Special logging of reiser4 super block fields. */
+
+/* There are some reiser4 super block fields (free block count and OID allocator
+   state (number of files and next free OID) which are logged separately from
+   super block to avoid unnecessary atom fusion.
+
+   So, the reiser4 super block can be not captured by a transaction with
+   allocates/deallocates disk blocks or create/delete file objects. Moreover,
+   the reiser4 on-disk super block is not touched when such a transaction is
+   committed and flushed. Those "counters logged specially" are logged in "tx
+   head" blocks and in the journal footer block.
+
+   A step-by-step description of special logging:
+
+   0. The per-atom information about deleted or created files and allocated or
+   freed blocks is collected during the transaction. The atom's
+   ->nr_objects_created and ->nr_objects_deleted are for object
+   deletion/creation tracking, the numbers of allocated and freed blocks are
+   calculated using atom's delete set and atom's capture list -- all new and
+   relocated nodes should be on atom's clean list and should have JNODE_RELOC
+   bit set.
+
+   1.
The "logged specially" reiser4 super block fields have their "committed" + versions in the reiser4 in-memory super block. They get modified only at + atom commit time. The atom's commit thread has an exclusive access to those + "committed" fields because the log writer implementation supports only one + atom commit a time (there is a per-fs "commit" mutex). At + that time "committed" counters are modified using per-atom information + collected during the transaction. These counters are stored on disk as a + part of tx head block when atom is committed. + + 2. When the atom is flushed the value of the free block counter and the OID + allocator state get written to the journal footer block. A special journal + procedure (journal_recover_sb_data()) takes those values from the journal + footer and updates the reiser4 in-memory super block. + + NOTE: That means free block count and OID allocator state are logged + separately from the reiser4 super block regardless of the fact that the + reiser4 super block has fields to store both the free block counter and the + OID allocator. + + Writing the whole super block at commit time requires knowing true values of + all its fields without changes made by not yet committed transactions. It is + possible by having their "committed" version of the super block like the + reiser4 bitmap blocks have "committed" and "working" versions. However, + another scheme was implemented which stores special logged values in the + unused free space inside transaction head block. In my opinion it has an + advantage of not writing whole super block when only part of it was + modified. 
*/ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "page_cache.h" +#include "wander.h" +#include "reiser4.h" +#include "super.h" +#include "vfs_ops.h" +#include "writeout.h" +#include "inode.h" +#include "entd.h" + +#include +#include /* for struct super_block */ +#include /* for struct page */ +#include +#include /* for struct bio */ +#include + +static int write_jnodes_to_disk_extent( + jnode *, int, const reiser4_block_nr *, flush_queue_t *, int); + +/* The commit_handle is a container for objects needed at atom commit time */ +struct commit_handle { + /* A pointer to atom's list of OVRWR nodes */ + struct list_head *overwrite_set; + /* atom's overwrite set size */ + int overwrite_set_size; + /* jnodes for wander record blocks */ + struct list_head tx_list; + /* number of wander records */ + __u32 tx_size; + /* 'committed' sb counters are saved here until atom is completely + flushed */ + __u64 free_blocks; + __u64 nr_files; + __u64 next_oid; + /* A pointer to the atom which is being committed */ + txn_atom *atom; + /* A pointer to current super block */ + struct super_block *super; + /* The counter of modified bitmaps */ + reiser4_block_nr nr_bitmap; +}; + +static void init_commit_handle(struct commit_handle *ch, txn_atom *atom) +{ + memset(ch, 0, sizeof(struct commit_handle)); + INIT_LIST_HEAD(&ch->tx_list); + + ch->atom = atom; + ch->super = reiser4_get_current_sb(); +} + +static void done_commit_handle(struct commit_handle *ch) +{ + assert("zam-690", list_empty(&ch->tx_list)); +} + +/* fill journal header block data */ +static void format_journal_header(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo; + struct journal_header *header; + jnode *txhead; + + sbinfo = get_super_private(ch->super); + assert("zam-479", sbinfo != NULL); + assert("zam-480", sbinfo->journal_header != NULL); + + txhead = list_entry(ch->tx_list.next, jnode, capture_link); + 
+ jload(sbinfo->journal_header); + + header = (struct journal_header *)jdata(sbinfo->journal_header); + assert("zam-484", header != NULL); + + put_unaligned(cpu_to_le64(*jnode_get_block(txhead)), + &header->last_committed_tx); + + jrelse(sbinfo->journal_header); +} + +/* fill journal footer block data */ +static void format_journal_footer(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo; + struct journal_footer *footer; + jnode *tx_head; + + sbinfo = get_super_private(ch->super); + + tx_head = list_entry(ch->tx_list.next, jnode, capture_link); + + assert("zam-493", sbinfo != NULL); + assert("zam-494", sbinfo->journal_header != NULL); + + check_me("zam-691", jload(sbinfo->journal_footer) == 0); + + footer = (struct journal_footer *)jdata(sbinfo->journal_footer); + assert("zam-495", footer != NULL); + + put_unaligned(cpu_to_le64(*jnode_get_block(tx_head)), + &footer->last_flushed_tx); + put_unaligned(cpu_to_le64(ch->free_blocks), &footer->free_blocks); + + put_unaligned(cpu_to_le64(ch->nr_files), &footer->nr_files); + put_unaligned(cpu_to_le64(ch->next_oid), &footer->next_oid); + + jrelse(sbinfo->journal_footer); +} + +/* wander record capacity depends on current block size */ +static int wander_record_capacity(const struct super_block *super) +{ + return (super->s_blocksize - + sizeof(struct wander_record_header)) / + sizeof(struct wander_entry); +} + +/* Fill first wander record (tx head) in accordance with supplied given data */ +static void format_tx_head(struct commit_handle *ch) +{ + jnode *tx_head; + jnode *next; + struct tx_header *header; + + tx_head = list_entry(ch->tx_list.next, jnode, capture_link); + assert("zam-692", &ch->tx_list != &tx_head->capture_link); + + next = list_entry(tx_head->capture_link.next, jnode, capture_link); + if (&ch->tx_list == &next->capture_link) + next = tx_head; + + header = (struct tx_header *)jdata(tx_head); + + assert("zam-460", header != NULL); + assert("zam-462", ch->super->s_blocksize >= sizeof(struct 
tx_header));
+
+	memset(jdata(tx_head), 0, (size_t) ch->super->s_blocksize);
+	memcpy(jdata(tx_head), TX_HEADER_MAGIC, TX_HEADER_MAGIC_SIZE);
+
+	put_unaligned(cpu_to_le32(ch->tx_size), &header->total);
+	put_unaligned(cpu_to_le64(get_super_private(ch->super)->last_committed_tx),
+		      &header->prev_tx);
+	put_unaligned(cpu_to_le64(*jnode_get_block(next)), &header->next_block);
+	put_unaligned(cpu_to_le64(ch->free_blocks), &header->free_blocks);
+	put_unaligned(cpu_to_le64(ch->nr_files), &header->nr_files);
+	put_unaligned(cpu_to_le64(ch->next_oid), &header->next_oid);
+}
+
+/* prepare ordinary wander record block (fill all service fields) */
+static void
+format_wander_record(struct commit_handle *ch, jnode *node, __u32 serial)
+{
+	struct wander_record_header *LRH;
+	jnode *next;
+
+	assert("zam-464", node != NULL);
+
+	LRH = (struct wander_record_header *)jdata(node);
+	next = list_entry(node->capture_link.next, jnode, capture_link);
+
+	/* the last wander record wraps around and points back to the first
+	   jnode on tx_list (the tx head), closing the on-disk chain */
+	if (&ch->tx_list == &next->capture_link)
+		next = list_entry(ch->tx_list.next, jnode, capture_link);
+
+	assert("zam-465", LRH != NULL);
+	assert("zam-463",
+	       ch->super->s_blocksize > sizeof(struct wander_record_header));
+
+	memset(jdata(node), 0, (size_t) ch->super->s_blocksize);
+	memcpy(jdata(node), WANDER_RECORD_MAGIC, WANDER_RECORD_MAGIC_SIZE);
+
+	put_unaligned(cpu_to_le32(ch->tx_size), &LRH->total);
+	put_unaligned(cpu_to_le32(serial), &LRH->serial);
+	put_unaligned(cpu_to_le64(*jnode_get_block(next)), &LRH->next_block);
+}
+
+/* add one wandered map entry to formatted wander record */
+static void
+store_entry(jnode * node, int index, const reiser4_block_nr * a,
+	    const reiser4_block_nr * b)
+{
+	char *data;
+	struct wander_entry *pairs;
+
+	data = jdata(node);
+	assert("zam-451", data != NULL);
+
+	/* (original, wandered) pairs are laid out immediately after the
+	   wander record header within the block */
+	pairs =
+	    (struct wander_entry *)(data + sizeof(struct wander_record_header));
+
+	put_unaligned(cpu_to_le64(*a), &pairs[index].original);
+	put_unaligned(cpu_to_le64(*b), &pairs[index].wandered);
+}
+
+/* currently, wander 
records contains contain only wandered map, which depend on + overwrite set size */ +static void get_tx_size(struct commit_handle *ch) +{ + assert("zam-440", ch->overwrite_set_size != 0); + assert("zam-695", ch->tx_size == 0); + + /* count all ordinary wander records + ( - 1) / + 1 and add one + for tx head block */ + ch->tx_size = + (ch->overwrite_set_size - 1) / wander_record_capacity(ch->super) + + 2; +} + +/* A special structure for using in store_wmap_actor() for saving its state + between calls */ +struct store_wmap_params { + jnode *cur; /* jnode of current wander record to fill */ + int idx; /* free element index in wander record */ + int capacity; /* capacity */ + +#if REISER4_DEBUG + struct list_head *tx_list; +#endif +}; + +/* an actor for use in blocknr_set_iterator routine which populates the list + of pre-formatted wander records by wandered map info */ +static int +store_wmap_actor(txn_atom * atom UNUSED_ARG, const reiser4_block_nr * a, + const reiser4_block_nr * b, void *data) +{ + struct store_wmap_params *params = data; + + if (params->idx >= params->capacity) { + /* a new wander record should be taken from the tx_list */ + params->cur = list_entry(params->cur->capture_link.next, jnode, capture_link); + assert("zam-454", + params->tx_list != ¶ms->cur->capture_link); + + params->idx = 0; + } + + store_entry(params->cur, params->idx, a, b); + params->idx++; + + return 0; +} + +/* This function is called after Relocate set gets written to disk, Overwrite + set is written to wandered locations and all wander records are written + also. 
Updated journal header blocks contains a pointer (block number) to + first wander record of the just written transaction */ +static int update_journal_header(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo = get_super_private(ch->super); + jnode *jh = sbinfo->journal_header; + jnode *head = list_entry(ch->tx_list.next, jnode, capture_link); + int ret; + + format_journal_header(ch); + + ret = write_jnodes_to_disk_extent(jh, 1, jnode_get_block(jh), NULL, + WRITEOUT_FLUSH_FUA); + if (ret) + return ret; + + /* blk_run_address_space(sbinfo->fake->i_mapping); + * blk_run_queues(); */ + + ret = jwait_io(jh, WRITE); + + if (ret) + return ret; + + sbinfo->last_committed_tx = *jnode_get_block(head); + + return 0; +} + +/* This function is called after write-back is finished. We update journal + footer block and free blocks which were occupied by wandered blocks and + transaction wander records */ +static int update_journal_footer(struct commit_handle *ch) +{ + reiser4_super_info_data *sbinfo = get_super_private(ch->super); + + jnode *jf = sbinfo->journal_footer; + + int ret; + + format_journal_footer(ch); + + ret = write_jnodes_to_disk_extent(jf, 1, jnode_get_block(jf), NULL, + WRITEOUT_FLUSH_FUA); + if (ret) + return ret; + + /* blk_run_address_space(sbinfo->fake->i_mapping); + * blk_run_queue(); */ + + ret = jwait_io(jf, WRITE); + if (ret) + return ret; + + return 0; +} + +/* free block numbers of wander records of already written in place transaction */ +static void dealloc_tx_list(struct commit_handle *ch) +{ + while (!list_empty(&ch->tx_list)) { + jnode *cur = list_entry(ch->tx_list.next, jnode, capture_link); + list_del(&cur->capture_link); + ON_DEBUG(INIT_LIST_HEAD(&cur->capture_link)); + reiser4_dealloc_block(jnode_get_block(cur), 0, + BA_DEFER | BA_FORMATTED); + + unpin_jnode_data(cur); + reiser4_drop_io_head(cur); + } +} + +/* An actor for use in block_nr_iterator() routine which frees wandered blocks + from atom's overwrite set. 
 */
+static int
+dealloc_wmap_actor(txn_atom * atom UNUSED_ARG,
+		   const reiser4_block_nr * a UNUSED_ARG,
+		   const reiser4_block_nr * b, void *data UNUSED_ARG)
+{
+
+	assert("zam-499", b != NULL);
+	assert("zam-500", *b != 0);
+	assert("zam-501", !reiser4_blocknr_is_fake(b));
+
+	/* NOTE(review): BA_DEFER presumably postpones the actual bitmap
+	   update until it is safe to reuse the wandered location --
+	   confirm against block_alloc */
+	reiser4_dealloc_block(b, 0, BA_DEFER | BA_FORMATTED);
+	return 0;
+}
+
+/* free wandered block locations of already written in place transaction */
+static void dealloc_wmap(struct commit_handle *ch)
+{
+	assert("zam-696", ch->atom != NULL);
+
+	blocknr_set_iterator(ch->atom, &ch->atom->wandered_map,
+			     dealloc_wmap_actor, NULL, 1);
+}
+
+/* helper function for alloc wandered blocks, which refill set of block
+   numbers needed for wandered blocks */
+static int
+get_more_wandered_blocks(int count, reiser4_block_nr * start, int *len)
+{
+	reiser4_blocknr_hint hint;
+	int ret;
+
+	reiser4_block_nr wide_len = count;
+
+	/* FIXME-ZAM: A special policy needed for allocation of wandered blocks
+	   ZAM-FIXME-HANS: yes, what happened to our discussion of using a fixed
+	   reserved allocation area so as to get the best qualities of fixed
+	   journals? */
+	reiser4_blocknr_hint_init(&hint);
+	hint.block_stage = BLOCK_GRABBED;
+
+	/* may allocate fewer than @count blocks; the actual extent length is
+	   returned through *len */
+	ret = reiser4_alloc_blocks(&hint, start, &wide_len,
+				   BA_FORMATTED | BA_USE_DEFAULT_SEARCH_START);
+	*len = (int)wide_len;
+
+	return ret;
+}
+
+/*
+ * roll back changes made before issuing BIO in the case of IO error. 
+ */ +static void undo_bio(struct bio *bio) +{ + int i; + + for (i = 0; i < bio->bi_vcnt; ++i) { + struct page *pg; + jnode *node; + + pg = bio->bi_io_vec[i].bv_page; + end_page_writeback(pg); + node = jprivate(pg); + spin_lock_jnode(node); + JF_CLR(node, JNODE_WRITEBACK); + JF_SET(node, JNODE_DIRTY); + spin_unlock_jnode(node); + } + bio_put(bio); +} + +/* put overwrite set back to atom's clean list */ +static void put_overwrite_set(struct commit_handle *ch) +{ + jnode *cur; + + list_for_each_entry(cur, ch->overwrite_set, capture_link) + jrelse_tail(cur); +} + +/* Count overwrite set size, grab disk space for wandered blocks allocation. + Since we have a separate list for atom's overwrite set we just scan the list, + count bitmap and other not leaf nodes which wandered blocks allocation we + have to grab space for. */ +static int get_overwrite_set(struct commit_handle *ch) +{ + int ret; + jnode *cur; + __u64 nr_not_leaves = 0; +#if REISER4_DEBUG + __u64 nr_formatted_leaves = 0; + __u64 nr_unformatted_leaves = 0; +#endif + + assert("zam-697", ch->overwrite_set_size == 0); + + ch->overwrite_set = ATOM_OVRWR_LIST(ch->atom); + cur = list_entry(ch->overwrite_set->next, jnode, capture_link); + + while (ch->overwrite_set != &cur->capture_link) { + jnode *next = list_entry(cur->capture_link.next, jnode, capture_link); + + /* Count bitmap locks for getting correct statistics what number + * of blocks were cleared by the transaction commit. */ + if (jnode_get_type(cur) == JNODE_BITMAP) + ch->nr_bitmap++; + + assert("zam-939", JF_ISSET(cur, JNODE_OVRWR) + || jnode_get_type(cur) == JNODE_BITMAP); + + if (jnode_is_znode(cur) && znode_above_root(JZNODE(cur))) { + /* we replace fake znode by another (real) + znode which is suggested by disk_layout + plugin */ + + /* FIXME: it looks like fake znode should be + replaced by jnode supplied by + disk_layout. 
*/ + + struct super_block *s = reiser4_get_current_sb(); + reiser4_super_info_data *sbinfo = + get_current_super_private(); + + if (sbinfo->df_plug->log_super) { + jnode *sj = sbinfo->df_plug->log_super(s); + + assert("zam-593", sj != NULL); + + if (IS_ERR(sj)) + return PTR_ERR(sj); + + spin_lock_jnode(sj); + JF_SET(sj, JNODE_OVRWR); + insert_into_atom_ovrwr_list(ch->atom, sj); + spin_unlock_jnode(sj); + + /* jload it as the rest of overwrite set */ + jload_gfp(sj, reiser4_ctx_gfp_mask_get(), 0); + + ch->overwrite_set_size++; + } + spin_lock_jnode(cur); + reiser4_uncapture_block(cur); + jput(cur); + + } else { + int ret; + ch->overwrite_set_size++; + ret = jload_gfp(cur, reiser4_ctx_gfp_mask_get(), 0); + if (ret) + reiser4_panic("zam-783", + "cannot load e-flushed jnode back (ret = %d)\n", + ret); + } + + /* Count not leaves here because we have to grab disk space + * for wandered blocks. They were not counted as "flush + * reserved". Counting should be done _after_ nodes are pinned + * into memory by jload(). */ + if (!jnode_is_leaf(cur)) + nr_not_leaves++; + else { +#if REISER4_DEBUG + /* at this point @cur either has JNODE_FLUSH_RESERVED + * or is eflushed. Locking is not strong enough to + * write an assertion checking for this. */ + if (jnode_is_znode(cur)) + nr_formatted_leaves++; + else + nr_unformatted_leaves++; +#endif + JF_CLR(cur, JNODE_FLUSH_RESERVED); + } + + cur = next; + } + + /* Grab space for writing (wandered blocks) of not leaves found in + * overwrite set. */ + ret = reiser4_grab_space_force(nr_not_leaves, BA_RESERVED); + if (ret) + return ret; + + /* Disk space for allocation of wandered blocks of leaf nodes already + * reserved as "flush reserved", move it to grabbed space counter. 
*/ + spin_lock_atom(ch->atom); + assert("zam-940", + nr_formatted_leaves + nr_unformatted_leaves <= + ch->atom->flush_reserved); + flush_reserved2grabbed(ch->atom, ch->atom->flush_reserved); + spin_unlock_atom(ch->atom); + + return ch->overwrite_set_size; +} + +/** + * write_jnodes_to_disk_extent - submit write request + * @head: + * @first: first jnode of the list + * @nr: number of jnodes on the list + * @block_p: + * @fq: + * @flags: used to decide whether page is to get PG_reclaim flag + * + * Submits a write request for @nr jnodes beginning from the @first, other + * jnodes are after the @first on the double-linked "capture" list. All jnodes + * will be written to the disk region of @nr blocks starting with @block_p block + * number. If @fq is not NULL it means that waiting for i/o completion will be + * done more efficiently by using flush_queue_t objects. + * This function is the one which writes list of jnodes in batch mode. It does + * all low-level things as bio construction and page states manipulation. + * + * ZAM-FIXME-HANS: brief me on why this function exists, and why bios are + * aggregated in this function instead of being left to the layers below + * + * FIXME: ZAM->HANS: What layer are you talking about? Can you point me to that? + * Why that layer needed? Why BIOs cannot be constructed here? + */ +static int write_jnodes_to_disk_extent( + jnode *first, int nr, const reiser4_block_nr *block_p, + flush_queue_t *fq, int flags) +{ + struct super_block *super = reiser4_get_current_sb(); + int op_flags = (flags & WRITEOUT_FLUSH_FUA) ? 
REQ_PREFLUSH | REQ_FUA : 0; + jnode *cur = first; + reiser4_block_nr block; + + assert("zam-571", first != NULL); + assert("zam-572", block_p != NULL); + assert("zam-570", nr > 0); + + block = *block_p; + + while (nr > 0) { + struct bio *bio; + int nr_blocks = min(nr, BIO_MAX_PAGES); + int i; + int nr_used; + + bio = bio_alloc(GFP_NOIO, nr_blocks); + if (!bio) + return RETERR(-ENOMEM); + + bio_set_dev(bio, super->s_bdev); + bio->bi_iter.bi_sector = block * (super->s_blocksize >> 9); + for (nr_used = 0, i = 0; i < nr_blocks; i++) { + struct page *pg; + + pg = jnode_page(cur); + assert("zam-573", pg != NULL); + + get_page(pg); + + lock_and_wait_page_writeback(pg); + + if (!bio_add_page(bio, pg, super->s_blocksize, 0)) { + /* + * underlying device is satiated. Stop adding + * pages to the bio. + */ + unlock_page(pg); + put_page(pg); + break; + } + + spin_lock_jnode(cur); + assert("nikita-3166", + pg->mapping == jnode_get_mapping(cur)); + assert("zam-912", !JF_ISSET(cur, JNODE_WRITEBACK)); +#if REISER4_DEBUG + spin_lock(&cur->load); + assert("nikita-3165", !jnode_is_releasable(cur)); + spin_unlock(&cur->load); +#endif + JF_SET(cur, JNODE_WRITEBACK); + JF_CLR(cur, JNODE_DIRTY); + ON_DEBUG(cur->written++); + + assert("edward-1647", + ergo(jnode_is_znode(cur), JF_ISSET(cur, JNODE_PARSED))); + spin_unlock_jnode(cur); + /* + * update checksum + */ + if (jnode_is_znode(cur)) { + zload(JZNODE(cur)); + if (node_plugin_by_node(JZNODE(cur))->csum) + node_plugin_by_node(JZNODE(cur))->csum(JZNODE(cur), 0); + zrelse(JZNODE(cur)); + } + ClearPageError(pg); + set_page_writeback(pg); + + if (get_current_context()->entd) { + /* this is ent thread */ + entd_context *ent = get_entd_context(super); + struct wbq *rq, *next; + + spin_lock(&ent->guard); + + if (pg == ent->cur_request->page) { + /* + * entd is called for this page. 
This + * request is not in th etodo list + */ + ent->cur_request->written = 1; + } else { + /* + * if we have written a page for which writepage + * is called for - move request to another list. + */ + list_for_each_entry_safe(rq, next, &ent->todo_list, link) { + assert("", rq->magic == WBQ_MAGIC); + if (pg == rq->page) { + /* + * remove request from + * entd's queue, but do + * not wake up a thread + * which put this + * request + */ + list_del_init(&rq->link); + ent->nr_todo_reqs --; + list_add_tail(&rq->link, &ent->done_list); + ent->nr_done_reqs ++; + rq->written = 1; + break; + } + } + } + spin_unlock(&ent->guard); + } + + clear_page_dirty_for_io(pg); + + unlock_page(pg); + + cur = list_entry(cur->capture_link.next, jnode, capture_link); + nr_used++; + } + if (nr_used > 0) { + assert("nikita-3453", + bio->bi_iter.bi_size == super->s_blocksize * nr_used); + assert("nikita-3454", bio->bi_vcnt == nr_used); + + /* Check if we are allowed to write at all */ + if (super->s_flags & MS_RDONLY) + undo_bio(bio); + else { + add_fq_to_bio(fq, bio); + bio_get(bio); + bio_set_op_attrs(bio, WRITE, op_flags); + submit_bio(bio); + bio_put(bio); + } + + block += nr_used - 1; + update_blocknr_hint_default(super, &block); + block += 1; + } else { + bio_put(bio); + } + nr -= nr_used; + } + + return 0; +} + +/* This is a procedure which recovers a contiguous sequences of disk block + numbers in the given list of j-nodes and submits write requests on this + per-sequence basis */ +int +write_jnode_list(struct list_head *head, flush_queue_t *fq, + long *nr_submitted, int flags) +{ + int ret; + jnode *beg = list_entry(head->next, jnode, capture_link); + + while (head != &beg->capture_link) { + int nr = 1; + jnode *cur = list_entry(beg->capture_link.next, jnode, capture_link); + + while (head != &cur->capture_link) { + if (*jnode_get_block(cur) != *jnode_get_block(beg) + nr) + break; + ++nr; + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + + ret = 
write_jnodes_to_disk_extent( + beg, nr, jnode_get_block(beg), fq, flags); + if (ret) + return ret; + + if (nr_submitted) + *nr_submitted += nr; + + beg = cur; + } + + return 0; +} + +/* add given wandered mapping to atom's wandered map */ +static int +add_region_to_wmap(jnode * cur, int len, const reiser4_block_nr * block_p) +{ + int ret; + blocknr_set_entry *new_bsep = NULL; + reiser4_block_nr block; + + txn_atom *atom; + + assert("zam-568", block_p != NULL); + block = *block_p; + assert("zam-569", len > 0); + + while ((len--) > 0) { + do { + atom = get_current_atom_locked(); + assert("zam-536", + !reiser4_blocknr_is_fake(jnode_get_block(cur))); + ret = + blocknr_set_add_pair(atom, &atom->wandered_map, + &new_bsep, + jnode_get_block(cur), &block); + } while (ret == -E_REPEAT); + + if (ret) { + /* deallocate blocks which were not added to wandered + map */ + reiser4_block_nr wide_len = len; + + reiser4_dealloc_blocks(&block, &wide_len, + BLOCK_NOT_COUNTED, + BA_FORMATTED + /* formatted, without defer */ ); + + return ret; + } + + spin_unlock_atom(atom); + + cur = list_entry(cur->capture_link.next, jnode, capture_link); + ++block; + } + + return 0; +} + +/* Allocate wandered blocks for current atom's OVERWRITE SET and immediately + submit IO for allocated blocks. We assume that current atom is in a stage + when any atom fusion is impossible and atom is unlocked and it is safe. 
*/ +static int alloc_wandered_blocks(struct commit_handle *ch, flush_queue_t *fq) +{ + reiser4_block_nr block; + + int rest; + int len; + int ret; + + jnode *cur; + + assert("zam-534", ch->overwrite_set_size > 0); + + rest = ch->overwrite_set_size; + + cur = list_entry(ch->overwrite_set->next, jnode, capture_link); + while (ch->overwrite_set != &cur->capture_link) { + assert("zam-567", JF_ISSET(cur, JNODE_OVRWR)); + + ret = get_more_wandered_blocks(rest, &block, &len); + if (ret) + return ret; + + rest -= len; + + ret = add_region_to_wmap(cur, len, &block); + if (ret) + return ret; + + ret = write_jnodes_to_disk_extent(cur, len, &block, fq, 0); + if (ret) + return ret; + + while ((len--) > 0) { + assert("zam-604", + ch->overwrite_set != &cur->capture_link); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + return 0; +} + +/* allocate given number of nodes over the journal area and link them into a + list, return pointer to the first jnode in the list */ +static int alloc_tx(struct commit_handle *ch, flush_queue_t * fq) +{ + reiser4_blocknr_hint hint; + reiser4_block_nr allocated = 0; + reiser4_block_nr first, len; + jnode *cur; + jnode *txhead; + int ret; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("zam-698", ch->tx_size > 0); + assert("zam-699", list_empty_careful(&ch->tx_list)); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + while (allocated < (unsigned)ch->tx_size) { + len = (ch->tx_size - allocated); + + reiser4_blocknr_hint_init(&hint); + + hint.block_stage = BLOCK_GRABBED; + + /* FIXME: there should be some block allocation policy for + nodes which contain wander records */ + + /* We assume that disk space for wandered record blocks can be + * taken from reserved area. 
*/ + ret = reiser4_alloc_blocks(&hint, &first, &len, + BA_FORMATTED | BA_RESERVED | + BA_USE_DEFAULT_SEARCH_START); + reiser4_blocknr_hint_done(&hint); + + if (ret) + return ret; + + allocated += len; + + /* create jnodes for all wander records */ + while (len--) { + cur = reiser4_alloc_io_head(&first); + + if (cur == NULL) { + ret = RETERR(-ENOMEM); + goto free_not_assigned; + } + + ret = jinit_new(cur, reiser4_ctx_gfp_mask_get()); + + if (ret != 0) { + jfree(cur); + goto free_not_assigned; + } + + pin_jnode_data(cur); + + list_add_tail(&cur->capture_link, &ch->tx_list); + + first++; + } + } + + { /* format a on-disk linked list of wander records */ + int serial = 1; + + txhead = list_entry(ch->tx_list.next, jnode, capture_link); + format_tx_head(ch); + + cur = list_entry(txhead->capture_link.next, jnode, capture_link); + while (&ch->tx_list != &cur->capture_link) { + format_wander_record(ch, cur, serial++); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + { /* Fill wander records with Wandered Set */ + struct store_wmap_params params; + txn_atom *atom; + + params.cur = list_entry(txhead->capture_link.next, jnode, capture_link); + + params.idx = 0; + params.capacity = + wander_record_capacity(reiser4_get_current_sb()); + + atom = get_current_atom_locked(); + blocknr_set_iterator(atom, &atom->wandered_map, + &store_wmap_actor, ¶ms, 0); + spin_unlock_atom(atom); + } + + { /* relse all jnodes from tx_list */ + cur = list_entry(ch->tx_list.next, jnode, capture_link); + while (&ch->tx_list != &cur->capture_link) { + jrelse(cur); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + ret = write_jnode_list(&ch->tx_list, fq, NULL, 0); + + return ret; + + free_not_assigned: + /* We deallocate blocks not yet assigned to jnodes on tx_list. 
The
+   caller takes care of invalidating the tx list */
+	reiser4_dealloc_blocks(&first, &len, BLOCK_NOT_COUNTED, BA_FORMATTED);
+
+	return ret;
+}
+
+/* allocate wandered blocks and wander records for the current atom, submit
+   them for IO, wait for completion and finally update the journal header.
+   After this returns successfully the transaction is durably committed. */
+static int commit_tx(struct commit_handle *ch)
+{
+	flush_queue_t *fq;
+	int ret;
+
+	/* Grab more space for wandered records. */
+	ret = reiser4_grab_space_force((__u64) (ch->tx_size), BA_RESERVED);
+	if (ret)
+		return ret;
+
+	fq = get_fq_for_current_atom();
+	if (IS_ERR(fq))
+		return PTR_ERR(fq);
+
+	/* NOTE(review): get_fq_for_current_atom() appears to return with
+	   fq->atom spin-locked (the same unlock follows the call in
+	   write_tx_back() too) -- confirm against flush_queue code */
+	spin_unlock_atom(fq->atom);
+	do {
+		ret = alloc_wandered_blocks(ch, fq);
+		if (ret)
+			break;
+		ret = alloc_tx(ch, fq);
+		if (ret)
+			break;
+	} while (0);
+
+	reiser4_fq_put(fq);
+	if (ret)
+		return ret;
+	/* wait for all submitted IO before making the commit visible via the
+	   journal header */
+	ret = current_atom_finish_all_fq();
+	if (ret)
+		return ret;
+	return update_journal_header(ch);
+}
+
+/* write the overwrite set to its real (in-place) locations and record the
+   progress in the journal footer */
+static int write_tx_back(struct commit_handle * ch)
+{
+	flush_queue_t *fq;
+	int ret;
+
+	fq = get_fq_for_current_atom();
+	if (IS_ERR(fq))
+		return PTR_ERR(fq);
+	spin_unlock_atom(fq->atom);
+	ret = write_jnode_list(
+		ch->overwrite_set, fq, NULL, WRITEOUT_FOR_PAGE_RECLAIM);
+	reiser4_fq_put(fq);
+	if (ret)
+		return ret;
+	ret = current_atom_finish_all_fq();
+	if (ret)
+		return ret;
+	return update_journal_footer(ch);
+}
+
+/* We assume that at this moment all captured blocks are marked as RELOC or
+   WANDER (belong to Relocate or Overwrite set), all nodes from Relocate set
+   are submitted to write. 
+*/ + +int reiser4_write_logs(long *nr_submitted) +{ + txn_atom *atom; + struct super_block *super = reiser4_get_current_sb(); + reiser4_super_info_data *sbinfo = get_super_private(super); + struct commit_handle ch; + int ret; + + writeout_mode_enable(); + + /* block allocator may add j-nodes to the clean_list */ + ret = reiser4_pre_commit_hook(); + if (ret) + return ret; + + /* No locks are required if we take atom which stage >= + * ASTAGE_PRE_COMMIT */ + atom = get_current_context()->trans->atom; + assert("zam-965", atom != NULL); + + /* relocate set is on the atom->clean_nodes list after + * current_atom_complete_writes() finishes. It can be safely + * uncaptured after commit_mutex is locked, because any atom that + * captures these nodes is guaranteed to commit after current one. + * + * This can only be done after reiser4_pre_commit_hook(), because it is where + * early flushed jnodes with CREATED bit are transferred to the + * overwrite list. */ + reiser4_invalidate_list(ATOM_CLEAN_LIST(atom)); + spin_lock_atom(atom); + /* There might be waiters for the relocate nodes which we have + * released, wake them up. */ + reiser4_atom_send_event(atom); + spin_unlock_atom(atom); + + if (REISER4_DEBUG) { + int level; + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; ++level) + assert("nikita-3352", + list_empty_careful(ATOM_DIRTY_LIST(atom, level))); + } + + sbinfo->nr_files_committed += (unsigned)atom->nr_objects_created; + sbinfo->nr_files_committed -= (unsigned)atom->nr_objects_deleted; + + init_commit_handle(&ch, atom); + + ch.free_blocks = sbinfo->blocks_free_committed; + ch.nr_files = sbinfo->nr_files_committed; + /* ZAM-FIXME-HANS: email me what the contention level is for the super + * lock. 
*/ + ch.next_oid = oid_next(super); + + /* count overwrite set and place it in a separate list */ + ret = get_overwrite_set(&ch); + + if (ret <= 0) { + /* It is possible that overwrite set is empty here, it means + all captured nodes are clean */ + goto up_and_ret; + } + + /* Inform the caller about what number of dirty pages will be + * submitted to disk. */ + *nr_submitted += ch.overwrite_set_size - ch.nr_bitmap; + + /* count all records needed for storing of the wandered set */ + get_tx_size(&ch); + + ret = commit_tx(&ch); + if (ret) + goto up_and_ret; + + spin_lock_atom(atom); + reiser4_atom_set_stage(atom, ASTAGE_POST_COMMIT); + spin_unlock_atom(atom); + reiser4_post_commit_hook(); + + ret = write_tx_back(&ch); + + up_and_ret: + if (ret) { + /* there could be fq attached to current atom; the only way to + remove them is: */ + current_atom_finish_all_fq(); + } + + /* free blocks of flushed transaction */ + dealloc_tx_list(&ch); + dealloc_wmap(&ch); + + reiser4_post_write_back_hook(); + + put_overwrite_set(&ch); + + done_commit_handle(&ch); + + writeout_mode_disable(); + + return ret; +} + +/* consistency checks for journal data/control blocks: header, footer, log + records, transactions head blocks. All functions return zero on success. */ + +static int check_journal_header(const jnode * node UNUSED_ARG) +{ + /* FIXME: journal header has no magic field yet. */ + return 0; +} + +/* wait for write completion for all jnodes from given list */ +static int wait_on_jnode_list(struct list_head *head) +{ + jnode *scan; + int ret = 0; + + list_for_each_entry(scan, head, capture_link) { + struct page *pg = jnode_page(scan); + + if (pg) { + if (PageWriteback(pg)) + wait_on_page_writeback(pg); + + if (PageError(pg)) + ret++; + } + } + + return ret; +} + +static int check_journal_footer(const jnode * node UNUSED_ARG) +{ + /* FIXME: journal footer has no magic field yet. 
 */
+	return 0;
+}
+
+/* verify the magic of a transaction head block read back during replay */
+static int check_tx_head(const jnode * node)
+{
+	struct tx_header *header = (struct tx_header *)jdata(node);
+
+	if (memcmp(&header->magic, TX_HEADER_MAGIC, TX_HEADER_MAGIC_SIZE) != 0) {
+		warning("zam-627", "tx head at block %s corrupted\n",
+			sprint_address(jnode_get_block(node)));
+		return RETERR(-EIO);
+	}
+
+	return 0;
+}
+
+/* verify the magic of an ordinary wander record block read back during
+   replay */
+static int check_wander_record(const jnode * node)
+{
+	struct wander_record_header *RH =
+	    (struct wander_record_header *)jdata(node);
+
+	if (memcmp(&RH->magic, WANDER_RECORD_MAGIC, WANDER_RECORD_MAGIC_SIZE) !=
+	    0) {
+		warning("zam-628", "wander record at block %s corrupted\n",
+			sprint_address(jnode_get_block(node)));
+		return RETERR(-EIO);
+	}
+
+	return 0;
+}
+
+/* fill the commit_handle structure with everything needed for
+   update_journal_footer() */
+static int restore_commit_handle(struct commit_handle *ch, jnode *tx_head)
+{
+	struct tx_header *TXH;
+	int ret;
+
+	ret = jload(tx_head);
+	if (ret)
+		return ret;
+
+	TXH = (struct tx_header *)jdata(tx_head);
+
+	ch->free_blocks = le64_to_cpu(get_unaligned(&TXH->free_blocks));
+	ch->nr_files = le64_to_cpu(get_unaligned(&TXH->nr_files));
+	ch->next_oid = le64_to_cpu(get_unaligned(&TXH->next_oid));
+
+	jrelse(tx_head);
+
+	/* the tx head becomes the first element of ch->tx_list */
+	list_add(&tx_head->capture_link, &ch->tx_list);
+
+	return 0;
+}
+
+/* replay one transaction: restore and write overwrite set in place */
+static int replay_transaction(const struct super_block *s,
+			      jnode * tx_head,
+			      const reiser4_block_nr * log_rec_block_p,
+			      const reiser4_block_nr * end_block,
+			      unsigned int nr_wander_records)
+{
+	reiser4_block_nr log_rec_block = *log_rec_block_p;
+	struct commit_handle ch;
+	LIST_HEAD(overwrite_set);
+	jnode *log;
+	int ret;
+
+	init_commit_handle(&ch, NULL);
+	ch.overwrite_set = &overwrite_set;
+
+	/* NOTE(review): the int return of restore_commit_handle() is
+	   ignored here, so a jload() failure on the tx head would go
+	   unnoticed -- consider checking it */
+	restore_commit_handle(&ch, tx_head);
+
+	while (log_rec_block != *end_block) {
+		struct wander_record_header *header;
+		struct wander_entry *entry;
+
+		int i;
+
+		if (nr_wander_records == 0) {
+			
warning("zam-631", + "number of wander records in the linked list" + " greater than number stored in tx head.\n"); + ret = RETERR(-EIO); + goto free_ow_set; + } + + log = reiser4_alloc_io_head(&log_rec_block); + if (log == NULL) + return RETERR(-ENOMEM); + + ret = jload(log); + if (ret < 0) { + reiser4_drop_io_head(log); + return ret; + } + + ret = check_wander_record(log); + if (ret) { + jrelse(log); + reiser4_drop_io_head(log); + return ret; + } + + header = (struct wander_record_header *)jdata(log); + log_rec_block = le64_to_cpu(get_unaligned(&header->next_block)); + + entry = (struct wander_entry *)(header + 1); + + /* restore overwrite set from wander record content */ + for (i = 0; i < wander_record_capacity(s); i++) { + reiser4_block_nr block; + jnode *node; + + block = le64_to_cpu(get_unaligned(&entry->wandered)); + if (block == 0) + break; + + node = reiser4_alloc_io_head(&block); + if (node == NULL) { + ret = RETERR(-ENOMEM); + /* + * FIXME-VS:??? + */ + jrelse(log); + reiser4_drop_io_head(log); + goto free_ow_set; + } + + ret = jload(node); + + if (ret < 0) { + reiser4_drop_io_head(node); + /* + * FIXME-VS:??? 
+ */ + jrelse(log); + reiser4_drop_io_head(log); + goto free_ow_set; + } + + block = le64_to_cpu(get_unaligned(&entry->original)); + + assert("zam-603", block != 0); + + jnode_set_block(node, &block); + + list_add_tail(&node->capture_link, ch.overwrite_set); + + ++entry; + } + + jrelse(log); + reiser4_drop_io_head(log); + + --nr_wander_records; + } + + if (nr_wander_records != 0) { + warning("zam-632", "number of wander records in the linked list" + " less than number stored in tx head.\n"); + ret = RETERR(-EIO); + goto free_ow_set; + } + + { /* write wandered set in place */ + write_jnode_list(ch.overwrite_set, NULL, NULL, 0); + ret = wait_on_jnode_list(ch.overwrite_set); + + if (ret) { + ret = RETERR(-EIO); + goto free_ow_set; + } + } + + ret = update_journal_footer(&ch); + + free_ow_set: + + while (!list_empty(ch.overwrite_set)) { + jnode *cur = list_entry(ch.overwrite_set->next, jnode, capture_link); + list_del_init(&cur->capture_link); + jrelse(cur); + reiser4_drop_io_head(cur); + } + + list_del_init(&tx_head->capture_link); + + done_commit_handle(&ch); + + return ret; +} + +/* find oldest committed and not played transaction and play it. The transaction + * was committed and journal header block was updated but the blocks from the + * process of writing the atom's overwrite set in-place and updating of journal + * footer block were not completed. This function completes the process by + * recovering the atom's overwrite set from their wandered locations and writes + * them in-place and updating the journal footer. 
*/ +static int replay_oldest_transaction(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *jf = sbinfo->journal_footer; + unsigned int total; + struct journal_footer *F; + struct tx_header *T; + + reiser4_block_nr prev_tx; + reiser4_block_nr last_flushed_tx; + reiser4_block_nr log_rec_block = 0; + + jnode *tx_head; + + int ret; + + if ((ret = jload(jf)) < 0) + return ret; + + F = (struct journal_footer *)jdata(jf); + + last_flushed_tx = le64_to_cpu(get_unaligned(&F->last_flushed_tx)); + + jrelse(jf); + + if (sbinfo->last_committed_tx == last_flushed_tx) { + /* all transactions are replayed */ + return 0; + } + + prev_tx = sbinfo->last_committed_tx; + + /* searching for oldest not flushed transaction */ + while (1) { + tx_head = reiser4_alloc_io_head(&prev_tx); + if (!tx_head) + return RETERR(-ENOMEM); + + ret = jload(tx_head); + if (ret < 0) { + reiser4_drop_io_head(tx_head); + return ret; + } + + ret = check_tx_head(tx_head); + if (ret) { + jrelse(tx_head); + reiser4_drop_io_head(tx_head); + return ret; + } + + T = (struct tx_header *)jdata(tx_head); + + prev_tx = le64_to_cpu(get_unaligned(&T->prev_tx)); + + if (prev_tx == last_flushed_tx) + break; + + jrelse(tx_head); + reiser4_drop_io_head(tx_head); + } + + total = le32_to_cpu(get_unaligned(&T->total)); + log_rec_block = le64_to_cpu(get_unaligned(&T->next_block)); + + pin_jnode_data(tx_head); + jrelse(tx_head); + + ret = + replay_transaction(s, tx_head, &log_rec_block, + jnode_get_block(tx_head), total - 1); + + unpin_jnode_data(tx_head); + reiser4_drop_io_head(tx_head); + + if (ret) + return ret; + return -E_REPEAT; +} + +/* The reiser4 journal current implementation was optimized to not to capture + super block if certain super blocks fields are modified. Currently, the set + is (, ). 
These fields are logged by + special way which includes storing them in each transaction head block at + atom commit time and writing that information to journal footer block at + atom flush time. For getting info from journal footer block to the + in-memory super block there is a special function + reiser4_journal_recover_sb_data() which should be called after disk format + plugin re-reads super block after journal replaying. +*/ + +/* get the information from journal footer in-memory super block */ +int reiser4_journal_recover_sb_data(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + struct journal_footer *jf; + int ret; + + assert("zam-673", sbinfo->journal_footer != NULL); + + ret = jload(sbinfo->journal_footer); + if (ret != 0) + return ret; + + ret = check_journal_footer(sbinfo->journal_footer); + if (ret != 0) + goto out; + + jf = (struct journal_footer *)jdata(sbinfo->journal_footer); + + /* was there at least one flushed transaction? */ + if (jf->last_flushed_tx) { + + /* restore free block counter logged in this transaction */ + reiser4_set_free_blocks(s, le64_to_cpu(get_unaligned(&jf->free_blocks))); + + /* restore oid allocator state */ + oid_init_allocator(s, + le64_to_cpu(get_unaligned(&jf->nr_files)), + le64_to_cpu(get_unaligned(&jf->next_oid))); + } + out: + jrelse(sbinfo->journal_footer); + return ret; +} + +/* reiser4 replay journal procedure */ +int reiser4_journal_replay(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *jh, *jf; + struct journal_header *header; + int nr_tx_replayed = 0; + int ret; + + assert("zam-582", sbinfo != NULL); + + jh = sbinfo->journal_header; + jf = sbinfo->journal_footer; + + if (!jh || !jf) { + /* it is possible that disk layout does not support journal + structures, we just warn about this */ + warning("zam-583", + "journal control blocks were not loaded by disk layout plugin. 
" + "journal replaying is not possible.\n"); + return 0; + } + + /* Take free block count from journal footer block. The free block + counter value corresponds the last flushed transaction state */ + ret = jload(jf); + if (ret < 0) + return ret; + + ret = check_journal_footer(jf); + if (ret) { + jrelse(jf); + return ret; + } + + jrelse(jf); + + /* store last committed transaction info in reiser4 in-memory super + block */ + ret = jload(jh); + if (ret < 0) + return ret; + + ret = check_journal_header(jh); + if (ret) { + jrelse(jh); + return ret; + } + + header = (struct journal_header *)jdata(jh); + sbinfo->last_committed_tx = le64_to_cpu(get_unaligned(&header->last_committed_tx)); + + jrelse(jh); + + /* replay committed transactions */ + while ((ret = replay_oldest_transaction(s)) == -E_REPEAT) + nr_tx_replayed++; + + return ret; +} + +/* load journal control block (either journal header or journal footer block) */ +static int +load_journal_control_block(jnode ** node, const reiser4_block_nr * block) +{ + int ret; + + *node = reiser4_alloc_io_head(block); + if (!(*node)) + return RETERR(-ENOMEM); + + ret = jload(*node); + + if (ret) { + reiser4_drop_io_head(*node); + *node = NULL; + return ret; + } + + pin_jnode_data(*node); + jrelse(*node); + + return 0; +} + +/* unload journal header or footer and free jnode */ +static void unload_journal_control_block(jnode ** node) +{ + if (*node) { + unpin_jnode_data(*node); + reiser4_drop_io_head(*node); + *node = NULL; + } +} + +/* release journal control blocks */ +void reiser4_done_journal_info(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("zam-476", sbinfo != NULL); + + unload_journal_control_block(&sbinfo->journal_header); + unload_journal_control_block(&sbinfo->journal_footer); + rcu_barrier(); +} + +/* load journal control blocks */ +int reiser4_init_journal_info(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + journal_location *loc; 
+ int ret; + + loc = &sbinfo->jloc; + + assert("zam-651", loc != NULL); + assert("zam-652", loc->header != 0); + assert("zam-653", loc->footer != 0); + + ret = load_journal_control_block(&sbinfo->journal_header, &loc->header); + + if (ret) + return ret; + + ret = load_journal_control_block(&sbinfo->journal_footer, &loc->footer); + + if (ret) { + unload_journal_control_block(&sbinfo->journal_header); + } + + return ret; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/wander.h b/fs/reiser4/wander.h new file mode 100644 index 000000000000..8746710b66be --- /dev/null +++ b/fs/reiser4/wander.h @@ -0,0 +1,135 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__FS_REISER4_WANDER_H__) +#define __FS_REISER4_WANDER_H__ + +#include "dformat.h" + +#include /* for struct super_block */ + +/* REISER4 JOURNAL ON-DISK DATA STRUCTURES */ + +#define TX_HEADER_MAGIC "TxMagic4" +#define WANDER_RECORD_MAGIC "LogMagc4" + +#define TX_HEADER_MAGIC_SIZE (8) +#define WANDER_RECORD_MAGIC_SIZE (8) + +/* journal header block format */ +struct journal_header { + /* last written transaction head location */ + d64 last_committed_tx; +}; + +typedef struct journal_location { + reiser4_block_nr footer; + reiser4_block_nr header; +} journal_location; + +/* The wander.c head comment describes usage and semantic of all these structures */ +/* journal footer block format */ +struct journal_footer { + /* last flushed transaction location. 
*/ + /* This block number is no more valid after the transaction it points + to gets flushed, this number is used only at journal replaying time + for detection of the end of on-disk list of committed transactions + which were not flushed completely */ + d64 last_flushed_tx; + + /* free block counter is written in journal footer at transaction + flushing , not in super block because free blocks counter is logged + by another way than super block fields (root pointer, for + example). */ + d64 free_blocks; + + /* number of used OIDs and maximal used OID are logged separately from + super block */ + d64 nr_files; + d64 next_oid; +}; + +/* Each wander record (except the first one) has unified format with wander + record header followed by an array of log entries */ +struct wander_record_header { + /* when there is no predefined location for wander records, this magic + string should help reiser4fsck. */ + char magic[WANDER_RECORD_MAGIC_SIZE]; + + /* transaction id */ + d64 id; + + /* total number of wander records in current transaction */ + d32 total; + + /* this block number in transaction */ + d32 serial; + + /* number of previous block in commit */ + d64 next_block; +}; + +/* The first wander record (transaction head) of written transaction has the + special format */ +struct tx_header { + /* magic string makes first block in transaction different from other + logged blocks, it should help fsck. 
*/ + char magic[TX_HEADER_MAGIC_SIZE]; + + /* transaction id */ + d64 id; + + /* total number of records (including this first tx head) in the + transaction */ + d32 total; + + /* align next field to 8-byte boundary; this field always is zero */ + d32 padding; + + /* block number of previous transaction head */ + d64 prev_tx; + + /* next wander record location */ + d64 next_block; + + /* committed versions of free blocks counter */ + d64 free_blocks; + + /* number of used OIDs (nr_files) and maximal used OID are logged + separately from super block */ + d64 nr_files; + d64 next_oid; +}; + +/* A transaction gets written to disk as a set of wander records (each wander + record size is fs block) */ + +/* As it was told above a wander The rest of wander record is filled by these log entries, unused space filled + by zeroes */ +struct wander_entry { + d64 original; /* block original location */ + d64 wandered; /* block wandered location */ +}; + +/* REISER4 JOURNAL WRITER FUNCTIONS */ + +extern int reiser4_write_logs(long *); +extern int reiser4_journal_replay(struct super_block *); +extern int reiser4_journal_recover_sb_data(struct super_block *); + +extern int reiser4_init_journal_info(struct super_block *); +extern void reiser4_done_journal_info(struct super_block *); + +extern int write_jnode_list(struct list_head *, flush_queue_t *, long *, int); + +#endif /* __FS_REISER4_WANDER_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/writeout.h b/fs/reiser4/writeout.h new file mode 100644 index 000000000000..fb9d2e493940 --- /dev/null +++ b/fs/reiser4/writeout.h @@ -0,0 +1,21 @@ +/* Copyright 2002, 2003, 2004 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__FS_REISER4_WRITEOUT_H__) + +#define WRITEOUT_SINGLE_STREAM (0x1) +#define WRITEOUT_FOR_PAGE_RECLAIM (0x2) +#define WRITEOUT_FLUSH_FUA (0x4) + +extern int reiser4_get_writeout_flags(void); + +#endif /* __FS_REISER4_WRITEOUT_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/znode.c b/fs/reiser4/znode.c new file mode 100644 index 000000000000..f7c77ef5435a --- /dev/null +++ b/fs/reiser4/znode.c @@ -0,0 +1,1027 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Znode manipulation functions. */ +/* Znode is the in-memory header for a tree node. It is stored + separately from the node itself so that it does not get written to + disk. In this respect znode is like buffer head or page head. We + also use znodes for additional reiser4 specific purposes: + + . they are organized into tree structure which is a part of whole + reiser4 tree. + . they are used to implement node grained locking + . they are used to keep additional state associated with a + node + . they contain links to lists used by the transaction manager + + Znode is attached to some variable "block number" which is instance of + fs/reiser4/tree.h:reiser4_block_nr type. Znode can exist without + appropriate node being actually loaded in memory. Existence of znode itself + is regulated by reference count (->x_count) in it. 
Each time thread + acquires reference to znode through call to zget(), ->x_count is + incremented and decremented on call to zput(). Data (content of node) are + brought in memory through call to zload(), which also increments ->d_count + reference counter. zload can block waiting on IO. Call to zrelse() + decreases this counter. Also, ->c_count keeps track of number of child + znodes and prevents parent znode from being recycled until all of its + children are. ->c_count is decremented whenever child goes out of existence + (being actually recycled in zdestroy()) which can be some time after last + reference to this child dies if we support some form of LRU cache for + znodes. + +*/ +/* EVERY ZNODE'S STORY + + 1. His infancy. + + Once upon a time, the znode was born deep inside of zget() by call to + zalloc(). At the return from zget() znode had: + + . reference counter (x_count) of 1 + . assigned block number, marked as used in bitmap + . pointer to parent znode. Root znode parent pointer points + to its father: "fake" znode. This, in turn, has NULL parent pointer. + . hash table linkage + . no data loaded from disk + . no node plugin + . no sibling linkage + + 2. His childhood + + Each node is either brought into memory as a result of tree traversal, or + created afresh, creation of the root being a special case of the latter. In + either case it's inserted into sibling list. This will typically require + some ancillary tree traversing, but ultimately both sibling pointers will + exist and JNODE_LEFT_CONNECTED and JNODE_RIGHT_CONNECTED will be true in + zjnode.state. + + 3. His youth. + + If znode is bound to already existing node in a tree, its content is read + from the disk by call to zload(). At that moment, JNODE_LOADED bit is set + in zjnode.state and zdata() function starts to return non null for this + znode. zload() further calls zparse() that determines which node layout + this node is rendered in, and sets ->nplug on success. 
+ + If znode is for new node just created, memory for it is allocated and + zinit_new() function is called to initialise data, according to selected + node layout. + + 4. His maturity. + + After this point, znode lingers in memory for some time. Threads can + acquire references to znode either by blocknr through call to zget(), or by + following a pointer to unallocated znode from internal item. Each time + reference to znode is obtained, x_count is increased. Thread can read/write + lock znode. Znode data can be loaded through calls to zload(), d_count will + be increased appropriately. If all references to znode are released + (x_count drops to 0), znode is not recycled immediately. Rather, it is + still cached in the hash table in the hope that it will be accessed + shortly. + + There are two ways in which znode existence can be terminated: + + . sudden death: node bound to this znode is removed from the tree + . overpopulation: znode is purged out of memory due to memory pressure + + 5. His death. + + Death is complex process. + + When we irrevocably commit ourselves to decision to remove node from the + tree, JNODE_HEARD_BANSHEE bit is set in zjnode.state of corresponding + znode. This is done either in ->kill_hook() of internal item or in + reiser4_kill_root() function when tree root is removed. + + At this moment znode still has: + + . locks held on it, necessary write ones + . references to it + . disk block assigned to it + . data loaded from the disk + . pending requests for lock + + But once JNODE_HEARD_BANSHEE bit set, last call to unlock_znode() does node + deletion. Node deletion includes two phases. 
First all ways to get + references to that znode (sibling and parent links and hash lookup using + block number stored in parent node) should be deleted -- it is done through + sibling_list_remove(), also we assume that nobody uses down link from + parent node due to its nonexistence or proper parent node locking and + nobody uses parent pointers from children due to absence of them. Second we + invalidate all pending lock requests which still are on znode's lock + request queue, this is done by reiser4_invalidate_lock(). Another + JNODE_IS_DYING znode status bit is used to invalidate pending lock requests. + Once it set all requesters are forced to return -EINVAL from + longterm_lock_znode(). Future locking attempts are not possible because all + ways to get references to that znode are removed already. Last, node is + uncaptured from transaction. + + When last reference to the dying znode is just about to be released, + block number for this lock is released and znode is removed from the + hash table. + + Now znode can be recycled. + + [it's possible to free bitmap block and remove znode from the hash + table when last lock is released. This will result in having + referenced but completely orphaned znode] + + 6. Limbo + + As have been mentioned above znodes with reference counter 0 are + still cached in a hash table. Once memory pressure increases they are + purged out of there [this requires something like LRU list for + efficient implementation. LRU list would also greatly simplify + implementation of coord cache that would in this case morph to just + scanning some initial segment of LRU list]. Data loaded into + unreferenced znode are flushed back to the durable storage if + necessary and memory is freed. Znodes themselves can be recycled at + this point too. 
+ +*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/plugin_header.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "reiser4.h" + +#include +#include +#include +#include + +static z_hash_table *get_htable(reiser4_tree *, + const reiser4_block_nr * const blocknr); +static z_hash_table *znode_get_htable(const znode *); +static void zdrop(znode *); + +/* hash table support */ + +/* compare two block numbers for equality. Used by hash-table macros */ +static inline int +blknreq(const reiser4_block_nr * b1, const reiser4_block_nr * b2) +{ + assert("nikita-534", b1 != NULL); + assert("nikita-535", b2 != NULL); + + return *b1 == *b2; +} + +/* Hash znode by block number. Used by hash-table macros */ +/* Audited by: umka (2002.06.11) */ +static inline __u32 +blknrhashfn(z_hash_table * table, const reiser4_block_nr * b) +{ + assert("nikita-536", b != NULL); + + return *b & (REISER4_ZNODE_HASH_TABLE_SIZE - 1); +} + +/* The hash table definition */ +#define KMALLOC(size) reiser4_vmalloc(size) +#define KFREE(ptr, size) vfree(ptr) +TYPE_SAFE_HASH_DEFINE(z, znode, reiser4_block_nr, zjnode.key.z, zjnode.link.z, + blknrhashfn, blknreq); +#undef KFREE +#undef KMALLOC + +/* slab for znodes */ +static struct kmem_cache *znode_cache; + +int znode_shift_order; + +/** + * init_znodes - create znode cache + * + * Initializes slab cache of znodes. It is part of reiser4 module initialization. 
+ */ +int init_znodes(void) +{ + znode_cache = kmem_cache_create("znode", sizeof(znode), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (znode_cache == NULL) + return RETERR(-ENOMEM); + + for (znode_shift_order = 0; (1 << znode_shift_order) < sizeof(znode); + ++znode_shift_order); + --znode_shift_order; + return 0; +} + +/** + * done_znodes - delete znode cache + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_znodes(void) +{ + destroy_reiser4_cache(&znode_cache); +} + +/* call this to initialise tree of znodes */ +int znodes_tree_init(reiser4_tree * tree /* tree to initialise znodes for */ ) +{ + int result; + assert("umka-050", tree != NULL); + + rwlock_init(&tree->dk_lock); + + result = z_hash_init(&tree->zhash_table, REISER4_ZNODE_HASH_TABLE_SIZE); + if (result != 0) + return result; + result = z_hash_init(&tree->zfake_table, REISER4_ZNODE_HASH_TABLE_SIZE); + return result; +} + +/* free this znode */ +void zfree(znode * node /* znode to free */ ) +{ + assert("nikita-465", node != NULL); + assert("nikita-2120", znode_page(node) == NULL); + assert("nikita-2301", list_empty_careful(&node->lock.owners)); + assert("nikita-2302", list_empty_careful(&node->lock.requestors)); + assert("nikita-2663", (list_empty_careful(&ZJNODE(node)->capture_link) && + NODE_LIST(ZJNODE(node)) == NOT_CAPTURED)); + assert("nikita-3220", list_empty(&ZJNODE(node)->jnodes)); + assert("nikita-3293", !znode_is_right_connected(node)); + assert("nikita-3294", !znode_is_left_connected(node)); + assert("nikita-3295", node->left == NULL); + assert("nikita-3296", node->right == NULL); + + /* not yet phash_jnode_destroy(ZJNODE(node)); */ + + kmem_cache_free(znode_cache, node); +} + +/* call this to free tree of znodes */ +void znodes_tree_done(reiser4_tree * tree /* tree to finish with znodes of */ ) +{ + znode *node; + znode *next; + z_hash_table *ztable; + + /* scan znode hash-tables and kill all znodes, then free hash tables + * themselves. 
*/ + + assert("nikita-795", tree != NULL); + + ztable = &tree->zhash_table; + + if (ztable->_table != NULL) { + for_all_in_htable(ztable, z, node, next) { + node->c_count = 0; + node->in_parent.node = NULL; + assert("nikita-2179", atomic_read(&ZJNODE(node)->x_count) == 0); + zdrop(node); + } + + z_hash_done(&tree->zhash_table); + } + + ztable = &tree->zfake_table; + + if (ztable->_table != NULL) { + for_all_in_htable(ztable, z, node, next) { + node->c_count = 0; + node->in_parent.node = NULL; + assert("nikita-2179", atomic_read(&ZJNODE(node)->x_count) == 0); + zdrop(node); + } + + z_hash_done(&tree->zfake_table); + } +} + +/* ZNODE STRUCTURES */ + +/* allocate fresh znode */ +znode *zalloc(gfp_t gfp_flag /* allocation flag */ ) +{ + znode *node; + + node = kmem_cache_alloc(znode_cache, gfp_flag); + return node; +} + +/* Initialize fields of znode + @node: znode to initialize; + @parent: parent znode; + @tree: tree we are in. */ +void zinit(znode * node, const znode * parent, reiser4_tree * tree) +{ + assert("nikita-466", node != NULL); + assert("umka-268", current_tree != NULL); + + memset(node, 0, sizeof *node); + + assert("umka-051", tree != NULL); + + jnode_init(&node->zjnode, tree, JNODE_FORMATTED_BLOCK); + reiser4_init_lock(&node->lock); + init_parent_coord(&node->in_parent, parent); +} + +/* + * remove znode from indices. This is called jput() when last reference on + * znode is released. + */ +void znode_remove(znode * node /* znode to remove */ , reiser4_tree * tree) +{ + assert("nikita-2108", node != NULL); + assert("nikita-470", node->c_count == 0); + assert_rw_write_locked(&(tree->tree_lock)); + + /* remove reference to this znode from cbk cache */ + cbk_cache_invalidate(node, tree); + + /* update c_count of parent */ + if (znode_parent(node) != NULL) { + assert("nikita-472", znode_parent(node)->c_count > 0); + /* father, onto your hands I forward my spirit... 
*/ + znode_parent(node)->c_count--; + node->in_parent.node = NULL; + } else { + /* orphaned znode?! Root? */ + } + + /* remove znode from hash-table */ + z_hash_remove_rcu(znode_get_htable(node), node); +} + +/* zdrop() -- Remove znode from the tree. + + This is called when znode is removed from the memory. */ +static void zdrop(znode * node /* znode to finish with */ ) +{ + jdrop(ZJNODE(node)); +} + +/* + * put znode into right place in the hash table. This is called by relocate + * code. + */ +int znode_rehash(znode * node /* node to rehash */ , + const reiser4_block_nr * new_block_nr /* new block number */ ) +{ + z_hash_table *oldtable; + z_hash_table *newtable; + reiser4_tree *tree; + + assert("nikita-2018", node != NULL); + + tree = znode_get_tree(node); + oldtable = znode_get_htable(node); + newtable = get_htable(tree, new_block_nr); + + write_lock_tree(tree); + /* remove znode from hash-table */ + z_hash_remove_rcu(oldtable, node); + + /* assertion no longer valid due to RCU */ + /* assert("nikita-2019", z_hash_find(newtable, new_block_nr) == NULL); */ + + /* update blocknr */ + znode_set_block(node, new_block_nr); + node->zjnode.key.z = *new_block_nr; + + /* insert it into hash */ + z_hash_insert_rcu(newtable, node); + write_unlock_tree(tree); + return 0; +} + +/* ZNODE LOOKUP, GET, PUT */ + +/* zlook() - get znode with given block_nr in a hash table or return NULL + + If result is non-NULL then the znode's x_count is incremented. Internal version + accepts pre-computed hash index. The hash table is accessed under caller's + tree->hash_lock. 
+*/ +znode *zlook(reiser4_tree * tree, const reiser4_block_nr * const blocknr) +{ + znode *result; + __u32 hash; + z_hash_table *htable; + + assert("jmacd-506", tree != NULL); + assert("jmacd-507", blocknr != NULL); + + htable = get_htable(tree, blocknr); + hash = blknrhashfn(htable, blocknr); + + rcu_read_lock(); + result = z_hash_find_index(htable, hash, blocknr); + + if (result != NULL) { + add_x_ref(ZJNODE(result)); + result = znode_rip_check(tree, result); + } + rcu_read_unlock(); + + return result; +} + +/* return hash table where znode with block @blocknr is (or should be) + * stored */ +static z_hash_table *get_htable(reiser4_tree * tree, + const reiser4_block_nr * const blocknr) +{ + z_hash_table *table; + if (is_disk_addr_unallocated(blocknr)) + table = &tree->zfake_table; + else + table = &tree->zhash_table; + return table; +} + +/* return hash table where znode @node is (or should be) stored */ +static z_hash_table *znode_get_htable(const znode * node) +{ + return get_htable(znode_get_tree(node), znode_get_block(node)); +} + +/* zget() - get znode from hash table, allocating it if necessary. + + First a call to zlook, locating a x-referenced znode if one + exists. If znode is not found, allocate new one and return. Result + is returned with x_count reference increased. + + LOCKS TAKEN: TREE_LOCK, ZNODE_LOCK + LOCK ORDERING: NONE +*/ +znode *zget(reiser4_tree * tree, + const reiser4_block_nr * const blocknr, + znode * parent, tree_level level, gfp_t gfp_flag) +{ + znode *result; + __u32 hashi; + + z_hash_table *zth; + + assert("jmacd-512", tree != NULL); + assert("jmacd-513", blocknr != NULL); + assert("jmacd-514", level < REISER4_MAX_ZTREE_HEIGHT); + + zth = get_htable(tree, blocknr); + hashi = blknrhashfn(zth, blocknr); + + /* NOTE-NIKITA address-as-unallocated-blocknr still is not + implemented. */ + + z_hash_prefetch_bucket(zth, hashi); + + rcu_read_lock(); + /* Find a matching BLOCKNR in the hash table. 
If the znode is found, + we obtain an reference (x_count) but the znode remains unlocked. + Have to worry about race conditions later. */ + result = z_hash_find_index(zth, hashi, blocknr); + /* According to the current design, the hash table lock protects new + znode references. */ + if (result != NULL) { + add_x_ref(ZJNODE(result)); + /* NOTE-NIKITA it should be so, but special case during + creation of new root makes such assertion highly + complicated. */ + assert("nikita-2131", 1 || znode_parent(result) == parent || + (ZF_ISSET(result, JNODE_ORPHAN) + && (znode_parent(result) == NULL))); + result = znode_rip_check(tree, result); + } + + rcu_read_unlock(); + + if (!result) { + znode *shadow; + + result = zalloc(gfp_flag); + if (!result) { + return ERR_PTR(RETERR(-ENOMEM)); + } + + zinit(result, parent, tree); + ZJNODE(result)->blocknr = *blocknr; + ZJNODE(result)->key.z = *blocknr; + result->level = level; + + write_lock_tree(tree); + + shadow = z_hash_find_index(zth, hashi, blocknr); + if (unlikely(shadow != NULL && !ZF_ISSET(shadow, JNODE_RIP))) { + jnode_list_remove(ZJNODE(result)); + zfree(result); + result = shadow; + } else { + result->version = znode_build_version(tree); + z_hash_insert_index_rcu(zth, hashi, result); + + if (parent != NULL) + ++parent->c_count; + } + + add_x_ref(ZJNODE(result)); + + write_unlock_tree(tree); + } + + assert("intelfx-6", + ergo(!reiser4_blocknr_is_fake(blocknr) && *blocknr != 0, + reiser4_check_block(blocknr, 1))); + + /* Check for invalid tree level, return -EIO */ + if (unlikely(znode_get_level(result) != level)) { + warning("jmacd-504", + "Wrong level for cached block %llu: %i expecting %i", + (unsigned long long)(*blocknr), znode_get_level(result), + level); + zput(result); + return ERR_PTR(RETERR(-EIO)); + } + + assert("nikita-1227", znode_invariant(result)); + + return result; +} + +/* ZNODE PLUGINS/DATA */ + +/* "guess" plugin for node loaded from the disk. 
Plugin id of node plugin is + stored at the fixed offset from the beginning of the node. */ +static node_plugin *znode_guess_plugin(const znode * node /* znode to guess + * plugin of */ ) +{ + reiser4_tree *tree; + + assert("nikita-1053", node != NULL); + assert("nikita-1055", zdata(node) != NULL); + + tree = znode_get_tree(node); + assert("umka-053", tree != NULL); + + if (reiser4_is_set(tree->super, REISER4_ONE_NODE_PLUGIN)) { + return tree->nplug; + } else { + return node_plugin_by_disk_id + (tree, &((common_node_header *) zdata(node))->plugin_id); +#ifdef GUESS_EXISTS + reiser4_plugin *plugin; + + /* NOTE-NIKITA add locking here when dynamic plugins will be + * implemented */ + for_all_plugins(REISER4_NODE_PLUGIN_TYPE, plugin) { + if ((plugin->u.node.guess != NULL) + && plugin->u.node.guess(node)) + return plugin; + } + warning("nikita-1057", "Cannot guess node plugin"); + print_znode("node", node); + return NULL; +#endif + } +} + +/* parse node header and install ->node_plugin */ +int zparse(znode * node /* znode to parse */ ) +{ + int result; + + assert("nikita-1233", node != NULL); + assert("nikita-2370", zdata(node) != NULL); + + if (node->nplug == NULL) { + node_plugin *nplug; + + nplug = znode_guess_plugin(node); + if (likely(nplug != NULL)) { + result = nplug->parse(node); + if (likely(result == 0)) + node->nplug = nplug; + } else { + result = RETERR(-EIO); + } + } else + result = 0; + return result; +} + +/* zload with readahead */ +int zload_ra(znode * node /* znode to load */ , ra_info_t * info) +{ + int result; + + assert("nikita-484", node != NULL); + assert("nikita-1377", znode_invariant(node)); + assert("jmacd-7771", !znode_above_root(node)); + assert("nikita-2125", atomic_read(&ZJNODE(node)->x_count) > 0); + assert("nikita-3016", reiser4_schedulable()); + + if (info) + formatted_readahead(node, info); + + result = jload(ZJNODE(node)); + assert("nikita-1378", znode_invariant(node)); + return result; +} + +/* load content of node into memory */ 
+int zload(znode *node) +{ + return zload_ra(node, NULL); +} + +/* call node plugin to initialise newly allocated node. */ +int zinit_new(znode * node /* znode to initialise */ , gfp_t gfp_flags) +{ + return jinit_new(ZJNODE(node), gfp_flags); +} + +/* drop reference to node data. When last reference is dropped, data are + unloaded. */ +void zrelse(znode * node /* znode to release references to */ ) +{ + assert("nikita-1381", znode_invariant(node)); + jrelse(ZJNODE(node)); +} + +/* returns free space in node */ +unsigned znode_free_space(znode * node /* znode to query */ ) +{ + assert("nikita-852", node != NULL); + return node_plugin_by_node(node)->free_space(node); +} + +/* left delimiting key of znode */ +reiser4_key *znode_get_rd_key(znode * node /* znode to query */ ) +{ + assert("nikita-958", node != NULL); + assert_rw_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3067", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-30671", node->rd_key_version != 0); + return &node->rd_key; +} + +/* right delimiting key of znode */ +reiser4_key *znode_get_ld_key(znode * node /* znode to query */ ) +{ + assert("nikita-974", node != NULL); + assert_rw_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3068", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-30681", node->ld_key_version != 0); + return &node->ld_key; +} + +ON_DEBUG(atomic_t delim_key_version = ATOMIC_INIT(0); + ) + +/* update right-delimiting key of @node */ +reiser4_key *znode_set_rd_key(znode * node, const reiser4_key * key) +{ + assert("nikita-2937", node != NULL); + assert("nikita-2939", key != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3069", LOCK_CNT_GTZ(write_locked_dk)); + assert("nikita-2944", + znode_is_any_locked(node) || + znode_get_level(node) != LEAF_LEVEL || + keyge(key, &node->rd_key) || + keyeq(&node->rd_key, reiser4_min_key()) || + ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + + node->rd_key = *key; + ON_DEBUG(node->rd_key_version = 
atomic_inc_return(&delim_key_version)); + return &node->rd_key; +} + +/* update left-delimiting key of @node */ +reiser4_key *znode_set_ld_key(znode * node, const reiser4_key * key) +{ + assert("nikita-2940", node != NULL); + assert("nikita-2941", key != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3070", LOCK_CNT_GTZ(write_locked_dk)); + assert("nikita-2943", + znode_is_any_locked(node) || keyeq(&node->ld_key, + reiser4_min_key())); + + node->ld_key = *key; + ON_DEBUG(node->ld_key_version = atomic_inc_return(&delim_key_version)); + return &node->ld_key; +} + +/* true if @key is inside key range for @node */ +int znode_contains_key(znode * node /* znode to look in */ , + const reiser4_key * key /* key to look for */ ) +{ + assert("nikita-1237", node != NULL); + assert("nikita-1238", key != NULL); + + /* left_delimiting_key <= key <= right_delimiting_key */ + return keyle(znode_get_ld_key(node), key) + && keyle(key, znode_get_rd_key(node)); +} + +/* same as znode_contains_key(), but lock dk lock */ +int znode_contains_key_lock(znode * node /* znode to look in */ , + const reiser4_key * key /* key to look for */ ) +{ + int result; + + assert("umka-056", node != NULL); + assert("umka-057", key != NULL); + + read_lock_dk(znode_get_tree(node)); + result = znode_contains_key(node, key); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +/* get parent pointer, assuming tree is not locked */ +znode *znode_parent_nolock(const znode * node /* child znode */ ) +{ + assert("nikita-1444", node != NULL); + return node->in_parent.node; +} + +/* get parent pointer of znode */ +znode *znode_parent(const znode * node /* child znode */ ) +{ + assert("nikita-1226", node != NULL); + assert("nikita-1406", LOCK_CNT_GTZ(rw_locked_tree)); + return znode_parent_nolock(node); +} + +/* detect uber znode used to protect in-superblock tree root pointer */ +int znode_above_root(const znode * node /* znode to query */ ) +{ + assert("umka-059", 
node != NULL); + + return disk_addr_eq(&ZJNODE(node)->blocknr, &UBER_TREE_ADDR); +} + +/* check that @node is root---that its block number is recorder in the tree as + that of root node */ +#if REISER4_DEBUG +static int znode_is_true_root(const znode * node /* znode to query */ ) +{ + assert("umka-060", node != NULL); + assert("umka-061", current_tree != NULL); + + return disk_addr_eq(znode_get_block(node), + &znode_get_tree(node)->root_block); +} +#endif + +/* check that @node is root */ +int znode_is_root(const znode * node /* znode to query */ ) +{ + return znode_get_level(node) == znode_get_tree(node)->height; +} + +/* Returns true is @node was just created by zget() and wasn't ever loaded + into memory. */ +/* NIKITA-HANS: yes */ +int znode_just_created(const znode * node) +{ + assert("nikita-2188", node != NULL); + return (znode_page(node) == NULL); +} + +/* obtain updated ->znode_epoch. See seal.c for description. */ +__u64 znode_build_version(reiser4_tree * tree) +{ + __u64 result; + + spin_lock(&tree->epoch_lock); + result = ++tree->znode_epoch; + spin_unlock(&tree->epoch_lock); + return result; +} + +void init_load_count(load_count * dh) +{ + assert("nikita-2105", dh != NULL); + memset(dh, 0, sizeof *dh); +} + +void done_load_count(load_count * dh) +{ + assert("nikita-2106", dh != NULL); + if (dh->node != NULL) { + for (; dh->d_ref > 0; --dh->d_ref) + zrelse(dh->node); + dh->node = NULL; + } +} + +static int incr_load_count(load_count * dh) +{ + int result; + + assert("nikita-2110", dh != NULL); + assert("nikita-2111", dh->node != NULL); + + result = zload(dh->node); + if (result == 0) + ++dh->d_ref; + return result; +} + +int incr_load_count_znode(load_count * dh, znode * node) +{ + assert("nikita-2107", dh != NULL); + assert("nikita-2158", node != NULL); + assert("nikita-2109", + ergo(dh->node != NULL, (dh->node == node) || (dh->d_ref == 0))); + + dh->node = node; + return incr_load_count(dh); +} + +int incr_load_count_jnode(load_count * dh, jnode * 
node) +{ + if (jnode_is_znode(node)) { + return incr_load_count_znode(dh, JZNODE(node)); + } + return 0; +} + +void copy_load_count(load_count * new, load_count * old) +{ + int ret = 0; + done_load_count(new); + new->node = old->node; + new->d_ref = 0; + + while ((new->d_ref < old->d_ref) && (ret = incr_load_count(new)) == 0) { + } + + assert("jmacd-87589", ret == 0); +} + +void move_load_count(load_count * new, load_count * old) +{ + done_load_count(new); + new->node = old->node; + new->d_ref = old->d_ref; + old->node = NULL; + old->d_ref = 0; +} + +/* convert parent pointer into coord */ +void parent_coord_to_coord(const parent_coord_t * pcoord, coord_t * coord) +{ + assert("nikita-3204", pcoord != NULL); + assert("nikita-3205", coord != NULL); + + coord_init_first_unit_nocheck(coord, pcoord->node); + coord_set_item_pos(coord, pcoord->item_pos); + coord->between = AT_UNIT; +} + +/* pack coord into parent_coord_t */ +void coord_to_parent_coord(const coord_t * coord, parent_coord_t * pcoord) +{ + assert("nikita-3206", pcoord != NULL); + assert("nikita-3207", coord != NULL); + + pcoord->node = coord->node; + pcoord->item_pos = coord->item_pos; +} + +/* Initialize a parent hint pointer. 
(parent hint pointer is a field in znode, + look for comments there) */ +void init_parent_coord(parent_coord_t * pcoord, const znode * node) +{ + pcoord->node = (znode *) node; + pcoord->item_pos = (unsigned short)~0; +} + +#if REISER4_DEBUG + +/* debugging aid: znode invariant */ +static int znode_invariant_f(const znode * node /* znode to check */ , + char const **msg /* where to store error + * message, if any */ ) +{ +#define _ergo(ant, con) \ + ((*msg) = "{" #ant "} ergo {" #con "}", ergo((ant), (con))) + +#define _equi(e1, e2) \ + ((*msg) = "{" #e1 "} <=> {" #e2 "}", equi((e1), (e2))) + +#define _check(exp) ((*msg) = #exp, (exp)) + + return jnode_invariant_f(ZJNODE(node), msg) && + /* [znode-fake] invariant */ + /* fake znode doesn't have a parent, and */ + _ergo(znode_get_level(node) == 0, znode_parent(node) == NULL) && + /* there is another way to express this very check, and */ + _ergo(znode_above_root(node), znode_parent(node) == NULL) && + /* it has special block number, and */ + _ergo(znode_get_level(node) == 0, + disk_addr_eq(znode_get_block(node), &UBER_TREE_ADDR)) && + /* it is the only znode with such block number, and */ + _ergo(!znode_above_root(node) && znode_is_loaded(node), + !disk_addr_eq(znode_get_block(node), &UBER_TREE_ADDR)) && + /* it is parent of the tree root node */ + _ergo(znode_is_true_root(node), + znode_above_root(znode_parent(node))) && + /* [znode-level] invariant */ + /* level of parent znode is one larger than that of child, + except for the fake znode, and */ + _ergo(znode_parent(node) && !znode_above_root(znode_parent(node)), + znode_get_level(znode_parent(node)) == + znode_get_level(node) + 1) && + /* left neighbor is at the same level, and */ + _ergo(znode_is_left_connected(node) && node->left != NULL, + znode_get_level(node) == znode_get_level(node->left)) && + /* right neighbor is at the same level */ + _ergo(znode_is_right_connected(node) && node->right != NULL, + znode_get_level(node) == znode_get_level(node->right)) && 
+ /* [znode-connected] invariant */ + _ergo(node->left != NULL, znode_is_left_connected(node)) && + _ergo(node->right != NULL, znode_is_right_connected(node)) && + _ergo(!znode_is_root(node) && node->left != NULL, + znode_is_right_connected(node->left) && + node->left->right == node) && + _ergo(!znode_is_root(node) && node->right != NULL, + znode_is_left_connected(node->right) && + node->right->left == node) && + /* [znode-c_count] invariant */ + /* for any znode, c_count of its parent is greater than 0 */ + _ergo(znode_parent(node) != NULL && + !znode_above_root(znode_parent(node)), + znode_parent(node)->c_count > 0) && + /* leaves don't have children */ + _ergo(znode_get_level(node) == LEAF_LEVEL, + node->c_count == 0) && + _check(node->zjnode.jnodes.prev != NULL) && + _check(node->zjnode.jnodes.next != NULL) && + /* orphan doesn't have a parent */ + _ergo(ZF_ISSET(node, JNODE_ORPHAN), znode_parent(node) == 0) && + /* [znode-modify] invariant */ + /* if znode is not write-locked, its checksum remains + * invariant */ + /* unfortunately, zlock is unordered w.r.t. jnode_lock, so we + * cannot check this. 
*/ + /* [znode-refs] invariant */ + /* only referenced znode can be long-term locked */ + _ergo(znode_is_locked(node), + atomic_read(&ZJNODE(node)->x_count) != 0); +} + +/* debugging aid: check znode invariant and panic if it doesn't hold */ +int znode_invariant(znode * node /* znode to check */ ) +{ + char const *failed_msg; + int result; + + assert("umka-063", node != NULL); + assert("umka-064", current_tree != NULL); + + spin_lock_znode(node); + read_lock_tree(znode_get_tree(node)); + result = znode_invariant_f(node, &failed_msg); + if (!result) { + /* print_znode("corrupted node", node); */ + warning("jmacd-555", "Condition %s failed", failed_msg); + } + read_unlock_tree(znode_get_tree(node)); + spin_unlock_znode(node); + return result; +} + +/* return non-0 iff data are loaded into znode */ +int znode_is_loaded(const znode * node /* znode to query */ ) +{ + assert("nikita-497", node != NULL); + return jnode_is_loaded(ZJNODE(node)); +} + +unsigned long znode_times_locked(const znode * z) +{ + return z->times_locked; +} + +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/znode.h b/fs/reiser4/znode.h new file mode 100644 index 000000000000..613377ef60ca --- /dev/null +++ b/fs/reiser4/znode.h @@ -0,0 +1,435 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of znode (Zam's node). See znode.c for more details. */ + +#ifndef __ZNODE_H__ +#define __ZNODE_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "lock.h" +#include "readahead.h" + +#include +#include +#include /* for PAGE_CACHE_SIZE */ +#include + +/* znode tracks its position within parent (internal item in a parent node, + * that contains znode's block number). 
*/ +typedef struct parent_coord { + znode *node; + pos_in_node_t item_pos; +} parent_coord_t; + +/* &znode - node in a reiser4 tree. + + NOTE-NIKITA fields in this struct have to be rearranged (later) to reduce + cacheline pressure. + + Locking: + + Long term: data in a disk node attached to this znode are protected + by long term, deadlock aware lock ->lock; + + Spin lock: the following fields are protected by the spin lock: + + ->lock + + Following fields are protected by the global tree lock: + + ->left + ->right + ->in_parent + ->c_count + + Following fields are protected by the global delimiting key lock (dk_lock): + + ->ld_key (to update ->ld_key long-term lock on the node is also required) + ->rd_key + + Following fields are protected by the long term lock: + + ->nr_items + + ->node_plugin is never changed once set. This means that after code made + itself sure that field is valid it can be accessed without any additional + locking. + + ->level is immutable. + + Invariants involving this data-type: + + [znode-fake] + [znode-level] + [znode-connected] + [znode-c_count] + [znode-refs] + [jnode-refs] + [jnode-queued] + [znode-modify] + + For this to be made into a clustering or NUMA filesystem, we would want to eliminate all of the global locks. + Suggestions for how to do that are desired.*/ +struct znode { + /* Embedded jnode. */ + jnode zjnode; + + /* contains three subfields, node, pos_in_node, and pos_in_unit. + + pos_in_node and pos_in_unit are only hints that are cached to + speed up lookups during balancing. They are not required to be up to + date. Synched in find_child_ptr(). + + This value allows us to avoid expensive binary searches. + + in_parent->node points to the parent of this node, and is NOT a + hint. + */ + parent_coord_t in_parent; + + /* + * sibling list pointers + */ + + /* left-neighbor */ + znode *left; + /* right-neighbor */ + znode *right; + + /* long term lock on node content. This lock supports deadlock + detection. 
See lock.c + */ + zlock lock; + + /* You cannot remove from memory a node that has children in + memory. This is because we rely on the fact that parent of given + node can always be reached without blocking for io. When reading a + node into memory you must increase the c_count of its parent, when + removing it from memory you must decrease the c_count. This makes + the code simpler, and the cases where it is suboptimal are truly + obscure. + */ + int c_count; + + /* plugin of node attached to this znode. NULL if znode is not + loaded. */ + node_plugin *nplug; + + /* version of znode data. This is increased on each modification. This + * is necessary to implement seals (see seal.[ch]) efficiently. */ + __u64 version; + + /* left delimiting key. Necessary to efficiently perform + balancing with node-level locking. Kept in memory only. */ + reiser4_key ld_key; + /* right delimiting key. */ + reiser4_key rd_key; + + /* znode's tree level */ + __u16 level; + /* number of items in this node. This field is modified by node + * plugin. */ + __u16 nr_items; + +#if REISER4_DEBUG + void *creator; + reiser4_key first_key; + unsigned long times_locked; + int left_version; /* when node->left was updated */ + int right_version; /* when node->right was updated */ + int ld_key_version; /* when node->ld_key was updated */ + int rd_key_version; /* when node->rd_key was updated */ +#endif + +} __attribute__ ((aligned(16))); + +ON_DEBUG(extern atomic_t delim_key_version; + ) + +/* In general I think these macros should not be exposed. */ +#define znode_is_locked(node) (lock_is_locked(&node->lock)) +#define znode_is_rlocked(node) (lock_is_rlocked(&node->lock)) +#define znode_is_wlocked(node) (lock_is_wlocked(&node->lock)) +#define znode_is_wlocked_once(node) (lock_is_wlocked_once(&node->lock)) +#define znode_can_be_rlocked(node) (lock_can_be_rlocked(&node->lock)) +#define is_lock_compatible(node, mode) (lock_mode_compatible(&node->lock, mode)) +/* Macros for accessing the znode state. 
*/ +#define ZF_CLR(p,f) JF_CLR (ZJNODE(p), (f)) +#define ZF_ISSET(p,f) JF_ISSET(ZJNODE(p), (f)) +#define ZF_SET(p,f) JF_SET (ZJNODE(p), (f)) +extern znode *zget(reiser4_tree * tree, const reiser4_block_nr * const block, + znode * parent, tree_level level, gfp_t gfp_flag); +extern znode *zlook(reiser4_tree * tree, const reiser4_block_nr * const block); +extern int zload(znode * node); +extern int zload_ra(znode * node, ra_info_t * info); +extern int zinit_new(znode * node, gfp_t gfp_flags); +extern void zrelse(znode * node); +extern void znode_change_parent(znode * new_parent, reiser4_block_nr * block); +extern void znode_update_csum(znode *node); + +/* size of data in znode */ +static inline unsigned +znode_size(const znode * node UNUSED_ARG /* znode to query */ ) +{ + assert("nikita-1416", node != NULL); + return PAGE_SIZE; +} + +extern void parent_coord_to_coord(const parent_coord_t * pcoord, + coord_t * coord); +extern void coord_to_parent_coord(const coord_t * coord, + parent_coord_t * pcoord); +extern void init_parent_coord(parent_coord_t * pcoord, const znode * node); + +extern unsigned znode_free_space(znode * node); + +extern reiser4_key *znode_get_rd_key(znode * node); +extern reiser4_key *znode_get_ld_key(znode * node); + +extern reiser4_key *znode_set_rd_key(znode * node, const reiser4_key * key); +extern reiser4_key *znode_set_ld_key(znode * node, const reiser4_key * key); + +/* `connected' state checks */ +static inline int znode_is_right_connected(const znode * node) +{ + return ZF_ISSET(node, JNODE_RIGHT_CONNECTED); +} + +static inline int znode_is_left_connected(const znode * node) +{ + return ZF_ISSET(node, JNODE_LEFT_CONNECTED); +} + +static inline int znode_is_connected(const znode * node) +{ + return znode_is_right_connected(node) && znode_is_left_connected(node); +} + +extern int znode_shift_order; +extern int znode_rehash(znode * node, const reiser4_block_nr * new_block_nr); +extern void znode_remove(znode *, reiser4_tree *); +extern znode 
*znode_parent(const znode * node); +extern znode *znode_parent_nolock(const znode * node); +extern int znode_above_root(const znode * node); +extern int init_znode(jnode *node); +extern int init_znodes(void); +extern void done_znodes(void); +extern int znodes_tree_init(reiser4_tree * ztree); +extern void znodes_tree_done(reiser4_tree * ztree); +extern int znode_contains_key(znode * node, const reiser4_key * key); +extern int znode_contains_key_lock(znode * node, const reiser4_key * key); +extern unsigned znode_save_free_space(znode * node); +extern unsigned znode_recover_free_space(znode * node); +extern znode *zalloc(gfp_t gfp_flag); +extern void zinit(znode *, const znode * parent, reiser4_tree *); +extern int zparse(znode * node); + +extern int znode_just_created(const znode * node); + +extern void zfree(znode * node); + +#if REISER4_DEBUG +extern void print_znode(const char *prefix, const znode * node); +#else +#define print_znode( p, n ) noop +#endif + +/* Make it look like various znode functions exist instead of treating znodes as + jnodes in znode-specific code. 
*/ +#define znode_page(x) jnode_page ( ZJNODE(x) ) +#define zdata(x) jdata ( ZJNODE(x) ) +#define znode_get_block(x) jnode_get_block ( ZJNODE(x) ) +#define znode_created(x) jnode_created ( ZJNODE(x) ) +#define znode_set_created(x) jnode_set_created ( ZJNODE(x) ) +#define znode_convertible(x) jnode_convertible (ZJNODE(x)) +#define znode_set_convertible(x) jnode_set_convertible (ZJNODE(x)) + +#define znode_is_dirty(x) jnode_is_dirty ( ZJNODE(x) ) +#define znode_check_dirty(x) jnode_check_dirty ( ZJNODE(x) ) +#define znode_make_clean(x) jnode_make_clean ( ZJNODE(x) ) +#define znode_set_block(x, b) jnode_set_block ( ZJNODE(x), (b) ) + +#define spin_lock_znode(x) spin_lock_jnode ( ZJNODE(x) ) +#define spin_unlock_znode(x) spin_unlock_jnode ( ZJNODE(x) ) +#define spin_trylock_znode(x) spin_trylock_jnode ( ZJNODE(x) ) +#define spin_znode_is_locked(x) spin_jnode_is_locked ( ZJNODE(x) ) +#define spin_znode_is_not_locked(x) spin_jnode_is_not_locked ( ZJNODE(x) ) + +#if REISER4_DEBUG +extern int znode_x_count_is_protected(const znode * node); +extern int znode_invariant(znode * node); +#endif + +/* acquire reference to @node */ +static inline znode *zref(znode * node) +{ + /* change of x_count from 0 to 1 is protected by tree spin-lock */ + return JZNODE(jref(ZJNODE(node))); +} + +/* release reference to @node */ +static inline void zput(znode * node) +{ + assert("nikita-3564", znode_invariant(node)); + jput(ZJNODE(node)); +} + +/* get the level field for a znode */ +static inline tree_level znode_get_level(const znode * node) +{ + return node->level; +} + +/* get the level field for a jnode */ +static inline tree_level jnode_get_level(const jnode * node) +{ + if (jnode_is_znode(node)) + return znode_get_level(JZNODE(node)); + else + /* unformatted nodes are all at the LEAF_LEVEL and for + "semi-formatted" nodes like bitmaps, level doesn't matter. 
*/ + return LEAF_LEVEL; +} + +/* true if jnode is on leaf level */ +static inline int jnode_is_leaf(const jnode * node) +{ + if (jnode_is_znode(node)) + return (znode_get_level(JZNODE(node)) == LEAF_LEVEL); + if (jnode_get_type(node) == JNODE_UNFORMATTED_BLOCK) + return 1; + return 0; +} + +/* return znode's tree */ +static inline reiser4_tree *znode_get_tree(const znode * node) +{ + assert("nikita-2692", node != NULL); + return jnode_get_tree(ZJNODE(node)); +} + +/* resolve race with zput */ +static inline znode *znode_rip_check(reiser4_tree * tree, znode * node) +{ + jnode *j; + + j = jnode_rip_sync(tree, ZJNODE(node)); + if (likely(j != NULL)) + node = JZNODE(j); + else + node = NULL; + return node; +} + +#if defined(REISER4_DEBUG) +int znode_is_loaded(const znode * node /* znode to query */ ); +#endif + +extern __u64 znode_build_version(reiser4_tree * tree); + +/* Data-handles. A data handle object manages pairing calls to zload() and zrelse(). We + must load the data for a node in many places. We could do this by simply calling + zload() everywhere, the difficulty arises when we must release the loaded data by + calling zrelse. In a function with many possible error/return paths, it requires extra + work to figure out which exit paths must call zrelse and those which do not. The data + handle automatically calls zrelse for every zload that it is responsible for. In that + sense, it acts much like a lock_handle. +*/ +typedef struct load_count { + znode *node; + int d_ref; +} load_count; + +extern void init_load_count(load_count * lc); /* Initialize a load_count set the current node to NULL. */ +extern void done_load_count(load_count * dh); /* Finalize a load_count: call zrelse() if necessary */ +extern int incr_load_count_znode(load_count * dh, znode * node); /* Set the argument znode to the current node, call zload(). 
*/ +extern int incr_load_count_jnode(load_count * dh, jnode * node); /* If the argument jnode is formatted, do the same as + * incr_load_count_znode, otherwise do nothing (unformatted nodes + * don't require zload/zrelse treatment). */ +extern void move_load_count(load_count * new, load_count * old); /* Move the contents of a load_count. Old handle is released. */ +extern void copy_load_count(load_count * new, load_count * old); /* Copy the contents of a load_count. Old handle remains held. */ + +/* Variable initializers for load_count. */ +#define INIT_LOAD_COUNT ( load_count * ){ .node = NULL, .d_ref = 0 } +#define INIT_LOAD_COUNT_NODE( n ) ( load_count ){ .node = ( n ), .d_ref = 0 } +/* A convenience macro for use in assertions or debug-only code, where loaded + data is only required to perform the debugging check. This macro + encapsulates an expression inside a pair of calls to zload()/zrelse(). */ +#define WITH_DATA( node, exp ) \ +({ \ + long __with_dh_result; \ + znode *__with_dh_node; \ + \ + __with_dh_node = ( node ); \ + __with_dh_result = zload( __with_dh_node ); \ + if( __with_dh_result == 0 ) { \ + __with_dh_result = ( long )( exp ); \ + zrelse( __with_dh_node ); \ + } \ + __with_dh_result; \ +}) + +/* Same as above, but accepts a return value in case zload fails. 
*/ +#define WITH_DATA_RET( node, ret, exp ) \ +({ \ + int __with_dh_result; \ + znode *__with_dh_node; \ + \ + __with_dh_node = ( node ); \ + __with_dh_result = zload( __with_dh_node ); \ + if( __with_dh_result == 0 ) { \ + __with_dh_result = ( int )( exp ); \ + zrelse( __with_dh_node ); \ + } else \ + __with_dh_result = ( ret ); \ + __with_dh_result; \ +}) + +#define WITH_COORD(coord, exp) \ +({ \ + coord_t *__coord; \ + \ + __coord = (coord); \ + coord_clear_iplug(__coord); \ + WITH_DATA(__coord->node, exp); \ +}) + +#if REISER4_DEBUG +#define STORE_COUNTERS \ + reiser4_lock_cnt_info __entry_counters = \ + *reiser4_lock_counters() +#define CHECK_COUNTERS \ +ON_DEBUG_CONTEXT( \ +({ \ + __entry_counters.x_refs = reiser4_lock_counters() -> x_refs; \ + __entry_counters.t_refs = reiser4_lock_counters() -> t_refs; \ + __entry_counters.d_refs = reiser4_lock_counters() -> d_refs; \ + assert("nikita-2159", \ + !memcmp(&__entry_counters, reiser4_lock_counters(), \ + sizeof __entry_counters)); \ +}) ) + +#else +#define STORE_COUNTERS +#define CHECK_COUNTERS noop +#endif + +/* __ZNODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6799ccf5c37e..b64c1f3258e9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -272,6 +272,8 @@ enum positive_aop_returns { struct page; struct address_space; struct writeback_control; +struct wb_writeback_work; +struct bdi_writeback; /* * Write life time hint values. 
@@ -1845,6 +1847,14 @@ struct super_operations { void (*umount_begin) (struct super_block *); void (*umount_end) (struct super_block *, int); + long (*writeback_inodes)(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all); + void (*sync_inodes) (struct super_block *sb, + struct writeback_control *wbc); + int (*show_options)(struct seq_file *, struct dentry *); int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); @@ -2613,6 +2623,13 @@ extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); extern int write_inode_now(struct inode *, int); +extern void writeback_skip_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb); +extern long generic_writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait_keep_errors(struct address_space *mapping); @@ -2872,6 +2889,8 @@ extern char *file_path(struct file *, char *, int); #include /* needed for stackable file system support */ +extern loff_t default_llseek_unlocked(struct file *file, loff_t offset, + int whence); extern loff_t default_llseek(struct file *file, loff_t offset, int whence); extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); @@ -2954,6 +2973,8 @@ extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, + loff_t *ppos); ssize_t 
vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, diff --git a/include/linux/mm.h b/include/linux/mm.h index d436543d0da2..1f3df994812f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1447,6 +1447,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); +int set_page_dirty_notag(struct page *page); void cancel_dirty_page(struct page *page); int clear_page_dirty_for_io(struct page *page); diff --git a/include/linux/sched.h b/include/linux/sched.h index a9a33245887e..b4844adbf46f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1507,6 +1507,7 @@ extern struct pid *cad_pid; /* * Per process flags */ +#define PF_FLUSHER 0x00000001 /* responsible for disk writeback */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index e12d92808e98..9d47f389a1a0 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -16,6 +16,12 @@ struct bio; DECLARE_PER_CPU(int, dirty_throttle_leaks); +static inline int is_flush_bd_task(struct task_struct *task) +{ + return task->flags & PF_FLUSHER; +} +#define current_is_flush_bd_task() is_flush_bd_task(current) + /* * The 1/4 region under the global dirty thresh is for smooth dirty throttling: * @@ -179,6 +185,26 @@ static inline void wb_domain_size_changed(struct wb_domain *dom) spin_unlock(&dom->lock); } +/* + * Passed into wb_writeback(), essentially a subset of writeback_control + */ +struct wb_writeback_work { + long nr_pages; + struct super_block *sb; + unsigned long *older_than_this; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages:1; + 
unsigned int for_kupdate:1; + unsigned int range_cyclic:1; + unsigned int for_background:1; + unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ + unsigned int auto_free:1; /* free on completion */ + enum wb_reason reason; /* why was writeback initiated? */ + + struct list_head list; /* pending work list */ + struct wb_completion *done; /* set if the caller waits */ +}; + /* * fs/fs-writeback.c */ diff --git a/mm/filemap.c b/mm/filemap.c index 6529ee3f29a3..86413adbdae6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1690,6 +1690,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, return ret; } +EXPORT_SYMBOL(find_get_pages_range); /** * find_get_pages_contig - gang contiguous pagecache lookup diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0daa3446c4d7..0b87cc9658ed 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2661,6 +2661,35 @@ void account_page_redirty(struct page *page) } EXPORT_SYMBOL(account_page_redirty); +/* + * set_page_dirty_notag() -- similar to __set_page_dirty_nobuffers() + * except it doesn't tag the page dirty in the page-cache radix tree. + * This means that the address space using this cannot use the regular + * filemap ->writepages() helpers and must provide its own means of + * tracking and finding non-tagged dirty pages. + * + * NOTE: furthermore, this version also doesn't handle truncate races. 
+ */ +int set_page_dirty_notag(struct page *page) +{ + struct address_space *mapping = page->mapping; + + lock_page_memcg(page); + if (!TestSetPageDirty(page)) { + unsigned long flags; + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); + local_irq_save(flags); + account_page_dirtied(page, mapping); + local_irq_restore(flags); + unlock_page_memcg(page); + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return 1; + } + unlock_page_memcg(page); + return 0; +} +EXPORT_SYMBOL(set_page_dirty_notag); + /* * When a writepage implementation decides that it doesn't want to write this * page for some reason, it should redirty the locked page via diff --git a/mm/vmscan.c b/mm/vmscan.c index 3ccf3b12ab4f..b74602c8a71e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3170,7 +3170,11 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, pg_data_t *last_pgdat; struct zoneref *z; struct zone *zone; + void *saved; retry: + saved = current->journal_info; /* save journal info */ + current->journal_info = NULL; + delayacct_freepages_start(); if (global_reclaim(sc)) @@ -3206,6 +3210,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, } delayacct_freepages_end(); + /* restore journal info */ + current->journal_info = saved; if (sc->nr_reclaimed) return sc->nr_reclaimed; From a20b46908a52c45b5213f2d10f0ddb277ff1d8b6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 20:56:58 +0300 Subject: [PATCH 280/439] configs: add cruel config Signed-off-by: Denis Efremov --- kernel/configs/cruel.conf | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 kernel/configs/cruel.conf diff --git a/kernel/configs/cruel.conf b/kernel/configs/cruel.conf new file mode 100644 index 000000000000..990328d291a3 --- /dev/null +++ b/kernel/configs/cruel.conf @@ -0,0 +1,34 @@ +# CONFIG_FIVE_PA_FEATURE is not set +# CONFIG_SEC_RESTRICT_ROOTING is not set +# CONFIG_SECURITY_DEFEX is not set +# CONFIG_PROCA is not 
set +# CONFIG_SECURITY_DSMS is not set +# CONFIG_KPERFMON is not set +# CONFIG_MALI_KUTF is not set +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_LOCALVERSION="-Cruel" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_MALI_GATOR_SUPPORT=y +CONFIG_SECURITY_SELINUX_SWITCH=y +# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set +# CONFIG_DTC is not set +# CONFIG_ALWAYS_ENFORCE is not set +# CONFIG_ALWAYS_PERMIT is not set +# CONFIG_RD_GZIP is not set +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_CRAMFS is not set +# CONFIG_MQ_IOSCHED_DEADLINE is not set +# CONFIG_MQ_IOSCHED_KYBER is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_CC_WERROR is not set +# CONFIG_EXYNOS_NPU_PUBLISH_NPU_BUILD_VER is not set +# CONFIG_VISION_UNITTEST is not set +# CONFIG_CRYPTO_TEST is not set From 3a0f7d140daefcfa7c81191d1aebfd2f88cc29a2 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 20:57:32 +0300 Subject: [PATCH 281/439] configs: add magisk config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel+magisk.conf | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 kernel/configs/cruel+magisk.conf diff --git a/kernel/configs/cruel+magisk.conf b/kernel/configs/cruel+magisk.conf new file mode 100644 index 000000000000..cdea053dc42d --- /dev/null +++ b/kernel/configs/cruel+magisk.conf @@ -0,0 +1,14 @@ +# CONFIG_INITRAMFS_SKIP is not set +CONFIG_INITRAMFS_FORCE=y +CONFIG_INITRAMFS_SOURCE="usr/magisk/initramfs_list" +CONFIG_INITRAMFS_ROOT_UID=0 +CONFIG_INITRAMFS_ROOT_GID=0 +CONFIG_INITRAMFS_COMPRESSION_NONE=y +CONFIG_INITRAMFS_COMPRESSION="" +# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set +# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set +# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set +# 
CONFIG_INITRAMFS_COMPRESSION_XZ is not set +# CONFIG_INITRAMFS_COMPRESSION_LZO is not set +# CONFIG_INITRAMFS_COMPRESSION_LZ4 is not set +CONFIG_PROC_MAGISK_HIDE_MOUNT=y From dc43fd4c92a292e9ee2af397f96234b435006b4e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 20:57:48 +0300 Subject: [PATCH 282/439] configs: add wireguard config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-wireguard.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-wireguard.conf diff --git a/kernel/configs/cruel-wireguard.conf b/kernel/configs/cruel-wireguard.conf new file mode 100644 index 000000000000..fb47f867aed7 --- /dev/null +++ b/kernel/configs/cruel-wireguard.conf @@ -0,0 +1,5 @@ +CONFIG_WIREGUARD=y +# CONFIG_WIREGUARD_DEBUG is not set +CONFIG_NET_UDP_TUNNEL=y +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set From 63b60a337ad242e0d18a71a30d482a2e61e2f3c5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 9 Feb 2020 23:00:36 +0300 Subject: [PATCH 283/439] configs: add nohardening config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel+nohardening.conf | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 kernel/configs/cruel+nohardening.conf diff --git a/kernel/configs/cruel+nohardening.conf b/kernel/configs/cruel+nohardening.conf new file mode 100644 index 000000000000..f500404a35d2 --- /dev/null +++ b/kernel/configs/cruel+nohardening.conf @@ -0,0 +1,6 @@ +# CONFIG_LOD_SEC is not set +# CONFIG_UH is not set +# CONFIG_UH_LKMAUTH is not set +# CONFIG_UH_LKM_BLOCK is not set +# CONFIG_RKP_CFP is not set +# CONFIG_RKP_CFP_JOPP is not set From a3baa7919f2cc5939847e4f14fcf46db045d96e5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 24 Jan 2020 18:29:05 +0300 Subject: [PATCH 284/439] configs: add 1000hz 300hz 100hz 50hz 25hz config presets Signed-off-by: Denis Efremov --- kernel/configs/cruel-1000hz.conf | 3 +++ kernel/configs/cruel-100hz.conf | 3 +++ kernel/configs/cruel-25hz.conf | 3 
+++ kernel/configs/cruel-300hz.conf | 3 +++ kernel/configs/cruel-50hz.conf | 3 +++ 5 files changed, 15 insertions(+) create mode 100644 kernel/configs/cruel-1000hz.conf create mode 100644 kernel/configs/cruel-100hz.conf create mode 100644 kernel/configs/cruel-25hz.conf create mode 100644 kernel/configs/cruel-300hz.conf create mode 100644 kernel/configs/cruel-50hz.conf diff --git a/kernel/configs/cruel-1000hz.conf b/kernel/configs/cruel-1000hz.conf new file mode 100644 index 000000000000..23b4b5cac3aa --- /dev/null +++ b/kernel/configs/cruel-1000hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 diff --git a/kernel/configs/cruel-100hz.conf b/kernel/configs/cruel-100hz.conf new file mode 100644 index 000000000000..04f36746f90a --- /dev/null +++ b/kernel/configs/cruel-100hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_100=y +CONFIG_HZ=100 diff --git a/kernel/configs/cruel-25hz.conf b/kernel/configs/cruel-25hz.conf new file mode 100644 index 000000000000..c93bc310d298 --- /dev/null +++ b/kernel/configs/cruel-25hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_25=y +CONFIG_HZ=25 diff --git a/kernel/configs/cruel-300hz.conf b/kernel/configs/cruel-300hz.conf new file mode 100644 index 000000000000..ecc39ed471c2 --- /dev/null +++ b/kernel/configs/cruel-300hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_300=y +CONFIG_HZ=300 diff --git a/kernel/configs/cruel-50hz.conf b/kernel/configs/cruel-50hz.conf new file mode 100644 index 000000000000..030d291776ee --- /dev/null +++ b/kernel/configs/cruel-50hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_50=y +CONFIG_HZ=50 From 93e651ad28be03f5588fcacd2aeaaa9ae63767a2 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 24 Jan 2020 18:29:29 +0300 Subject: [PATCH 285/439] configs: add nohardening2 config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-nohardening2.conf | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 
100644 kernel/configs/cruel-nohardening2.conf diff --git a/kernel/configs/cruel-nohardening2.conf b/kernel/configs/cruel-nohardening2.conf new file mode 100644 index 000000000000..7e86529ef234 --- /dev/null +++ b/kernel/configs/cruel-nohardening2.conf @@ -0,0 +1,10 @@ +CONFIG_COMPAT_BRK=y +# CONFIG_VMAP_STACK is not set +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +# CONFIG_ARM64_SSBD is not set +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +# CONFIG_REFCOUNT_FULL is not set From 3ff967efa34a449b43c44d2ed169abb857f41863 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 6 Feb 2020 00:31:52 +0300 Subject: [PATCH 286/439] configs: add iptables ttl/hop config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-ttl.conf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 kernel/configs/cruel-ttl.conf diff --git a/kernel/configs/cruel-ttl.conf b/kernel/configs/cruel-ttl.conf new file mode 100644 index 000000000000..82ac544db198 --- /dev/null +++ b/kernel/configs/cruel-ttl.conf @@ -0,0 +1,4 @@ +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_TARGET_HL=y From e7970de6d2e2cec6a2a093b72176814ec7715ccf Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 9 Feb 2020 01:27:58 +0300 Subject: [PATCH 287/439] configs: add cifs config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-cifs.conf | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 kernel/configs/cruel-cifs.conf diff --git a/kernel/configs/cruel-cifs.conf b/kernel/configs/cruel-cifs.conf new file mode 100644 index 000000000000..ce2b95933bd4 --- /dev/null +++ b/kernel/configs/cruel-cifs.conf @@ -0,0 +1,16 @@ +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_CIFS=y +CONFIG_CIFS_STATS=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +# 
CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +# CONFIG_CIFS_DEBUG is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_SMB311 is not set +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_MD4=y From 40c9f2f33eb1382dd0a73a08931c144f6f8e3d98 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 12 Feb 2020 14:33:56 +0300 Subject: [PATCH 288/439] configs: add nodebug config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-nodebug.conf | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 kernel/configs/cruel-nodebug.conf diff --git a/kernel/configs/cruel-nodebug.conf b/kernel/configs/cruel-nodebug.conf new file mode 100644 index 000000000000..71fa3d6c46a7 --- /dev/null +++ b/kernel/configs/cruel-nodebug.conf @@ -0,0 +1,25 @@ +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_DEBUG_INFO is not set +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_BLOCK_SUPPORT_STLOG is not set +# CONFIG_BT_DEBUGFS is not set +# CONFIG_USB_DEBUG_DETAILED_LOG is not set +# CONFIG_MMC_SUPPORT_STLOG is not set +# CONFIG_ION_DEBUG_EVENT_RECORD is not set +# CONFIG_SEC_DEBUG_TSP_LOG is not set +CONFIG_MCPS_DEBUG_PRINTK=4 +# CONFIG_MCPS_DEBUG is not set +# CONFIG_EXYNOS_CORESIGHT is not set +# CONFIG_EXYNOS_DEBUG_TEST is not set +# CONFIG_SEC_BOOTSTAT is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_SDFAT_DEBUG is not set +# CONFIG_SDFAT_DBG_MSG is not set +# CONFIG_PMUCAL_DBG is not set From 1edacb14893dddd37720ad14b3fe81172cc0d021 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 28 Feb 2020 02:16:07 +0300 Subject: [PATCH 289/439] configs: add cpu shedulers config presets Signed-off-by: Denis 
Efremov --- kernel/configs/cruel-sched_conservative.conf | 2 ++ kernel/configs/cruel-sched_ondemand.conf | 2 ++ kernel/configs/cruel-sched_performance.conf | 1 + kernel/configs/cruel-sched_powersave.conf | 1 + kernel/configs/cruel-sched_userspace.conf | 1 + 5 files changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-sched_conservative.conf create mode 100644 kernel/configs/cruel-sched_ondemand.conf create mode 100644 kernel/configs/cruel-sched_performance.conf create mode 100644 kernel/configs/cruel-sched_powersave.conf create mode 100644 kernel/configs/cruel-sched_userspace.conf diff --git a/kernel/configs/cruel-sched_conservative.conf b/kernel/configs/cruel-sched_conservative.conf new file mode 100644 index 000000000000..55255be2eac5 --- /dev/null +++ b/kernel/configs/cruel-sched_conservative.conf @@ -0,0 +1,2 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y diff --git a/kernel/configs/cruel-sched_ondemand.conf b/kernel/configs/cruel-sched_ondemand.conf new file mode 100644 index 000000000000..4c5675beabdd --- /dev/null +++ b/kernel/configs/cruel-sched_ondemand.conf @@ -0,0 +1,2 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y diff --git a/kernel/configs/cruel-sched_performance.conf b/kernel/configs/cruel-sched_performance.conf new file mode 100644 index 000000000000..2bf548fc409a --- /dev/null +++ b/kernel/configs/cruel-sched_performance.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y diff --git a/kernel/configs/cruel-sched_powersave.conf b/kernel/configs/cruel-sched_powersave.conf new file mode 100644 index 000000000000..a22c379626bb --- /dev/null +++ b/kernel/configs/cruel-sched_powersave.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_POWERSAVE=y diff --git a/kernel/configs/cruel-sched_userspace.conf b/kernel/configs/cruel-sched_userspace.conf new file mode 100644 index 000000000000..8c1bc6848c91 --- /dev/null +++ b/kernel/configs/cruel-sched_userspace.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_USERSPACE=y From 
acb7dc45d194538e7d09568f761a5cb74a9a7925 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 6 Mar 2020 14:03:46 +0300 Subject: [PATCH 290/439] configs: add boeffla_wl_blocker config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-boeffla_wl_blocker.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-boeffla_wl_blocker.conf diff --git a/kernel/configs/cruel-boeffla_wl_blocker.conf b/kernel/configs/cruel-boeffla_wl_blocker.conf new file mode 100644 index 000000000000..857797270df1 --- /dev/null +++ b/kernel/configs/cruel-boeffla_wl_blocker.conf @@ -0,0 +1 @@ +CONFIG_BOEFFLA_WL_BLOCKER=y From c7ef97c20ef6925a9b2580bd425e3af7906dba0b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 00:20:41 +0300 Subject: [PATCH 291/439] configs: add size config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-size.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-size.conf diff --git a/kernel/configs/cruel-size.conf b/kernel/configs/cruel-size.conf new file mode 100644 index 000000000000..3c8db587fa3d --- /dev/null +++ b/kernel/configs/cruel-size.conf @@ -0,0 +1,3 @@ +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_03 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y From 6be78d64b318030f49881cc46f8e1b49e2da487a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 17 Jun 2020 01:23:52 +0300 Subject: [PATCH 292/439] configs: add performance config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-performance.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-performance.conf diff --git a/kernel/configs/cruel-performance.conf b/kernel/configs/cruel-performance.conf new file mode 100644 index 000000000000..a1362236d27d --- /dev/null +++ b/kernel/configs/cruel-performance.conf @@ -0,0 +1,3 @@ +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set 
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y From ee82318da8e200c114650728648f1df426fe7ad4 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 00:33:57 +0300 Subject: [PATCH 293/439] configs: add nomodules config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-nomodules.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-nomodules.conf diff --git a/kernel/configs/cruel-nomodules.conf b/kernel/configs/cruel-nomodules.conf new file mode 100644 index 000000000000..d85fc9557b48 --- /dev/null +++ b/kernel/configs/cruel-nomodules.conf @@ -0,0 +1,2 @@ +# CONFIG_MODULES is not set +# CONFIG_KALLSYMS is not set From 8974dc769f7ab9acd6a6cbe80aec76b09995f7f2 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 00:28:51 +0300 Subject: [PATCH 294/439] configs: add noksm config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-noksm.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noksm.conf diff --git a/kernel/configs/cruel-noksm.conf b/kernel/configs/cruel-noksm.conf new file mode 100644 index 000000000000..9671029ed687 --- /dev/null +++ b/kernel/configs/cruel-noksm.conf @@ -0,0 +1 @@ +# CONFIG_KSM is not set From 7dc2c3a0620a9f3d1327ee27d9f5650dbd84387f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 11:19:55 +0300 Subject: [PATCH 295/439] configs: add mass_storage config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-mass_storage.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-mass_storage.conf diff --git a/kernel/configs/cruel-mass_storage.conf b/kernel/configs/cruel-mass_storage.conf new file mode 100644 index 000000000000..1c1f2a38b948 --- /dev/null +++ b/kernel/configs/cruel-mass_storage.conf @@ -0,0 +1,2 @@ +CONFIG_USB_F_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y From 34b9e9e5b3601fac488459ec5ea997c09ef53687 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 11 Mar 2020 
17:57:36 +0300 Subject: [PATCH 296/439] configs: add noaudit config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-noaudit.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noaudit.conf diff --git a/kernel/configs/cruel-noaudit.conf b/kernel/configs/cruel-noaudit.conf new file mode 100644 index 000000000000..29121d528fd5 --- /dev/null +++ b/kernel/configs/cruel-noaudit.conf @@ -0,0 +1 @@ +# CONFIG_AUDIT is not set From bbc7175cf58839e2fd6047f78f769735d75d0b7b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 18 Mar 2020 08:50:37 +0300 Subject: [PATCH 297/439] configs: add always_enforce config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-always_enforce.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-always_enforce.conf diff --git a/kernel/configs/cruel-always_enforce.conf b/kernel/configs/cruel-always_enforce.conf new file mode 100644 index 000000000000..8708edc51348 --- /dev/null +++ b/kernel/configs/cruel-always_enforce.conf @@ -0,0 +1,3 @@ +# CONFIG_SECURITY_SELINUX_SWITCH is not set +CONFIG_ALWAYS_ENFORCE=y +# CONFIG_ALWAYS_PERMIT is not set From 538ec407b79dcac0b688fe66c2a7acb0192d51fb Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 18 Mar 2020 08:51:09 +0300 Subject: [PATCH 298/439] configs: add always_permit config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-always_permit.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-always_permit.conf diff --git a/kernel/configs/cruel-always_permit.conf b/kernel/configs/cruel-always_permit.conf new file mode 100644 index 000000000000..ca7b9c6277d0 --- /dev/null +++ b/kernel/configs/cruel-always_permit.conf @@ -0,0 +1,3 @@ +# CONFIG_SECURITY_SELINUX_SWITCH is not set +# CONFIG_ALWAYS_ENFORCE is not set +CONFIG_ALWAYS_PERMIT=y From 6e8d4dbf42eac56684e2115c919d7f45b202cb57 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 00:45:20 +0300 
Subject: [PATCH 299/439] configs: add sdfat config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-sdfat.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-sdfat.conf diff --git a/kernel/configs/cruel-sdfat.conf b/kernel/configs/cruel-sdfat.conf new file mode 100644 index 000000000000..dba44f81a750 --- /dev/null +++ b/kernel/configs/cruel-sdfat.conf @@ -0,0 +1,3 @@ +# CONFIG_VFAT_FS is not set +CONFIG_SDFAT_USE_FOR_EXFAT=y +CONFIG_SDFAT_USE_FOR_VFAT=y From 73c4cae4822d25bc7794c5e73b9568f9c0ee581d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Mar 2020 01:24:52 +0300 Subject: [PATCH 300/439] configs: add ntfs config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-ntfs.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-ntfs.conf diff --git a/kernel/configs/cruel-ntfs.conf b/kernel/configs/cruel-ntfs.conf new file mode 100644 index 000000000000..fa7071455a34 --- /dev/null +++ b/kernel/configs/cruel-ntfs.conf @@ -0,0 +1,3 @@ +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y From 01a55fadb9a15bc6c868a01dd7e8fe3fc3f0763a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 22 Mar 2020 10:29:34 +0300 Subject: [PATCH 301/439] configs: add morosound config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-morosound.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-morosound.conf diff --git a/kernel/configs/cruel-morosound.conf b/kernel/configs/cruel-morosound.conf new file mode 100644 index 000000000000..0fc41b8faba9 --- /dev/null +++ b/kernel/configs/cruel-morosound.conf @@ -0,0 +1 @@ +CONFIG_MORO_SOUND=y From 4f1fcc75bfb8b9c9d7daab5742acd65c33c0bf6e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 26 Feb 2020 19:27:37 +0300 Subject: [PATCH 302/439] configs: add io_bfq config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_bfq.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 
100644 kernel/configs/cruel-io_bfq.conf diff --git a/kernel/configs/cruel-io_bfq.conf b/kernel/configs/cruel-io_bfq.conf new file mode 100644 index 000000000000..3bcadd8221fb --- /dev/null +++ b/kernel/configs/cruel-io_bfq.conf @@ -0,0 +1,2 @@ +CONFIG_IOSCHED_BFQ=y +CONFIG_SCSI_MQ_DEFAULT=y From 25bd9c434ffb8d26b89fa1a0c14113fa3aac8a8e Mon Sep 17 00:00:00 2001 From: Angheloaia Victor Date: Thu, 26 Mar 2020 21:33:44 +0200 Subject: [PATCH 303/439] configs: add io_maple config preset --- kernel/configs/cruel-io_maple.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_maple.conf diff --git a/kernel/configs/cruel-io_maple.conf b/kernel/configs/cruel-io_maple.conf new file mode 100644 index 000000000000..d65920d99764 --- /dev/null +++ b/kernel/configs/cruel-io_maple.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_MAPLE=y +CONFIG_DEFAULT_MAPLE=y +CONFIG_DEFAULT_IOSCHED="maple" From cdc8ac2e2e6519815c2fe12fafd570a2e5c05c72 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 5 Apr 2020 16:55:37 +0300 Subject: [PATCH 304/439] configs: add io_fiops config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_fiops.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_fiops.conf diff --git a/kernel/configs/cruel-io_fiops.conf b/kernel/configs/cruel-io_fiops.conf new file mode 100644 index 000000000000..30100ef51662 --- /dev/null +++ b/kernel/configs/cruel-io_fiops.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_FIOPS=y +CONFIG_DEFAULT_FIOPS=y +CONFIG_DEFAULT_IOSCHED="fiops" From 
35691ece4302a5134b684e9f1bed54e2359bb3d9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 6 Apr 2020 10:30:07 +0300 Subject: [PATCH 305/439] configs: add io_sio config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_sio.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_sio.conf diff --git a/kernel/configs/cruel-io_sio.conf b/kernel/configs/cruel-io_sio.conf new file mode 100644 index 000000000000..e72f3ffb79bb --- /dev/null +++ b/kernel/configs/cruel-io_sio.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_SIO=y +CONFIG_DEFAULT_SIO=y +CONFIG_DEFAULT_IOSCHED="sio" From 2ce826d3f31e112378e2145dccb7909b76a8e437 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 6 Apr 2020 10:35:42 +0300 Subject: [PATCH 306/439] configs: add io_zen config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_zen.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_zen.conf diff --git a/kernel/configs/cruel-io_zen.conf b/kernel/configs/cruel-io_zen.conf new file mode 100644 index 000000000000..8cbee0c618ce --- /dev/null +++ b/kernel/configs/cruel-io_zen.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +CONFIG_IOSCHED_ZEN=y +CONFIG_DEFAULT_ZEN=y +CONFIG_DEFAULT_IOSCHED="zen" From 1d84e81838537b0fb9227efae894c8ab0d949a27 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Apr 2020 13:25:47 +0300 Subject: [PATCH 307/439] configs: add io_noop config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_noop.conf | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 
kernel/configs/cruel-io_noop.conf diff --git a/kernel/configs/cruel-io_noop.conf b/kernel/configs/cruel-io_noop.conf new file mode 100644 index 000000000000..611ce52818fc --- /dev/null +++ b/kernel/configs/cruel-io_noop.conf @@ -0,0 +1,8 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" From 03e175782f2d0188a4b25c06890d8fc97fb5db6b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 9 Apr 2020 00:59:53 +0300 Subject: [PATCH 308/439] configs: add io_anxiety config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_anxiety.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_anxiety.conf diff --git a/kernel/configs/cruel-io_anxiety.conf b/kernel/configs/cruel-io_anxiety.conf new file mode 100644 index 000000000000..5d3f9d30aad5 --- /dev/null +++ b/kernel/configs/cruel-io_anxiety.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_ANXIETY=y +CONFIG_DEFAULT_ANXIETY=y +CONFIG_DEFAULT_IOSCHED="anxiety" From b563c71c00a3cac48f82230261e143b90a214e1b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 6 Apr 2020 11:18:34 +0300 Subject: [PATCH 309/439] configs: add io_cfq config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-io_cfq.conf | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 kernel/configs/cruel-io_cfq.conf diff --git a/kernel/configs/cruel-io_cfq.conf b/kernel/configs/cruel-io_cfq.conf new file mode 100644 index 000000000000..1034c22eb0da --- /dev/null +++ b/kernel/configs/cruel-io_cfq.conf @@ -0,0 +1,8 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_FIOPS is not set +# 
CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_DEFAULT_CFQ=y +CONFIG_DEFAULT_IOSCHED="cfq" From 7e1c85e5045925b83d8f27611f26af3bd32fda62 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 23 Mar 2020 21:26:54 +0300 Subject: [PATCH 310/439] configs: add tcp_window_64k config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_window_64k.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-tcp_window_64k.conf diff --git a/kernel/configs/cruel-tcp_window_64k.conf b/kernel/configs/cruel-tcp_window_64k.conf new file mode 100644 index 000000000000..abb2ceab19a4 --- /dev/null +++ b/kernel/configs/cruel-tcp_window_64k.conf @@ -0,0 +1 @@ +CONFIG_LARGE_TCP_INITIAL_BUFFER=y From 9b0e9b79fe92281c9bc99bfcf36b374f581c6433 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 23 Mar 2020 15:52:56 +0300 Subject: [PATCH 311/439] configs: add tcp_cubic config preset Thanks, @HRTKernel! 
Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_cubic.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_cubic.conf diff --git a/kernel/configs/cruel-tcp_cubic.conf b/kernel/configs/cruel-tcp_cubic.conf new file mode 100644 index 000000000000..de9045bf0279 --- /dev/null +++ b/kernel/configs/cruel-tcp_cubic.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" From d6ae79025e1bc48fa9646b9b16085cda8d33ff3e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 16:40:41 +0300 Subject: [PATCH 312/439] configs: add tcp_westwood config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_westwood.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_westwood.conf diff --git a/kernel/configs/cruel-tcp_westwood.conf b/kernel/configs/cruel-tcp_westwood.conf new file mode 100644 index 000000000000..87a68f70fd06 --- /dev/null +++ b/kernel/configs/cruel-tcp_westwood.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_WESTWOOD=y +CONFIG_DEFAULT_WESTWOOD=y +CONFIG_DEFAULT_TCP_CONG="westwood" From ca896a73b3a977e25c436f5457e3bb140888db8b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 7 Apr 2020 23:05:42 +0300 Subject: [PATCH 313/439] configs: add tcp_htcp config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_htcp.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_htcp.conf diff --git a/kernel/configs/cruel-tcp_htcp.conf b/kernel/configs/cruel-tcp_htcp.conf new file mode 100644 index 000000000000..20c422940245 --- /dev/null +++ b/kernel/configs/cruel-tcp_htcp.conf @@ -0,0 +1,7 @@ +# 
CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_HTCP=y +CONFIG_DEFAULT_HTCP=y +CONFIG_DEFAULT_TCP_CONG="htcp" From a3de74ced1781b730619e76db1eb36ad45c03498 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 7 Apr 2020 23:05:04 +0300 Subject: [PATCH 314/439] configs: add tcp_bic config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_bic.conf | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 kernel/configs/cruel-tcp_bic.conf diff --git a/kernel/configs/cruel-tcp_bic.conf b/kernel/configs/cruel-tcp_bic.conf new file mode 100644 index 000000000000..f93b8bfdb873 --- /dev/null +++ b/kernel/configs/cruel-tcp_bic.conf @@ -0,0 +1,6 @@ +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_DEFAULT_BIC=y +CONFIG_DEFAULT_TCP_CONG="bic" From 9678617b9e6434abce1d75c2e449884dfbf47aa8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Apr 2020 13:03:16 +0300 Subject: [PATCH 315/439] configs: add tcp_bbr config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-tcp_bbr.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_bbr.conf diff --git a/kernel/configs/cruel-tcp_bbr.conf b/kernel/configs/cruel-tcp_bbr.conf new file mode 100644 index 000000000000..297c6ebfb177 --- /dev/null +++ b/kernel/configs/cruel-tcp_bbr.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +CONFIG_TCP_CONG_BBR=y +CONFIG_DEFAULT_BBR=y +CONFIG_DEFAULT_TCP_CONG="bbr" From 0fc035a0f3e1e19cfa51cef5351359d412e7ebd5 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 26 Mar 2020 15:49:10 +0300 Subject: [PATCH 316/439] configs: add noswap config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-noswap.conf | 1 
+ 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noswap.conf diff --git a/kernel/configs/cruel-noswap.conf b/kernel/configs/cruel-noswap.conf new file mode 100644 index 000000000000..63b4815455fa --- /dev/null +++ b/kernel/configs/cruel-noswap.conf @@ -0,0 +1 @@ +# CONFIG_SWAP is not set From 2619da545367f1c24fb2497e25cd14f88d28315d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 26 Mar 2020 15:51:28 +0300 Subject: [PATCH 317/439] configs: add nozram config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-nozram.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-nozram.conf diff --git a/kernel/configs/cruel-nozram.conf b/kernel/configs/cruel-nozram.conf new file mode 100644 index 000000000000..00f4887473d3 --- /dev/null +++ b/kernel/configs/cruel-nozram.conf @@ -0,0 +1 @@ +# CONFIG_ZRAM is not set From 8c1977546f02af932373e963b33afcc1c3c56ae1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 12 Apr 2020 09:21:49 +0300 Subject: [PATCH 318/439] configs: add noatime config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-noatime.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-noatime.conf diff --git a/kernel/configs/cruel-noatime.conf b/kernel/configs/cruel-noatime.conf new file mode 100644 index 000000000000..e3337606de23 --- /dev/null +++ b/kernel/configs/cruel-noatime.conf @@ -0,0 +1,2 @@ +CONFIG_DEFAULT_MNT_NOATIME=y +# CONFIG_DEFAULT_MNT_RELATIME is not set From 5fe1e535e8507f26e3bc8ede95b36a84ee2c4c9b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Mar 2020 21:08:48 +0300 Subject: [PATCH 319/439] configs: add kexec config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-kexec.conf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 kernel/configs/cruel-kexec.conf diff --git a/kernel/configs/cruel-kexec.conf b/kernel/configs/cruel-kexec.conf new file mode 100644 index 000000000000..ee65c9d712a9 --- 
/dev/null +++ b/kernel/configs/cruel-kexec.conf @@ -0,0 +1,4 @@ +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_PROC_KCORE=y From 19710ecb3c4b29cfefb4ff537964401c9f7acebe Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 26 Apr 2020 17:38:01 +0300 Subject: [PATCH 320/439] configs: add kali config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-kali.conf | 122 +++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 kernel/configs/cruel-kali.conf diff --git a/kernel/configs/cruel-kali.conf b/kernel/configs/cruel-kali.conf new file mode 100644 index 000000000000..150a300ccab1 --- /dev/null +++ b/kernel/configs/cruel-kali.conf @@ -0,0 +1,122 @@ +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +CONFIG_SYSVIPC_COMPAT=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_BT_INTEL=y +CONFIG_BT_BCM=y +CONFIG_BT_RTL=y +CONFIG_BT_HCIBTUSB=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_ATH3K is not set +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +CONFIG_NL80211_TESTMODE=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=y +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_MINSTREL_VHT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_EEPROM_93CX6=y +CONFIG_USB_NET_RNDIS_HOST=y +# CONFIG_ADM8211 is not set +CONFIG_ATH_COMMON=y +# CONFIG_ATH5K is not set +CONFIG_ATH9K_HW=y +CONFIG_ATH9K_COMMON=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +# CONFIG_ATH9K is not set +CONFIG_ATH9K_HTC=y +# CONFIG_ATH9K_HTC_DEBUGFS is not set +CONFIG_CARL9170=y +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not 
set +CONFIG_ATH6KL=y +# CONFIG_ATH6KL_SDIO is not set +CONFIG_ATH6KL_USB=y +# CONFIG_ATH6KL_DEBUG is not set +# CONFIG_ATH6KL_TRACING is not set +# CONFIG_AR5523 is not set +# CONFIG_ATH10K is not set +# CONFIG_WCN36XX is not set +CONFIG_AT76C50X_USB=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +# CONFIG_BRCMSMAC is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +# CONFIG_IWLWIFI is not set +# CONFIG_P54_COMMON is not set +# CONFIG_LIBERTAS_THINFIRM is not set +# CONFIG_MWL8K is not set +# CONFIG_MT7601U is not set +CONFIG_RT2X00=y +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +# CONFIG_RT2800PCI is not set +CONFIG_RT2500USB=y +CONFIG_RT73USB=y +CONFIG_RT2800USB=y +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=y +CONFIG_RT2X00_LIB_USB=y +CONFIG_RT2X00_LIB=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +# CONFIG_RTL8180 is not set +CONFIG_RTL8187=y +CONFIG_RTL8187_LEDS=y +CONFIG_RTL_CARDS=y +# CONFIG_RTL8192CE is not set +# CONFIG_RTL8192SE is not set +# CONFIG_RTL8192DE is not set +# CONFIG_RTL8723AE is not set +# CONFIG_RTL8723BE is not set +# CONFIG_RTL8188EE is not set +# CONFIG_RTL8192EE is not set +# CONFIG_RTL8821AE is not set +# CONFIG_RTL8192CU is not set +CONFIG_RTL8XXXU=y +CONFIG_RTL8XXXU_UNTESTED=y +# CONFIG_RSI_91X is not set +# CONFIG_CW1200 is not set +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +# CONFIG_WL18XX is not set +# CONFIG_WLCORE is not set +CONFIG_USB_ZD1201=y +CONFIG_ZD1211RW=y +# CONFIG_ZD1211RW_DEBUG is not set +# CONFIG_MAC80211_HWSIM is not set +CONFIG_USB_NET_RNDIS_WLAN=y +CONFIG_USB_F_SERIAL=y +CONFIG_USB_F_HID=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_F_HID=y +# CONFIG_R8822BE is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 
is not set +CONFIG_CRYPTO_CCM=y +CONFIG_CRC_ITU_T=y From d54dc7a7a8bfd7db09a2a239234454896ecb7501 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 4 Aug 2020 10:39:02 +0300 Subject: [PATCH 321/439] configs: add usb_serial config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-usb_serial.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-usb_serial.conf diff --git a/kernel/configs/cruel-usb_serial.conf b/kernel/configs/cruel-usb_serial.conf new file mode 100644 index 000000000000..2d57ea529a29 --- /dev/null +++ b/kernel/configs/cruel-usb_serial.conf @@ -0,0 +1,5 @@ +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y + +# natively support nodemcu/arduino serial console +CONFIG_USB_SERIAL_CP210X=y From 64fa5b19404aa156f32f3f8cbd7845cea4695354 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 8 Sep 2020 14:12:47 +0300 Subject: [PATCH 322/439] configs: add faultinjection config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-faultinjection.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-faultinjection.conf diff --git a/kernel/configs/cruel-faultinjection.conf b/kernel/configs/cruel-faultinjection.conf new file mode 100644 index 000000000000..5079fed6c2e5 --- /dev/null +++ b/kernel/configs/cruel-faultinjection.conf @@ -0,0 +1,9 @@ +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_MMC_REQUEST is not set +CONFIG_FAIL_FUTEX=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set From 07198c86eaaa9cadd11a1a94dcb9f37e97f94b13 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 8 Sep 2020 14:13:09 +0300 Subject: [PATCH 323/439] configs: add debug config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-debug.conf | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 
kernel/configs/cruel-debug.conf diff --git a/kernel/configs/cruel-debug.conf b/kernel/configs/cruel-debug.conf new file mode 100644 index 000000000000..2e4b02f6f42c --- /dev/null +++ b/kernel/configs/cruel-debug.conf @@ -0,0 +1,18 @@ +CONFIG_SEC_DEBUG_SPINBUG_PANIC=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_VMACACHE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140 +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_LOCKDEP=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_TRACE_IRQFLAGS=y +CONFIG_PROVE_RCU=y +CONFIG_RCU_CPU_STALL_TIMEOUT=100 +CONFIG_DEBUG_SNAPSHOT_SPINLOCK=y +CONFIG_FORTIFY_SOURCE=y From 8d4469c2cbdc2ca4c5dad76cf4c3177f85c9367a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 22 Oct 2020 18:07:10 +0300 Subject: [PATCH 324/439] configs: add gcov config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-gcov.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-gcov.conf diff --git a/kernel/configs/cruel-gcov.conf b/kernel/configs/cruel-gcov.conf new file mode 100644 index 000000000000..dd732654c0fc --- /dev/null +++ b/kernel/configs/cruel-gcov.conf @@ -0,0 +1,7 @@ +CONFIG_CONSTRUCTORS=y +CONFIG_GCOV_KERNEL=y +CONFIG_GCOV_PROFILE_ALL=y +CONFIG_GCOV_FORMAT_AUTODETECT=y +# CONFIG_GCOV_FORMAT_3_4 is not set +# CONFIG_GCOV_FORMAT_4_7 is not set +CONFIG_DEBUG_FS=y From ab3e3656030ac20a03f9e599ba4a9850fd77f6c7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 10 Sep 2020 02:03:26 +0300 Subject: [PATCH 325/439] configs: add lto config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-lto.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-lto.conf diff --git a/kernel/configs/cruel-lto.conf b/kernel/configs/cruel-lto.conf new file mode 100644 index 000000000000..6c5a1661d842 --- /dev/null +++ 
b/kernel/configs/cruel-lto.conf @@ -0,0 +1,5 @@ +CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y +CONFIG_LTO=y +# CONFIG_LTO_NONE is not set +CONFIG_LTO_CLANG=y +# CONFIG_CFI_CLANG is not set From 1efab63cbd2edb8ceae6ef8abe5ad6993676bea9 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 2 Oct 2020 21:50:51 +0300 Subject: [PATCH 326/439] configs: add dtb config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-dtb.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-dtb.conf diff --git a/kernel/configs/cruel-dtb.conf b/kernel/configs/cruel-dtb.conf new file mode 100644 index 000000000000..64dbfdd901d7 --- /dev/null +++ b/kernel/configs/cruel-dtb.conf @@ -0,0 +1 @@ +CONFIG_DTC=y From 3bc7119dd12707441955ca8aee3e7f0c6265ada7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 2 Oct 2020 22:02:41 +0300 Subject: [PATCH 327/439] configs: add kvm config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-kvm.conf | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 kernel/configs/cruel-kvm.conf diff --git a/kernel/configs/cruel-kvm.conf b/kernel/configs/cruel-kvm.conf new file mode 100644 index 000000000000..3acf5655b916 --- /dev/null +++ b/kernel/configs/cruel-kvm.conf @@ -0,0 +1,21 @@ +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_MMU_NOTIFIER=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_ARM64_VHE=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set From e4331373c69a4e724cb4b911d7a7e1c06d8bd4b8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 6 Oct 2020 18:18:05 +0300 Subject: [PATCH 
328/439] configs: add simple_lmk config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-simple_lmk.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-simple_lmk.conf diff --git a/kernel/configs/cruel-simple_lmk.conf b/kernel/configs/cruel-simple_lmk.conf new file mode 100644 index 000000000000..41a4ca779222 --- /dev/null +++ b/kernel/configs/cruel-simple_lmk.conf @@ -0,0 +1,5 @@ +# CONFIG_MEMCG is not set +# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set +CONFIG_ANDROID_SIMPLE_LMK=y +CONFIG_ANDROID_SIMPLE_LMK_MINFREE=128 +CONFIG_ANDROID_SIMPLE_LMK_TIMEOUT_MSEC=200 From 784ffb2281b6e871daeef59da359854793a2a9ab Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 18:09:56 +0300 Subject: [PATCH 329/439] configs: add reiser4 config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-reiser4.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-reiser4.conf diff --git a/kernel/configs/cruel-reiser4.conf b/kernel/configs/cruel-reiser4.conf new file mode 100644 index 000000000000..42be06276c1a --- /dev/null +++ b/kernel/configs/cruel-reiser4.conf @@ -0,0 +1,5 @@ +CONFIG_REISER4_FS=y +# CONFIG_REISER4_DEBUG is not set +CONFIG_XXHASH=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y From f689c1c059f215f26a3aa946eb4bd3aa99b4f9cd Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Nov 2020 18:01:22 +0300 Subject: [PATCH 330/439] configs: add force_dex_wqhd config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-force_dex_wqhd.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-force_dex_wqhd.conf diff --git a/kernel/configs/cruel-force_dex_wqhd.conf b/kernel/configs/cruel-force_dex_wqhd.conf new file mode 100644 index 000000000000..da789cb14223 --- /dev/null +++ b/kernel/configs/cruel-force_dex_wqhd.conf @@ -0,0 +1 @@ +CONFIG_DISPLAYPORT_DEX_FORCE_WQHD=y From ef78887a89bb10e49e3ebdc938d1c9236abd66cf Mon Sep 17 00:00:00 2001 
From: Denis Efremov Date: Mon, 23 Nov 2020 20:23:40 +0300 Subject: [PATCH 331/439] configs: add polly config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-polly.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-polly.conf diff --git a/kernel/configs/cruel-polly.conf b/kernel/configs/cruel-polly.conf new file mode 100644 index 000000000000..3d5d7eaedf35 --- /dev/null +++ b/kernel/configs/cruel-polly.conf @@ -0,0 +1 @@ +CONFIG_LLVM_POLLY=y From 6bd8bce83f80209e163cce4178e1c5b282f5171f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 25 Nov 2020 16:55:00 +0300 Subject: [PATCH 332/439] configs: add graphite config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-graphite.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-graphite.conf diff --git a/kernel/configs/cruel-graphite.conf b/kernel/configs/cruel-graphite.conf new file mode 100644 index 000000000000..8ccf5d33da69 --- /dev/null +++ b/kernel/configs/cruel-graphite.conf @@ -0,0 +1 @@ +CONFIG_GCC_GRAPHITE=y From c3ad5ff5e700a7587fd6edada7196b5734f413af Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 25 Dec 2020 15:13:22 +0300 Subject: [PATCH 333/439] configs: add fp_boost config preset Signed-off-by: Denis Efremov --- kernel/configs/cruel-fp_boost.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-fp_boost.conf diff --git a/kernel/configs/cruel-fp_boost.conf b/kernel/configs/cruel-fp_boost.conf new file mode 100644 index 000000000000..1712a2f8e2d0 --- /dev/null +++ b/kernel/configs/cruel-fp_boost.conf @@ -0,0 +1 @@ +CONFIG_FINGERPRINT_BOOST=y From 8e5b85e31470a8c99fe8994247552066dd85cce6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:43:31 +0300 Subject: [PATCH 334/439] configs: enable boeffla_wl_blocker by default Signed-off-by: Denis Efremov --- ...ruel-boeffla_wl_blocker.conf => cruel+boeffla_wl_blocker.conf} | 0 1 file changed, 0 insertions(+), 0 
deletions(-) rename kernel/configs/{cruel-boeffla_wl_blocker.conf => cruel+boeffla_wl_blocker.conf} (100%) diff --git a/kernel/configs/cruel-boeffla_wl_blocker.conf b/kernel/configs/cruel+boeffla_wl_blocker.conf similarity index 100% rename from kernel/configs/cruel-boeffla_wl_blocker.conf rename to kernel/configs/cruel+boeffla_wl_blocker.conf From e3b5a3ac17c7bc8e7491cca0adc154bb6d55c174 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 12:16:20 +0300 Subject: [PATCH 335/439] configs: enable morosound by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-morosound.conf => cruel+morosound.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-morosound.conf => cruel+morosound.conf} (100%) diff --git a/kernel/configs/cruel-morosound.conf b/kernel/configs/cruel+morosound.conf similarity index 100% rename from kernel/configs/cruel-morosound.conf rename to kernel/configs/cruel+morosound.conf From 9577bc0c29040a4e0101beed54ee067f12025436 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:44:05 +0300 Subject: [PATCH 336/439] configs: enable nodebug by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-nodebug.conf => cruel+nodebug.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-nodebug.conf => cruel+nodebug.conf} (100%) diff --git a/kernel/configs/cruel-nodebug.conf b/kernel/configs/cruel+nodebug.conf similarity index 100% rename from kernel/configs/cruel-nodebug.conf rename to kernel/configs/cruel+nodebug.conf From 507800ea1ca1757ce1ab27e529fb963975c23504 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:45:02 +0300 Subject: [PATCH 337/439] configs: enable wireguard by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-wireguard.conf => cruel+wireguard.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-wireguard.conf => cruel+wireguard.conf} (100%) diff --git 
a/kernel/configs/cruel-wireguard.conf b/kernel/configs/cruel+wireguard.conf similarity index 100% rename from kernel/configs/cruel-wireguard.conf rename to kernel/configs/cruel+wireguard.conf From bd914c80b9f4a37b16fd9ad877f3daec5f94eec7 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:45:26 +0300 Subject: [PATCH 338/439] configs: enable cifs by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-cifs.conf => cruel+cifs.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-cifs.conf => cruel+cifs.conf} (100%) diff --git a/kernel/configs/cruel-cifs.conf b/kernel/configs/cruel+cifs.conf similarity index 100% rename from kernel/configs/cruel-cifs.conf rename to kernel/configs/cruel+cifs.conf From 6f8b214ee77b7feb036f7bc253c28b9d4eae46ea Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:46:12 +0300 Subject: [PATCH 339/439] configs: enable ntfs by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-ntfs.conf => cruel+ntfs.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-ntfs.conf => cruel+ntfs.conf} (100%) diff --git a/kernel/configs/cruel-ntfs.conf b/kernel/configs/cruel+ntfs.conf similarity index 100% rename from kernel/configs/cruel-ntfs.conf rename to kernel/configs/cruel+ntfs.conf From cf0bb029caeaa7f7f056edf6aa4fa184f64fdada Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 14 Oct 2020 13:48:06 +0300 Subject: [PATCH 340/439] configs: enable ttl by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-ttl.conf => cruel+ttl.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-ttl.conf => cruel+ttl.conf} (100%) diff --git a/kernel/configs/cruel-ttl.conf b/kernel/configs/cruel+ttl.conf similarity index 100% rename from kernel/configs/cruel-ttl.conf rename to kernel/configs/cruel+ttl.conf From d304291fe4665b2a71f029c7d89a94ae1d36bff0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: 
Wed, 14 Oct 2020 13:48:25 +0300 Subject: [PATCH 341/439] configs: enable usb_serial by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-usb_serial.conf => cruel+usb_serial.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-usb_serial.conf => cruel+usb_serial.conf} (100%) diff --git a/kernel/configs/cruel-usb_serial.conf b/kernel/configs/cruel+usb_serial.conf similarity index 100% rename from kernel/configs/cruel-usb_serial.conf rename to kernel/configs/cruel+usb_serial.conf From d387b9fd8e7719384bcc2b0df29cfde9cbf760ac Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 10:38:15 +0300 Subject: [PATCH 342/439] configs: enable sdfat by default Signed-off-by: Denis Efremov --- kernel/configs/{cruel-sdfat.conf => cruel+sdfat.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sdfat.conf => cruel+sdfat.conf} (100%) diff --git a/kernel/configs/cruel-sdfat.conf b/kernel/configs/cruel+sdfat.conf similarity index 100% rename from kernel/configs/cruel-sdfat.conf rename to kernel/configs/cruel+sdfat.conf From 951f7953bd1143c716c9cbf3302a12f4b7d1f0ed Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 10:40:41 +0300 Subject: [PATCH 343/439] configs: enable sched_performance by default Signed-off-by: Denis Efremov --- ...{cruel-sched_performance.conf => cruel+sched_performance.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sched_performance.conf => cruel+sched_performance.conf} (100%) diff --git a/kernel/configs/cruel-sched_performance.conf b/kernel/configs/cruel+sched_performance.conf similarity index 100% rename from kernel/configs/cruel-sched_performance.conf rename to kernel/configs/cruel+sched_performance.conf From 21fcbe33dd72880afe4f7c60717faced223f7d87 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 26 Oct 2020 10:41:23 +0300 Subject: [PATCH 344/439] configs: enable sched_powersave by default Signed-off-by: 
Denis Efremov --- .../{cruel-sched_powersave.conf => cruel+sched_powersave.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sched_powersave.conf => cruel+sched_powersave.conf} (100%) diff --git a/kernel/configs/cruel-sched_powersave.conf b/kernel/configs/cruel+sched_powersave.conf similarity index 100% rename from kernel/configs/cruel-sched_powersave.conf rename to kernel/configs/cruel+sched_powersave.conf From fd35f97fafda1d7e4ada48386eca6e55d69a021f Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Nov 2020 18:01:53 +0300 Subject: [PATCH 345/439] configs: enable force_dex_wqhd by default Signed-off-by: Denis Efremov --- .../{cruel-force_dex_wqhd.conf => cruel+force_dex_wqhd.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-force_dex_wqhd.conf => cruel+force_dex_wqhd.conf} (100%) diff --git a/kernel/configs/cruel-force_dex_wqhd.conf b/kernel/configs/cruel+force_dex_wqhd.conf similarity index 100% rename from kernel/configs/cruel-force_dex_wqhd.conf rename to kernel/configs/cruel+force_dex_wqhd.conf From ce857e3b704dadfa3a09b68adb980f70815dba5b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 28 Feb 2020 19:25:44 +0300 Subject: [PATCH 346/439] samsung: integrate different models in a single tree Signed-off-by: Denis Efremov --- arch/arm64/Kconfig.platforms | 51 ++++++++++++++++++++++++++++++++++ arch/arm64/boot/dts/Makefile | 54 ++++++++++++++++++++++++++++++++---- 2 files changed, 99 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 39d7ccbe8fc9..276cb267bee2 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -321,6 +321,57 @@ config SOC_EXYNOS9820 select PINCTRL select SAMSUNG_DMADEV +choice + prompt "Samsung EXYNOS982[05] Model (only for device-trees)" + depends on SOC_EXYNOS9820 + default MODEL_NONE + + help + Select the phone model. 
+ + config MODEL_NONE + bool "None" + config MODEL_G970F + bool "G970F" + select DTC + config MODEL_G970N + bool "G970N" + select DTC + config MODEL_G973F + bool "G973F" + select DTC + config MODEL_G973N + bool "G973N" + select DTC + config MODEL_G975F + bool "G975F" + select DTC + config MODEL_G975N + bool "G975N" + select DTC + config MODEL_G977B + bool "G977B" + select DTC + config MODEL_G977N + bool "G977N" + select DTC + config MODEL_N970F + bool "N970F" + select DTC + config MODEL_N971N + bool "N971N" + select DTC + config MODEL_N975F + bool "N975F" + select DTC + config MODEL_N976B + bool "N976B" + select DTC + config MODEL_N976N + bool "N976N" + select DTC +endchoice + config SOC_EXYNOS9820_EVT0 default n bool "Samsung EXYNOS9820 EVT0" diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index a469363ab745..99ac941a815f 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -1,11 +1,53 @@ # SPDX-License-Identifier: GPL-2.0 -dts-dirs += exynos -subdir-y := $(dts-dirs) +ifdef CONFIG_MODEL_G970F +include arch/arm64/boot/dts/G970F.mk +endif -always := $(DTB_LIST) +ifdef CONFIG_MODEL_G970N +include arch/arm64/boot/dts/G970N.mk +endif -targets += dtbs -dtbs: $(addprefix $(obj)/, $(DTB_LIST)) +ifdef CONFIG_MODEL_G973F +include arch/arm64/boot/dts/G973F.mk +endif -clean-files := exynos/*.dtb *.dtb* +ifdef CONFIG_MODEL_G973N +include arch/arm64/boot/dts/G973N.mk +endif + +ifdef CONFIG_MODEL_G975F +include arch/arm64/boot/dts/G975F.mk +endif + +ifdef CONFIG_MODEL_G975N +include arch/arm64/boot/dts/G975N.mk +endif + +ifdef CONFIG_MODEL_G977B +include arch/arm64/boot/dts/G977B.mk +endif + +ifdef CONFIG_MODEL_G977N +include arch/arm64/boot/dts/G977N.mk +endif + +ifdef CONFIG_MODEL_N970F +include arch/arm64/boot/dts/N970F.mk +endif + +ifdef CONFIG_MODEL_N971N +include arch/arm64/boot/dts/N971N.mk +endif + +ifdef CONFIG_MODEL_N975F +include arch/arm64/boot/dts/N975F.mk +endif + +ifdef CONFIG_MODEL_N976B +include 
arch/arm64/boot/dts/N976B.mk +endif + +ifdef CONFIG_MODEL_N976N +include arch/arm64/boot/dts/N976N.mk +endif From 3fe92bd73eaabd610352d1da65d2ce2e72ebb487 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 23 Jan 2020 18:53:00 +0300 Subject: [PATCH 347/439] cruelbuild: add python build script Signed-off-by: Denis Efremov --- .gitignore | 7 + Makefile | 3 +- arch/arm64/boot/.gitignore | 2 + cruel/.gitignore | 1 + cruel/build.mkbootimg.G970F | 11 + cruel/build.mkbootimg.G970N | 11 + cruel/build.mkbootimg.G973F | 11 + cruel/build.mkbootimg.G973N | 11 + cruel/build.mkbootimg.G975F | 11 + cruel/build.mkbootimg.G975N | 11 + cruel/build.mkbootimg.G977B | 11 + cruel/build.mkbootimg.G977N | 11 + cruel/build.mkbootimg.N970F | 11 + cruel/build.mkbootimg.N971N | 11 + cruel/build.mkbootimg.N975F | 11 + cruel/build.mkbootimg.N976B | 11 + cruel/build.mkbootimg.N976N | 11 + cruel/clone_header | Bin 0 -> 435248 bytes cruel/dtb.G970F | 2 + cruel/dtb.G970N | 2 + cruel/dtb.G973F | 2 + cruel/dtb.G973N | 2 + cruel/dtb.G975F | 2 + cruel/dtb.G975N | 2 + cruel/dtb.G977B | 2 + cruel/dtb.G977N | 2 + cruel/dtb.N970F | 2 + cruel/dtb.N971N | 2 + cruel/dtb.N975F | 2 + cruel/dtb.N976B | 2 + cruel/dtb.N976N | 2 + cruel/dtbo.G970F | 27 + cruel/dtbo.G970N | 19 + cruel/dtbo.G973F | 35 + cruel/dtbo.G973N | 23 + cruel/dtbo.G975F | 39 + cruel/dtbo.G975N | 27 + cruel/dtbo.G977B | 33 + cruel/dtbo.G977N | 33 + cruel/dtbo.N970F | 19 + cruel/dtbo.N971N | 19 + cruel/dtbo.N975F | 39 + cruel/dtbo.N976B | 39 + cruel/dtbo.N976N | 35 + cruel/unxz | Bin 0 -> 627688 bytes cruelbuild | 1329 ++++++++++++++++++++++++ kernel/configs/cruel+samsung.conf | 0 kernel/configs/cruel-empty_vbmeta.conf | 0 kernel/configs/cruel-fake_config.conf | 0 49 files changed, 1897 insertions(+), 1 deletion(-) create mode 100644 cruel/.gitignore create mode 100644 cruel/build.mkbootimg.G970F create mode 100644 cruel/build.mkbootimg.G970N create mode 100644 cruel/build.mkbootimg.G973F create mode 100644 cruel/build.mkbootimg.G973N 
create mode 100644 cruel/build.mkbootimg.G975F create mode 100644 cruel/build.mkbootimg.G975N create mode 100644 cruel/build.mkbootimg.G977B create mode 100644 cruel/build.mkbootimg.G977N create mode 100644 cruel/build.mkbootimg.N970F create mode 100644 cruel/build.mkbootimg.N971N create mode 100644 cruel/build.mkbootimg.N975F create mode 100644 cruel/build.mkbootimg.N976B create mode 100644 cruel/build.mkbootimg.N976N create mode 100755 cruel/clone_header create mode 100644 cruel/dtb.G970F create mode 100644 cruel/dtb.G970N create mode 100644 cruel/dtb.G973F create mode 100644 cruel/dtb.G973N create mode 100644 cruel/dtb.G975F create mode 100644 cruel/dtb.G975N create mode 100644 cruel/dtb.G977B create mode 100644 cruel/dtb.G977N create mode 100644 cruel/dtb.N970F create mode 100644 cruel/dtb.N971N create mode 100644 cruel/dtb.N975F create mode 100644 cruel/dtb.N976B create mode 100644 cruel/dtb.N976N create mode 100644 cruel/dtbo.G970F create mode 100644 cruel/dtbo.G970N create mode 100644 cruel/dtbo.G973F create mode 100644 cruel/dtbo.G973N create mode 100644 cruel/dtbo.G975F create mode 100644 cruel/dtbo.G975N create mode 100644 cruel/dtbo.G977B create mode 100644 cruel/dtbo.G977N create mode 100644 cruel/dtbo.N970F create mode 100644 cruel/dtbo.N971N create mode 100644 cruel/dtbo.N975F create mode 100644 cruel/dtbo.N976B create mode 100644 cruel/dtbo.N976N create mode 100755 cruel/unxz create mode 100755 cruelbuild create mode 100644 kernel/configs/cruel+samsung.conf create mode 100644 kernel/configs/cruel-empty_vbmeta.conf create mode 100644 kernel/configs/cruel-fake_config.conf diff --git a/.gitignore b/.gitignore index 880734d33f8e..cc3604353dba 100644 --- a/.gitignore +++ b/.gitignore @@ -39,6 +39,7 @@ *.symtypes *.tar *.xz +*.zip Module.symvers modules.builtin @@ -131,3 +132,9 @@ kernel/configs/android-*.cfg *.reverse.dts __pycache__/ *.pyc + +/*.img +/config.json +/config.info +/config.G* +/config.N* diff --git a/Makefile b/Makefile index 
9190284c413b..692bafceb978 100644 --- a/Makefile +++ b/Makefile @@ -1456,7 +1456,8 @@ CLEAN_DIRS += $(MODVERDIR) # Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config usr/include include/generated \ arch/*/include/generated .tmp_objdiff -MRPROPER_FILES += .config .config.old .version .old_version \ +MRPROPER_FILES += *.img *.zip *.tar.xz config.json config.info config.[GN]* \ + .config .config.old .version .old_version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ signing_key.pem signing_key.priv signing_key.x509 \ x509.genkey extra_certificates signing_key.x509.keyid \ diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore index 34e35209fc2e..be177135a3b6 100644 --- a/arch/arm64/boot/.gitignore +++ b/arch/arm64/boot/.gitignore @@ -2,3 +2,5 @@ Image Image-dtb Image.gz Image.gz-dtb +Image-G* +Image-N* diff --git a/cruel/.gitignore b/cruel/.gitignore new file mode 100644 index 000000000000..718773c4fa1b --- /dev/null +++ b/cruel/.gitignore @@ -0,0 +1 @@ +META-INF/ diff --git a/cruel/build.mkbootimg.G970F b/cruel/build.mkbootimg.G970F new file mode 100644 index 000000000000..ea5fd3d155bb --- /dev/null +++ b/cruel/build.mkbootimg.G970F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28A014KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G970N b/cruel/build.mkbootimg.G970N new file mode 100644 index 000000000000..f2954dbe3aa2 --- /dev/null +++ b/cruel/build.mkbootimg.G970N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28C005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 
+tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G973F b/cruel/build.mkbootimg.G973F new file mode 100644 index 000000000000..d8374870f111 --- /dev/null +++ b/cruel/build.mkbootimg.G973F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28B009KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G973N b/cruel/build.mkbootimg.G973N new file mode 100644 index 000000000000..6c8bdef91f7f --- /dev/null +++ b/cruel/build.mkbootimg.G973N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28D005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G975F b/cruel/build.mkbootimg.G975F new file mode 100644 index 000000000000..5c67877067ba --- /dev/null +++ b/cruel/build.mkbootimg.G975F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI17C008KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G975N b/cruel/build.mkbootimg.G975N new file mode 100644 index 000000000000..9703db832e5a --- /dev/null +++ b/cruel/build.mkbootimg.G975N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28E005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 
diff --git a/cruel/build.mkbootimg.G977B b/cruel/build.mkbootimg.G977B new file mode 100644 index 000000000000..7d73e8183f13 --- /dev/null +++ b/cruel/build.mkbootimg.G977B @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC04B005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G977N b/cruel/build.mkbootimg.G977N new file mode 100644 index 000000000000..baa7aa52aa08 --- /dev/null +++ b/cruel/build.mkbootimg.G977N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRK21D004KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N970F b/cruel/build.mkbootimg.N970F new file mode 100644 index 000000000000..f15f5dde22b8 --- /dev/null +++ b/cruel/build.mkbootimg.N970F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD26B006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N971N b/cruel/build.mkbootimg.N971N new file mode 100644 index 000000000000..0a1e8f9569fc --- /dev/null +++ b/cruel/build.mkbootimg.N971N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD23A001KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git 
a/cruel/build.mkbootimg.N975F b/cruel/build.mkbootimg.N975F new file mode 100644 index 000000000000..3452a84326b0 --- /dev/null +++ b/cruel/build.mkbootimg.N975F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC14B006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N976B b/cruel/build.mkbootimg.N976B new file mode 100644 index 000000000000..542868855bb1 --- /dev/null +++ b/cruel/build.mkbootimg.N976B @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC14C006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N976N b/cruel/build.mkbootimg.N976N new file mode 100644 index 000000000000..52d971c78c15 --- /dev/null +++ b/cruel/build.mkbootimg.N976N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD23C001KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/clone_header b/cruel/clone_header new file mode 100755 index 0000000000000000000000000000000000000000..4f0676a586327fbe1b5e0e79b5ab0b79e2e47bbe GIT binary patch literal 435248 zcmeFa4Om=foxlG;AfYKuX-iw$(t6rLLs~+nEp2I}Eon$fDoMtKTH2wEVvY8Hf1l@^VP-fq z*|+O|U3+=b;Xdc{yuZJlnZ9vL_15&XH050B>hnrJSyyVQ!Zw(DYNcaERj4wxn!o3% z93^T7p9HV!b}lHd?XLG><5BO!mcDV=f0fZ4I(h$E+}SBJ7S8amxR+1FF5gJH zrCmB4)lW(}bi$MJ@8Toz-W2w?wlw+G72LPpaJ?V=m#>>|x}og)(p&z^?j?S?d{*$e 
zYsWq6rroRGb5+^x#h=X0c>gnRdNj8EVA}a^p5N^|;ImrxJ{)L>SuKrLTX=uCAFSZW)qIeS_9OLC)<5`j|Rc?oWyQ;ara@#Joqs5B0H3UsaEv=Tn zsTB`eS`Vo$?STfXv8A=y7qbGbtu3u;tHdVkYYq_M7Aw>e-Y3p|;TC^D?TiFkeX*vN zuoZ5J;bkP!(kk&lh_(8{QF*iiVSl8hDI8Oq{QlNJG-~-mtpT6^kku5n+M)qfv&5z( z)v?sj5)SLcslw<+Nq6C+ZPxA9?%mt&-%_`IQ?1(G5>^jAaOl1Vl`pIU?Xf^OR!8PH zg;e>&fo5MQ)Y5SKMr(_tMM7+D3pe{BMeD?`)vDRoUKeg_u8RfzQ59|swMBygZlWRI z-l%%L^0fxSzGz8!Z&Ns`9&J+T<2?eL`7r1ShU2p;3*+tO@4w_t$}^fR6}uI z;wcIHb!?U~ZD?yX!RoxmpN5tLf!0HH(WXZOR$<#}MZsOGS|Wk)Mymmj$mWAhv7oiH zZu71!n`$=ftgG4X-MX!Mi?z2cMtNAxQbIM9S+g&E$ci3n-rEvtYOq30;r&#Za6?O5 zI2LFP_zBAAS3BEc>suPvQx;T5GQH6{;0v_{6a^P(pd z&=;a^t8HO&qsbp{yGlZAX=n-Q1@j0Zt+f_}l zO>K&_5~~BML_JJXRgVT*Ta>OWK7x!WUH_=JQkhj@A(_(b^B-u6N`5BS!>Cw_*R&w% z3AENxkD}5@r50`9R9(Gub6w4@O`Er<%D{mpk}c)nv!XF7Li1vBO!MEN&1$#o*jB51 z0s1ZaFx_*g-Gpsi+qG3Ee0!jop2Ujys7OJ2r=YLZe^83sco+^Gv~xpxKqry9S(_Si zcUzds_UNOB>imHQ-=VtZsMD09iaF+M3QH?$3)jUPON7uU-H1+yiZ|`Lx;pF5z&>iD(>0On)c$C! zsUd1%>Uds4)oiQ_HSKK+A8ZQyl{}XC8e&ZcsJ(5?djqX9P?%V)*KN{XbAbD zQZowM?EdS4!svq=SEtq#J8n9Uo5K4xs>Eix(dqg;B~H7)CrZxh9{2&1bs*_d18lxY(gOY}5J0qH4457o@$=ezh;6 z%YbP)maVG_qgSb#wm{VHJEV5|Vrsjum2T3mLv;;QGFpw(!O;ze{Nc4UKxy1!!T`5k zw*)$F9oJ~Y*PwIW6iQvlCl$}G4Iwk+>2CR;uPLUx}NQjWwz6d>g*@Aw}tfSvLuy(5S=wONd7%&wMG2WBdu1Oc2w@# zxs9HA$L_lAJ1e(H=e)aW=e>2C?x}s9bg<2ohV%gBDpMKe7pCK256}S(K#<#J883kiIhop>LOTJzCX0$70tPYl-jKMX&pXXr8~=1yL7#{7hL+Y=a*o=W(qY`&%1oD9>~CsoHq&n2XW*Ve3Ce|&06ccBLgyK9Il ziReb_^}2es(b5|N`c}K&=WlC{IK6#X#!+8Xy|&F4j*)H|Pwa8S^hK$Ih#9BjwVg>; zV+%t~ye-QRPcKeuPg&GZuElgaN$Zw)T_XN%bcctQu6NWnnPfN7Ew$=GK0tR-)k3YH z(EZdzr^puv3tczVdKGQ(F{fSIyp~>de;_IopYAu*(z3rT;&c#ZMk6z1Kb?^<*=Mu9 zNSW=JLMREx%g-v3wU3IbP#aS==4ma5sIbPGrMm(0oTZV>wwjpOGk;;($zaTMpXoZY zg<>wVNs7y~O73 zwotrTEjNxf6BAhhSi7#fH$IlyQzbp3O2cQBMhn)<3o{k*>1I(v&loK+v&Y5|_wmq7 zBW??uvbW3KhzxI1BBp6D(8yAE>C)34-X-vw-IWZ#dZFkn3C)zUWU+clRcW^;{JPr6 zaPCswngh%Q{He_{WeiCfDcqZ~Q`=;r+8TI-`3v(MXXT*$vAuQDk1mcUX7a|wKCI}Q z8dSoRj+Q0;bbQxCuOzLth0(ReY&Argo0AQydfT0w&B~5-yk3;-^ww@n!U{l+^wrEVX z)zOyjY@$Crl$Ih^ibS_4d|=vMWjJHAr3Yg59O 
zmfB<{d(s-HC$c3WeTZx@nazsC$$Jx%|J_W20@jYLyUjSlrkS64ZSwY0q=lYK7W@7- z*{ia4ZC$^=DHKvHI3IY>9;Zc5oM^L`CYutv`Z!b8c$>F-m?%3N{yGXp-?W7Stwl0; zy}r=@*gCxvWBaYMXG=%Y0#!E(S)(?PRLwwLv#*u)O;Q(e&*J2hjhZMEVY7Ue@pm;X z%XBcs_R6z_u{7k|CyeKM*C4Uy*~%_h8w+o;R`(?hv5=H~tfjeWZ;?z2imj585+)vD zJ*kvNX=agTsws<)jj5Fn=RVc|K02d_R3;V;GWVx3@7lEMzB=7*b<3{Wx$`x<>$dFJ zbZ0fI1A7leR!-4F;f5fyE7B&rDa_oQ8JJ1AY;G|d(a*JDJ@K<2ee$j+T2QGTW|Vh-Fz1pZR7#FieXmcvQHV-r4(V~F`%e^C8V-7 z)DStubcGoWT`F6Tg*9q-K=*P9Wzfv7iZXL#yFavK3T0+wx>>2k2@?A;p-@!4&g^qJ zv)PnU(9D7#P!Fo@deBUrK;ga9H%KomOG9=NNQg4q5w05<A3XHoAR(lpf{|= z3#lFKBDTe3Br{uRQMRNb!4@jDwYXI%3$=FU%&iH%6jHq2GW}bp_ZTDzrmkY9n~enH zXPZ%P{}-)q)D?NXJq2oMT_<~xvcy+ryV2gKO{|ieWNB&@=`K(sH-h%zW1#z@+u)>TOS*?Ab5X zm|CnCAZ>a9V^-Ens=M9{DRKJrB{LzrHP{uzQ^HVaZ!Rs)W@K@^tH#>umlmP!Vh`$d zrXQo!^f8HD&zS_96) zS;iy{Vy3aS2G+-S+GTjxb7UET8iLGoI;=JoIXCNi`)c(|7F{j9;1F>gVDQ2eLIrDeDTjtt9I9qHo z_cCW63G--b-hXYWtT$bf4+7(hW_uZ}{4H(FhSrs?=Rpl}aF94#J7gawhFA~<7=$?FD6!Xl zYQr+Sst2NBd;5g5kXF5Kx;3y@_D!iSvMpM~o+v$_J~!RSjDRV7SSnYzjj4h@T%=V= zx0O_{=!qLG#v8Fo1B{xb{{8@4_=)YnsoDoOSf*C%s&4HK9O9sg!Hpr01-PFPI?+{P z-`KQ|39+8KuU{{ISdrkNXcO}oYq5dpZH5-V-(Ha8BPpU(uo1P2!Yx*ijjUFCol1=F zkp7j5uXeSC-B$;?db{+srlK;!m;=d8_P#P(SL9-`WvaUU;4~rqNHk;l@cUvemw)Zgodvr)ypS8XxYU2<%*c+|hnF9sO zS&T~7nZrUhjhV|fFl=#%*czyFb}#E>!c3`~8iQZXP7~9{gY-kPg<^(xLZO@`Q}eI=&4>tR?=BlT(Ec3*o_v+P3Z^C@%O!+s@01AA-aeQIy!jCtlb z`TnNJCe9z^vrB#41~QThdv!>I)mFX@FCFJeKCAeR_*tZDUBa#}j!ug@H(pH(KeHsW6E%=*{d1a3Jh-u6B6( zWd?O8XS$870L=hs-fh7vIXuwAntf8GN|=vPUs!+XsjuZL5pUuL1PN;)hQEe@oG-i7 zD#fy9$EF=>({|alluE-IQ&EL=H{9wwR8$x#mPG15rybN1hn(%cki?!m(lyPDY;5qv z52s8$W13?EW1$u?>sGVLuixC@^^CCTNH}OPosL;eCC$6d?qtf8CuN)3)il1X6_y@- zN!Mh*o)F((WIW-ZTCbs`Df_Iw30;AbnaH)u%kgO-By)52zU_CSq$=oRQ7aT^)LnpS zL!xiqy-hleHVzTk`-snF<43@B8~Uh#&WA;oUWn>>gbA&1+s4A}8w+<^g%R0u4x3|K zHl7ceT_T;Kl&3wq+F7Xc!pTpkdCHX3IZ=^&-Ms9Xrk&f5n1dE)#VUE0aK2ARp3u#c zo3<-^tJYZ`nT4&g^udnglU|RSQ(}9jW=80QR^}{ZHZQJLNfT>M*+&U5n_`7S(d_W; zpu45zZQZtGTg`n*Gay;P7nWShb_su0J3FIw&fGwcUrV+~y>ce3S2c;%N9kpuJu{J0 
zX?49GKJMGGY5TU#b@y)B^cs5$=|<8ZrRH^By79L%vbCCJON?C{_Ucp(>+V*D_=p}y zoNhJo1Y((q=;9%fyy?MPK&|vLdgfMG*sy{3ZumS9AMwrhf2{S8)Y+u0iJ$E`hyK18 zEqA}{@|f&NXtbN|++4YZ`2@MCtD%0)%e-->kLK)iUHk0}IRe!=ySQGrhyQrK>o&He zyN(YJGCRQZkq~F5%=@V>|BJ68I%^T;0y}qYyKCDHGm)y%FHJ~I)rU#D*acdzb6l@c z%rLlQ`?!JKJM%UoyLfg#APdXA^zTxc0*jSG`jW-td>Jh zAJ?ycfPK{m^^Sd!Jp*7qZp-mW@DdZ6&Fc6m!20!zJAJZfmnWCQKSmPs`dt&%;Sg(d zHU?KK=LDWki?NV&m^a?o9+mfU>-5Qwy+5g62Gi#|q`Tb=hn~Vv%7fjrRKMhEhvz9V z2C2R#oC>m3ah?==_VQwKO*Lg2*r#94;HAiYVSPuNlp)EPpZi#$AEa=Y0N$XR&+3G3 z$L;Vb@vY5-lc)G=VS2DItCx7v0kHB*IT@aJh5TYZg0)#WvGwyWA=dIFd<_i5y1GuE8)9zH7$Zre7R z*Z1pMH`3_Yn~5!M^=^!M7|3N^(aI?(hZii{q$nx_4YL2wTfctEX#HBCnQWVVP#JE_ zvWZi`cIUm3MvjNgu{gm}seBx##t)b6SNNo{>iv=UJWJ2UmD3F*El6E^hQ(1W#n|Z) z?4Dy8Q|H}yj+5m)IT#2<^kBS=ZG7hd>8>p`QuK9la^x(EqoI}<$G7AbYxqbLuMX?J z!+EPuck9W+b-d$uJLglcp&`k49$EwP-3nSC6IS-$^$P;tox8WyR^LYz+qG-U<{ERn zRA*k=kgR2^#LOP`Jln}KC4;%MByt9JXR4hrIk0<5vP*i~czNuIzmBVC+{qL6l+28; zycpo^(M$bzcd}%Em&zt9AHBuS!IgfiJ;3bUEOMQOBE?{@isU6(+H@%3lYLa3sqrn7 z*1&<5{bZMUO^zlnF9NZ6Hg(!=3x{+UuV<}{hP-4cMZv08zqv{;wRmrzd9a8pHE#{* z{!(6$m6t6Trk%mWEMsI(nYRt-*Vjgq#ut`ttf%?DiHtKGOmkKjA8>SVyyK)(#cYRT zW4^ya>Nt8KQo11$r#tnz3)^7w!X3kcyhOkYrtH?rmWzH9+HPKTW^Ny8;WXJ)0hY4* zn+tMkY?fB`{zskO9x<-%`(-8u&KZ2l(Yvf3IImfJD2bk4Iq9$9C%LGwx;h2~ez za@pR`i01;^6=pV*I7gA`)w2EVI(woM?*vjZS{HGAHsx*@558H>@TAh)c}mS#e4QG7 zwQ37DvA@O4S?!|MwKSWyC36_FqM<^{>sDJC*!1?3AYF?A|x?{+#b zTBnwY-5lwgojI_)%$HpE8OHe*OuU@*&Y-M97Z;TAam$%~EKVx5V_Bd^)t#;Mkh~(m zaHe**Hp@4u>|t%2{oMq4uZVGpLYJ*@=i3Lm4`V3g?12tWx@`Le<3{-cir#|OZ%H<8 zX!LKOWOz-*?7l9(51!b!Fx7B5;9^PF>`OkZ+7K;vG&?-tLkW&Xg;je(m>8*aS$2LFvb-%wh1bLowqG6a9AGWADEeV4Y! 
zNYQAQVPUjH|Cd^%@jVNt8Onb8M!o~Zy0DS2zR4FKTAgmp^>{VDD4@Q|x}c>|wp^uP z7k9Xt`OY7wak^pY$%ix_-csZ3BiUb;g?8e%ni}(nT*HR9kfq- zk|)Pt^2HQ+npuDdJmef9k}tydRalJlyo@(V90 zIw#rJrBBZHkS`cN=X+v$P2Xi7ceF*@DN5(dZ(KB!yR5%FLSBRbB&DzbI!uO881EdN zt;_e5%{BK%F+YosareVJUd8>_`S>=sG<@tW|9FR__2rjewDWgC^FqwCh=Z7Hr|HSJ za``fv*@knD%nq@!>+{#im@SJ<-V|Y;C6c16*I&n?+RDiZ=T3a%&e=0c672^WH<^=C zcNzJeMV@hH+opzZQ_Yrc#Oz$l(bL3 zg=i)C99^qad>5@quJ}{@raLG3%7|aKzDZwcE|;-zr@qo%E-|+#kN~q$f>r~#nOD?@E|MDwdWnFpI)dhvutSwr1?RCZLOExT?Ky7?=`E9q~QL+4i z=L35le5kH|dGptIJobh+cK*d*zNzc4-u#yC$KU$4p1*$kJ9^*wt|tz^`#taNd*Ay% z(EsEIpBnhkhd*-Uqkr?U!Tx8~edVjq z9Q)eWzj6E@zWJ^3XTSa2#CQJjyOZC0{)MUU|IO%e=bg5wC zV)36ZZAbc#(*Bfof7+YVI=MIhI11wKUi+y8&T6wC#BDe>#l0u}W!wE5)XQma!R;gJ z&FO7t)uxxFzm2Ec(jQkp*DfEyCogfjH0@H-B!3;=rd6e`LY2NP^`Eip{2jyAOKL`q z>3@!Ti}vxmw!iuKeXS1RI{nwNB^39k)K6^p8@Rg_60oa?bX~}o_3QmF6Hk- z>hjAk&*eXJsrB{^5QD|GVV}+}3iinl1Ixv0@<^qO7czD{#z%Ed-*V`gO`9(cXMXN; zBTsDo?eFjS`HG!o+i$qs^ZTlUe|dlT)6qBn%gzhiMjxMg?`Qt8zx>9J4Sw(H-)+0~ zwvWwM{_)(8-tgpmd!Ebs;sb@7TaNmRRvuq1UMe+rhhag5;eCb`?=b9s#_-_<&Cafp zO^R#c{07ag7v8HGc=lPLRX31+C!9S{b&K%Bhc9i06+h|sd>Hm#acuN?SbEKG_h)Z* z(xV2Z{_5V+1Dn_3_qMNm zZmzR3toSlGo-4IFO}IKu@f_6u>eZ7<-L!%HyK>D{E7qKsd-0{0TUTGRu6V-@H<#^iCWygfk|_g`1vvE84`@AO=YauHv7^Qafp z->U+i-c8O;XgA{iN=2S+T(kPg)lWWoy*+eYa>0Ug)f-=%acNy{b*;Lh;G^Qv zND7PVgiMyXX6YE`syu)KYfj1x&3QIulft~{NoQBPW^-7&fginvxtt0 z>u2?b?|#bg#Eju7`-iVQT_!?9KQR1sg~@<@AGb3iUsfAR4Zhp(Z5Nw}KldTSzS5Q2 z{HzJXJ7Q7;mAdyz^NU*hUod>dV2_Ib|M_R7wdnyS=8FB<;z#5wwT>bp{V zmAZA(tc%wCz+_PEuXK}B7q5|8tyER1S$+JhL29^CC;Knb-1)C;zg{%g6k45gl#DA25EmR6$FZRbl%QL6vxb(*)e zU8nh{8>Hp%yHX1!nl)edXg>9U8#LS6b!$>Xdv4axg;iymgBv$$o?BY3d0px4n%~}7 zp?UnSJ2iiPf2F31ZPlE5SCwYP=kL}`|53H(&(Gbd`Sj-3YHsh?t@*$g?$P}8OZRGC zSaHARj>q?CUiJJ#n)h7e(>!{pLGu^S1vH;83TkfXdRX&IKMHAX+|;5O`S2r}muJQ_ zGn)=*{_NRy&9~h6dd(y6)3XciJN5Gejen`xKHjC7cf(sWUwY5unm>N&ZJO_R@a>vk z`*N>l!=#X@2^^M>PNK>5ppe$Q;x> zvi0MdpLx?!&5F^#)x2nKNb`?Zd{*=6s?TYzZT`Hb>iL4^MIZf=<}D+CuX+8~p4OcF z)>ky=zV(deyl6F<~kb@z`nV_*ES=CfDMXsXViX#QdH zrj8V$ 
z`9Zeb*(zPF9xw(ETDF0s+x8vx0d4)k)%et%min)l*;04fN^O$5s*&fnt0z|e@BdS- zp&V9gH)}MXZWvjMxUA!vumUGTJ>%6PZI!o#89kvP6)A^iz_IU-DRI0N?L=(H_ zy5f#o?=*gD2j^nGYL(bINB%ISUc3lMWhP(8ZPN^!S&r=-`^Ir^(=m69o!_%GwH-IQ z)-V1BrQypBER#xY2VU%g|`4!6^OcId<} z;@ER;mhsPV=j4lJyL0F`8?bd{$FI|VcIbpVX6r1*^i^R$J9PY;wsmF4uhV{Z==kU4 zrxU*%+nz(mzY6EZYb;dQ_8dCKoz9KNSg>q+4jtn$=O$t-RM_?$I>zz(YDdVaZw?)M zolb)6_*u4p4jp@Rk>;}FXM5`;Kb{?(j=hMn?)c*{?w`|8Y>%8rhjDxKM*8v^EEjHk z^WS6aIq7s5_s>kr`C-)4_~$sZ19Y;wxrI`-|@ogPj;Teej@e^iD2Y{y&G+s_f3oi+z- zj@g{H>9m*%J4T&0$DA12F^)G-yJ5u}n3Lm917Wz-ms7!=8sk(@rvf?^%t=_T?Jm!z zQ^1z}Tx8QpxRb#Zw*Kb~urs3G=AUbToe`Zj2W*bnoVNMrCSbSqKlcIu_c}q-y+)i~ z*gPs_*>&Gzv%;p=X1&db%}$#GHpgsE+x&AI`g0pHRpHN_$p5`g#FT8O-PQ+ej@g{H z`R6|1&wYTY3jYWDfVA}V6)Q3_GBa1MT(v4I>nyI-TxWA-bDd*fYwYV>`?6mE%Q-LQ zIzQ#QAmv(oX~f*|`q3+17oFz1_%v7EX|7BDo7biP&FixN=9T~7yjHusyN8sMopyd& zTCNygv}VzYy0Vy8C~vtV=dKr?KmB#NSh-r;Q}Xv!&p&@<_9d3nt0pmQKYjl~ck_4g z&Zbz&SjN3M1yri;CU^I5wcU4I>u%m4o^xH3XKAMXdD3=2wM_U?+ksW;?mn36;WE!@ zm#uiU{6C6zvi&g65#R{-1%Id16zm$o(bc?h1oL1&^g!~1?S)bEaC_;uRtmPm{Mi1@2j|nSsWSm!ZJ7nJD?m-A3Kg)IKWY2P9bmQeiJ{S z;z+Rq=EGVz3ESb(ar}fMa0nK0lEyV3vdDEm7p(>H*gr%!VwsOW3U_M{sH%J98SRYQ=}0N z!JG|BSsbaB!E)$@wJ-=fVJAEc`(PJGt7CBfWzq{rnaE{%l&a*-QV;BdURa)@pH3BED#6HZvnt0!YIV^%h zunJ}r5FYG+y)d^BcW@X^!}@E|)B@~-IX6=lunZ2YB|NyNC{4A)A=n42*Wn(PU5k6@ zh3XdK3G-ktEQ3SP3nyR@&cRNYa~=Lb4;+GCcpOIHH0*^7a0up<;T{&jIamR6iqljr z^uP%8!fqIW1F#p4z#%vRC*UlcgStQc!heNOzPQXDp2ghJe zNt&919ykZRFl(c<5y~6(!ZJ7ny>J2s;T-IQIU7hn^uQtLg~wq8PQzZf0Eb}Ct8ovD z;2f-gIUdpvJum{jup36;0PKY$a0pJo2{;Sqpeo1x^`swqpa*(k6^y`o*bCd?5bT8$ za1hSHF_?1$>4zRT2fZ-sHrzuC_QEna1if$q2H_m+ggG~oe&~Ti&>Q&Ovnt?r$Rf&;vct3#(uR*27-d z4u@bboPdLH4vxW`n@KrPA^p$;hoBc8hY>gp zd*K2cf;pRT4~yU&tbjRXq#t@<1bSgNjKBfd3rFA(oPZN>7S2I+C+=@0{m=tF&hZgLGWpD_3;RFoAIoJtvUQPO;2M$3m zJPspp8ur2kI0SPlaSw~&9ISvjd<&`;dSC>4VKD6 zfgb3ERWJhUVJ~clL$DW4z(F_%$6(Iwq#t_V9Q4Aht+R=}JJ(hof_0==*sM&JPKg(GkXPQVE`3+JG! 
z!u=-F4?WNWy|4;KU_I=G?QjV8!U;GC=inI3xs&um51fNun6(Y}(1N|N3=TmroPa?% z2RmWTX3`Hma0q(gaTtNquoo`CA((SF?qLy}gB38RlJr9ljKC_`Ezeu<4-Uc!I1T4u z-d6m14f8%&1bbi=?Ak#(Vfjwt14FMRebCy4JD9hde5z&~t|43)yNCRNd9NcKupAD+ zd3YRV+)MnS7pm>d$)E*`Y6%Z^!wAg2pL~Uf;RtMh0CzBd5AJp_?mkF7;VI~av4?O6 z`(Q8ZsmC1*`X~ojzZZ8qu@8%&)j)W#$WOk*8rTgh0^|?$!sD>Ck$M4-!z?fM_E8_; zWRU!Yy-nl~tZJtGp%um*R4urJUYPY-<_FM%gAwux9)q=T=n?Ws%->JAu>B*X6V5@k zi}}w-i7zbr8`23EhKVPf_#ENEs=p&VnDu$w!Q2tT+fBTn1}PJ9P@aA=D93io`Uc-=#M|C#i_$zNh0`d`F8T!3S+{nw-i4#5RD4s%{deT7AE z9##nFNf*rcE&jj-I0UN~$bVQ5)xFdYm3x+;RPwdtw~_7tV72%N4+S0k{X zGF{EW!!YZ9%(tW~5A<$LSGDjkjKHzG(p4`ku1Z&fV!o4bU=Lh?GrQ82^#JLtAsjdg z>tWYD>8cxc!vWX>N8rG{gbVxcBRzX~h921e0Pf@&w!;P32dxLm57-N*;V|Ua_-Svr zhh-1p9#+?-D=+MaK{yAy;dFhv8iF0ZbTtKY_u~E`+7rx!5$J)vunP9WdUyo3!x7jE zkHbMY1;=1P1Mz{yZ~+GW_*X}Gfpk>?z59p<9D?2O*$3;3dMFYJPYa2$@oDL4gZ;T+6sr5t^P2Q4@fB_41rMm*pG?3U+)!~^CY zA|7xOs=d^=N2xDx; z`N65ji7%{rE9DM{VUC~n*+V^sbAL@egV}E<9M}N|p?XKUIu7^1Sr~yTfH};AC*Dmw z;S8*Y)gL6EU_Bg!5jX~Gp29sGgjtQ`11y3yA0oc+$VbyvCp-xUVCmoB4%WgcIP*!u z-AA~iln?A3Cp?((ZQ={-pCe!3i3#!(&cOv3{4U`Hd7dO3*#11_33Go!{NTh-@gJV} zIsP|M?_b1!cobH_QCJU;!FD(fd*Sc`@q-yJkuKPMiu`z(^!yR`aQIKShq-Ens)zEv za67C|TcHMEI~;*3eTABam8(~%oc-88dxa{4eQQ>zAgsw*2(!R;XUMaP0X6!dpPhc4=hh7+iLD&g9;Rx)5 z)3@Lro`O>_uMB@+Da;BJPWcMufk$pzp=#mi9i$V^!#=1gh&RlJ$6*beh4oOikS>@9 zW6%S~?<78OY|9E2fg{^U2lU^)f;LMzZYLgaVF&Ib#Ot-VgK9V7!+F>)&o#J%?e~z+ zaPmIFg~2`K^CPs+2T4Cv50PIm8@9uI*b58bAe^cvJXr1{zu`2rT50dF49-C>T!2AX zyqEXFVFV7sVR#%)25=8+8%b}JcCrumu!66!)xs*s&j_g??1kNM2oAytI0t88`@_UP zM!rK2%-B!(um(n8Pc!)ry>BI4I0I*4*V}N{rc`|o?%>J4#vMHRcG3;U-$A{Eo?glu z_QGkH^-l8n0P%qqtbW%DRRM=#J?wmf^uiuE0Q=wwJOU@+5S)dhP#vTl!8|w)J#Z3M z3EzW%qJNP1z`m#O4-UW)coa^+VK@tqeHj1RiQh-?4|)gj5Bi7j51xWSnDJ@qGt7p4 zunLaA;Fs|Sc0qNBav8%toc{{(fPK%9F4zwT-~b$fN8ki3`!?<#CEq7js3O??UGftS zO;QeU{CkumEPerZFl&nZgcXpla;upi5FR`Ut6=57;tqDhUO4+R$^mx%ocae(ous{W zke(N*H}LRpR;YS7@muV}>ILk>!Ix;CaQGDE{TSwdqP$>tT80Y3Q?MHjtjJJ9a0X7m z{)`MY2j^hU8>r8)2##cCs46%H>)~Kv-wBQ`{z^pftZfHR- 
zEQ7Vs3s1s!SiB1V;N0pAH3Cn;2{@mfq2!k?rq^VsyiU@89`4~RtcTs_XQ)omFUU{> zFbj^rY&ZdP;4I9A>M!WOU>+=h9;ov02WG=|7`v48!0K0#4medn{Qr`2E+qc2>l*R{ zo`gX-zm|Bzfg;?&{B;@XIJDq2EPxBJ2jB~mEA~+SqQgNr z`VjGjQ*av2!v&aAM||H*d|?r+si(Z)C=9|pAO666H~=j;0t?^-EP}JJ6sosSzhDuZ zguUtO|KhgGl_df_0fg=4TD zPQf6YgAth3OBW9<*a^#E?K_AU9ER;MrmgyC~Opk}uGL^RNO|9H#!l z^7jxAnEhVd!4q&AcD;{q-$niY0QCS4_Y)rMdXjno^FK(uMTZlx=qch0`(e%#^fLpv zgWb>z)rV-WFa~?!_(up2F8mGQ!R(Kb|F9S49mf42>4bf-79NEWSp5HR4@==7EQe#T z5>CNtI0tKB*1IWxXhAJwhxKq~jCjGxuMjVI{Hw$Zjy^-Y-bX$hBVMroYs3roe4Ti~j&Bez7&=b8VEsRk z53uH&4BBMAU)7~lJvkm zza%{{`0u0##$F^nuyc;|43NLSB0aG0*Q5s?`3>oTL-V8uj{cVPz~jFoJs+a|{0HfQ za|@&gp87rMfjKV`KbZFi;s>o$#19tzk$Ui9%HdDc1L4b*k5FZ*Zed!c8WN^wswr5y zB2%f4ke-Z8RV2*JR9@lAO!mm&s!TN?%*s^9g=b~5XGT3-ovHGU(C*L9R29PPOcfNK zlc{=zYckb{@Z3x_EzHSOSsx|8&dXFDSap7;suf<4sXB$ZnQBmYVWyf8@?!)GLcZH? z{SEa$FH==P?i%CE9uP6O5SVH=R z8%V#Yx;6lU7^2D<;S>#el{CAmxSBFCz7H| zm~`dJkG==S!+j!4p0t0%Z~=3+f8&?34N{Z9ha8cZ29A2F5_*S&rU^-Y$|i!CxN5d=KVxn3svUKRN%zyaV$r`s?x)JS4h* zcQ$(_^!Jz-^iu|szx4_JJNf&*^WUG_pNFAI=Q#F@=>H;8NJ;)t9@>_P7u&=tBK?bK zPPy9oEAh%DyiUw3mob<49KpN_e^T>P+}B_}PX9BJ{VZ_#eaGB>jypxfK7!pTn4sW8NVy z;_){5Cz4#^&nWgR`k!hM;`U7X#h()rAIv=xAGh}TzFeJ#j10=-DE)@`V_)KbK6(xP z$sW<-`D@}K{+DCkEB#NZxztb5xFnDK*z?m5nVPC)9sdqnFBd5#AH-ch=AF1BYVk|- zVf4ID(H|$&ABiv192FXJ&ku3O@UI3i*CkQCCg;hF52lOGu&VmdIhKiE;vd76s{M?e zFY)v*)-SqqmH%1lgSd($*AsES2XhbR?PBh@?_Vq*x^vZy`H@6(=|AP!zQms+*vlAp zua9DW4D*^-FrUHv2PmNbDQa|RSm^*bztcgBsyOVq8lKXM=qu3X}olEo?7yA;= zQ?`BSr1CcE@jQn4QOun8JO&P1O?uRu3GiDQ$lljt6FnV-3qUw^KSS8g`O6kNHvY*u}gC^Ui0NZ|5=0`!J7)`^9=;)(xU@NjeBlO=C~;!ns5rMxSxf$I)le ziyX^P?9ZStpv%2;NjaZFpGTh)ahZ6Cy@GSd>u(cZsSj@T-p;pb?8&?DRGs8}p}fVP z5avfQ?_Oe_JU={)z3lJch1g4qm&yNO%quWI;$kjooW%SX=85T*JUod$hF&V}QtF%7 z&0d3B%oFQPF?s>I_}9*S@=V618ti9(j}O+JOQ#=Qq0fq#SZ@;MixSVn*pu%7c*LGd zJclu#z&uaP?Mvc6ZtJ{u9KS@LL7#p;IXre-OJ|VqSxJG3G@gIO&voE-{Z`UW$3O2#)`HK48lCFy{4`2gN*Pz9#O6F(1Ud zRLoucpTvCBF;B@qaX*jw1m@l1J|2FSJel^NL))El4`1@F6!T8ZjSKBe*R^W&2s;18 
zFA2|&?)ko3xtsB;3-blcGvcmH-Ib1Z20b;LVf0D##QG)i8b?2#qD%dqK_3;J`Olfu z-`w-)pQe|FTa4a~K7u#SC1p^FUi2g4bO!CiaVPoGfxBZfspE%9t+bbZ%=3Oi+{KYo z&d$71_E(U_-ZAVQ#a@}z`&4^Ka!Ed%#NOyn8BbUboGu^4-~96l|0HF@{bD^~{GmT{ zF7dY#d$ZUpm++nTkQh(dr?4FeW7wbnr9Gdvb;*w&^!$IPzKG^hZ;xVLk9kd^x!iL} ze8(|AjCs2V%jARDo5$V^_WHzLy#BAWTeyhEzx)gE=0(;#A}r&dq^A;l z{9K%$oWeZk*MuVuQv8?j^YOd~bBcD^^h-FE*z^9z?vGRYD~X?)H>m5)#&5s^)C7hdeB8bg&uLyb1$S0x#-2{oi2JMdbf+d z2ff!tkD>Ru=soBIF8UGlK^J`#eaJ% zT=Z)6X&2p(KI@`)pwGGJhtU^Y^rPtNw=VS`UB0VH^5R!6cPG$uT=aSLJQqFtQtH2p zUVvWYqL-t4T=W|BG8a9BUg4s5p;x)+{pem7eHgvgMIT46chP6igD(0h^oWa|dl~iL zMK4D0bkQr(yIu4>=)Ep_486}q??E4M(T|`Hy6B_mLoWIx`iP5u5`D}?&tNb(L2!RT=c`}3oiOmboINX_52ummWzG@J;z0#N6&N7voELq zyXXbzMJ{?dy2nMYK`(RBL+BMQdKY??i{6j!bYhCnl^m-S420iGapF)qg=($%= z|6TNA^iCJO6203+--F)kqQ}ttT=X9F0T=xU`k;$Giaz9`Poj^w=qJ&~T=Wc%ypOx+ z`REfadMWypi(ZXB?V|h9XI=CT^f?#(F#3XveiU8(hfDoO&vMaEpy#;g^XPdldbUOV zchL*b<-7l><4ZZZ$3?F}FLTjD=oKz{7kZV8-jD8e(TCA%UG#DEdKY~LJ?NsJLXWuU zxmQyEUG!q~P8Yorz1v0KgWl_+$I$y+^d9s97yStOpo>0=KIEcLqK~-fC(*}T^o*;h z|1Nqy`h<&KiazC{SEEn6=zjEB7rg_0&P6|rzTl!CMVHNp)OvUfJciur*PxfV=ppn97rhI;%0=%-_qyoA=(R5TIC{N{ zK7$@~(NCdAT=ZNP|Lrb%F?y$qUWwlAqVGZPb(I?SI zT=bLZV=j6|A@$!y&qtqd(M!>%T=Z)6X&2p(KI@`)pwGGJhtU^Y^rPtN_e<;HG4w1K z{RDcBi$0H@=b~p{L;ZKr3($*P^m25Mi(Z3X=AwttD_rz0^ePvu&IcMGd{IL*1R_;&*P87uhiJNJwJnSEZ# zk5icUWA6TbRF1gPdy@t0DiN7`2FIvfWwMB8Wk8QEnfj#-{ zA^pC`GWH}LN3hq=%Q?N`pG$ch!#tbc9y*e2uJ^OW{S4;4m`j+c=SiYvT+6;W<|+BK znLKRSxNsngh# z_)KCi=j=4qmBvFnK4#xr!kfoDmd$tMxnHgwh`s#l@C$o0uJ+9Ni%viGde2#s|9U@P z{PAO+yT*>slJgOhn_{mAdy#X~T;3O0%m)c?7<)&sr}NXvuY~i-Nz6}SE`G-IQ|`G$ zo5!60C8(13K$oN+NiGQ|pGG%+UYdD7!wJVerxbt6G4DR#uII`9i5%VQOYHfvS9?L4 zIcG?=7k^Js!t0Unu;&(UF+Ym=1m^lZwUlxd^Kr~aa?{Ltf|Jkaa*3ZOF+Ya+NfDCc z5s$C5%yh=ycoR`eMa0b=107pRbyUxQJQ{#In`Xk4`Dunc~7$Yq;@9u4r4F> z;xyIiRzCK*pQLvbdof-f8WsOi&Rs~VI)Qo4CA8}mJh=5QaxV9QJaMhdJG+E&DWBiK z5FwtwS@NXKi!t|J&TmnOkd%Ms+`1a`oGbWE2{Di7k8v;g8p6B+^K3DzQBgxBF$D<&tp9(W}wtBwk7Rx!7Ne zdq4Jao9+6|vs~i71HFph$jOrSnbe+EXI5p4Ez|CfV6W&AJN$V5EVj#InENsB69-B0 
zFz;8)V15E~iHmbdzMev#La%o%H#5FD_9ec>H;@le#xaSnTfSyi<%+*G*z+HtelBOv z@l^ce(}n#~2J52|kLCI)+f6AP#{SS>F>gAbhcm4IlJ=9htL$SQCE?hY)W3`y=}X?X zq~AU)IV|P{n0xq5qE4wN=@m&YdW*l6nD_Sc8&=#qm*{)YhtS0j=Mp`JK8!B+&L#If zw*5q1^5+QpQS2)T|1|xPxShm)#|M+gFDbv1=rQzp?o;cDNOG0($>mSQQ}kD75T14< zZunH=ZqJ97rpu3Bo1!DB4*t}j>vpug--JDWQ_8J>7xPNYYcZF&rH;e7NPf)sV4fHcao2%fjXomo zoOUhuToTWI%x5q^llO|m{y6r#zr_6K75X#rTdqnzr?B7k_vFL5JVcgxpVy2dmu7Cu z&%FDJ%-2|%)mLS1FUZ_+P3F!boJpE1Z>If>r_TSA$6-lt2z%4m8|OZrUKzJ|6!RX; zd!9|n58GVAIf{9~xB2ZV*ZwL88zxBr8X zo>XJM^xyamF7Y6-ALdcwAHuu~^932yr&Ab@w;kx_sZ^bm_I7>2_ao-=(Uo6iTRQ6zNgsh!rthA@Y{0|-z0n427T@YHTH-24YgB}j?>te_;g^u zepJUiBeL2}5sRt*}E%aLM7xP70n<<}DnAc)nw4^;P=DYY)un~I< zKFRgk_#@_(m=9tuWwF?UC2l7%ug82&93;0Z^LtpDp=qATa0OHRH z^a6CbcP_b~N4L;(91)71O+@libjjBO^c?hRv7eH!;;s_&a?BIM7k7Kmi_yzn>X+nm z2j*3o{I=K<^TqkI*gJx~zLnHJH+u=|;c@IuuS!?-xLK@+W_*!$brN$ci}+;luxx*x zC$4op$iI#FGdYp(S-RF2`}>oX*c&@Hxm@ybvIl(_-6I}4<9ovQm^&~(hIx^g+n1#G zF#06A&hNy2#IXd$|55BMU{ALL7ynOSUX+ubwBFL?kKTh`pWvS^|Jw-{J+++{pl6(y zoIbH%j=q5X)Ou8dK8LQ;ZHFQDL+DxfquYzEOFFvHyV0|xd{gR|E`Q7;=iBB>+N}v) z{29ldm*1o;6Tjp6kx>4a2Qfc>2J>18H~$X)@Y|Mp-M&owmT)VvS9~$QiO0QL{Qa1Z zV;&OoSMa9?dpUV_dq~L#3Fj#0!AtD?b1@&sd>Zri#Bl6=K}pX^%*!rKF?aS2#0{6& z%dMb1us6-U`#5I4?^cezip$c~q}W?hF3IahUH;gg!G1*S+n2Pe!3c$cD8K0 zT>ReSF!rZjm0W+tpK)}{N;m7%)A%FlJca$()#>V()Q2VWF%y5WSFnk4xh8e|qh2NS z^J1?Wd$rhGTF%*+noI1(u-A{hnACT-e4}3}#i$>9vumlZV(&ER!G^vhpC@tGzAim! z-6{3wBzgzB&i{D(Fuk^zXWvO4V%{Y_#myJz=f#+Nu1z=l(@uQEE|-K`jd=)jxpywP z_oD~V7juF3g5H6?2fcnJ4~hMpGDS(M4u7i720n#_WZZn^ADH&3t`@exmWy2F_&<9Fh7p@9x3l- z>Z7E07<;ES;!g$-?&(e1*PX$B{vCF|9uIf1+%qbr4|6w{d@sN}gt^4gx#Yeay&YY? 
zx9lDd(@*%ZcVa93mh4wL^~ULUOgWqVN{l6ahp}II59K2TaT@y)k7L*`3fS$*H9yRL z-%0Gn8twdD((X*DOZd54DDQoYn^F$0{w05(r4sw|!Q}cR{_H`YLvK$%y+4wU9_*Lx z=Qmy5${%yN#NIIW`a-X4Pr^Tey@h7xv1gd?*;}c<9n5!LDc$ydjKrfH`*V-cu2=Ao zT3?aml6d&BH`z%)o5{l&bvx>_V0gGaW3_qrS9`>y<7ZUrr(ov zS7I;kz3Hkli-$AF&lvWLpGr5s5AzE3rXPEg*wg#Xr}0JBk5GT5o}I?N`K0*EZ=Ekl zJB{bdVm#+DxBe+T>HC_+Sk2x}e}LX2?oLy0r98{ApYsFKW0$KD+FGS1N7 z!`MIeBkaFIzpcw3djmfvA0!}`be_OG_OHq1EAgI352EXR7uWW0=56^q7#C)kFG+np zgZ!w*{>*H;Vu)I%UorEr81}rspdOv3U+l+T0eO^kbe)BcI1^^8C`AYmZ|7|FQS> z@ljRR;`cst0+~qy1oA=%(IgNx8K|NLNGh1gi#7y`it(jZO?XiQp;aulC?Eqw4J5S; zMp4QwLD6PLa*H)6x#bp6>WxAzRJ2}8OAypRu!8cE(O{nMIx|CNj!A~!?|DAYU!Biq zJ~Ok{+H0@9_S$Q&z4qQ`TEPeFP5AByz883!l&sP&d7f72f(vr zHQ~GSs?wjG&>im}E|*F#z}`nslk-n{ujiYlfIkJk9(Ja9^ZdT7(SGRWChER(Wx?YD zDx>}2Gx?8)+7SQX(~s!634C1=&wQ79LhPoyeL{Bty3z6<5yGbzI;k(>X6EH&y~lAZ z_zdtB;LG{mJ#TmSR}uII@IB?-r2H)KYPhbPe02AB_k30bz8w56!Ji)|;Qh}D+6X=( zg=hNrP+r=(3;Y!DdjxN8r$0X~c6%JU6VP>Aqc3iXC}mvt08at_VGr2Y4Oux*p)0fL8%m;(z?Fcs_RlpPR~mOhg8t^ci0d0H1PM zZ+eL}h^2hQE$9V0vwSJQTY;PPl?{A<5A;)j?*blcl5ZC9ZFW6mpH}K!1iTG+vBcH> zm%!_QCymine2_q5N1K4h=kQ++LTj=UDYplFE%=~fMqGRZ_#zWr_;muG3w(m`3-}rN z&P)75%B{>N;L`^1BaDBL`lmp@;5t1d?Huf~m{p)j)?>KbU68*dTzqiSn=)3R~;H@UO$d?U#4{&)-*@ehA3;Hpo=tcZ- zFXL0%Q43wht-A4i(cb8!-9>!B3yh&-$JhRs>zzUHmr(ynC{vS>SWPZ|n~~O#d795B;W@p>`~CZvws! 
z`0Ww==qYYx)oQdKx;^*#$D3*Xm;O5i{sj1P;d24aUyaC&yzv=7rwio8~qE$@dd=KM*E@9c^J7b zkUrxM{O&?s-FX4@qW@CpH!s$G=em0EEAzpxTjC!NJ@sQy-e3BCBlOiv`7ar%znAeN z_3nW#iDc1yzMr?lpz}k=!OsP6_=%qMNipFQ|6}~}V}APy$RG549-?_Ph-2mc}XU_UMO)&g$l&~gIb1^o6N;0J&g_W(Zy zJO_A;)Z>2%pO`zbBj6%S;3e=3;A4Qx_rOcOPw>;5;ibScq4&+Oz7j$+7kF9^@G9U* zJ;2uij|CpAC$Wbn;0k!1unw_@?iNXX2cVk^okQsSFDZWtcpdOvGHyf4cgK%XKIJaj z&3{LAbM?g)*}yY^a}=r9@+qM1gzgl{1dd{v=zMYRBJew&)K$iJ?AjN{uLHkpmEPxh zaH(e(bWZ-Gs-Ev5cH@s9kAPSFXVoIXpSMS!R;8U0cOxr!aWjFJ(SG2?J;1YpXZHY~ z0z9n;_$=TtJ-`}SqD zmrUx7DTCLu{O8mK)GPE8pr896tW)~Ri~9FVM9#U;w>E}(*WpDJa1J3&gUq1?;J>VCC?<*fAa-4$h&KLEN{Fm^LDQ8{;-kY6_20!Jc@7hTb z@SGmtcLL8a!KI%0z+-`T^4)B&M*o994t}fXwXgHtpoM-H^z%0nUrN7)p_lqkL7&8b zp;hy}dpvZVv zbrEzqZ}DGr@Cv*{jywHud0yv*$WaUZHV!*WHwIo}*BgO10I$>d(bK%mK9=;y9_Zpf z(0%uzO#LDFNQkEy)qX*BrbAAWa!ECPO_2l$=9PxSzw z54;1o@C>}9owa^?!w>Y-PH3b|6Lg*b>-PhG21fgVPx%Nt5!iY8yZ!ue@YUdh^=tJ1 zEXGd{@D$+XJ;1YpOFd>jQ-J3HkMYYS9~r!70Ur&#LICFZHpq@@p_>a`@Jr;|2)wih z_%`4ZOmNY|e&89vbA*v8{uh3y{N;R_5h9;nWA5?EDLeu%q0az527YGx3Bc2U2e()3 zz7%*oaHAi?%sUy-R|);!X}2`J7ySs|P0$_xhwi(#8D?Ay-viKZKFEJ7^1a(0Lj9QV zjkp(kgl>x1eQ)vx-4o1)KK>B&!uPzt>bA2|@DA`Xg7?2fk8^=%_W-W~o@s)M{Of?H z057ueqnGg@bi1I-Im~}hiu`8%0g{)r^EhYV_- z6MTmN%=HG@^(^T2eDPiTSpKZ~5Zps)V2XE_Cb1pI+M@DU8m zG5n|Id}z=2r+nun;r!%_*U>$QYPRhg32!f zU*zqb|2ps+!7u2g{5J5Nec%s(Fa5gr`a8jI1@G*Ie@Z2B;y1n5KLPv$ecKG`g<<;G5jZQZ}QfHukHmea%}?N3f?U5F5vQCzh?LW;5&MNp8~$Q2YAd+ z$mjI{&j7xv2lxcw&K}^Uz#r%VJ{Nd-5AZ7BH=5w0&vn2Zz(3@Bs9*Kv54M5t0Dq_8 zP5xBM9{``-)jPfu`~%=ON%_F|49b_K{1h7julkr*rJf?__N#~<@7)M~7I>@HJH871 z1n{dw4pTd1e%T0q6L=#&>1BQqx;@bCwM6*tP4y8koPus+1phH9_4PrQazAS67oM&N zx>L|Kh@Sf3Hy673krBRiOpkGv?|eiZbgkA1-~LA*{I)??8s(R-54{|LPX1H+S!u7C zPGG!5e=!7#bEEsFlm5(x?f`VX`K40uV`6&8&j()xzBfNmC;a;;-z5A8^j>~H_-gPy z$tUeRCH%n~=ZpH#Z^}G$5*wjLOB~iyf3e>neyIpL`LAvH9(akK?v(Pts{|MrUw#6? 
zF9QESANY0P_xFOAy0(GOiT`fB`+;WzcS>ERdZpYc@O9wL{xar4`lSbW2Jrbkz$XBo z)dRc~cxeytxxkBhfL8$*d4uJVep&}S1NaGPhuLn|#0u1~+g;EdzW}=9&^0B5=NJDo z`X(_v-2~{0hK8q`1)coIdboDfLbv|{=$fEgH!M8A1JKRlKi$LC8!?}`eRxC<=WNJ# z`Sm;+U5|#Y1G=UNe)KdR0?!@s_sgBoFG>qve-(5^>EY=%K^HS3Jl%fi_Fn*9Cv@vZ z_T6r=!;FX6cgYA(HwC)vOTx=HAG*#9pj!vsu1mx7+XY?iW#Q?LLs!b7-*EO2|8v%T zS>f9|0lKyepqm9A*E`V+{bn7k;ua{Ej zDy|4mw+OoIF@4J?^TbBzI-zUh`+0vJ6sPS1zw1ik9>Mo9pB;zp*{krgVd9i{CgufK zhvzo|x+&S=>1IKfHa0w6Ep(?YfUXI;mT}?v9e{4#`0#WQ4>J$v;MBtCWi)ifx#8(b zp-cOIc)CT58BmlNX+DE_7B$ zc)B|1S}%ZZ8+2Rq!}B`=-Kv7{bTJDVw}s*9vY~Sng{Qj{x}@vE(^Wxt;sWS4LDzD9 zcz*k#tN%fGx=!d86o;qFSj75tQr~p(;+mk#ED4QAGl0(mo&h{2j9u13XPq3Lt_iy1 z7eIFay1i4v^NU!_{Pe@{bfckrc4~OKQs@@l5T0%kbmcdOr`rf!(M{p$_CS|8Ej-;R z=wfaTPnYs2c6kAGMbPap4bN{bbX#x1KEl{R9dsLS4NtcXx>dLJU9Z^X5$NXM{$0E5 z1pWZs11v^~P7hyi6LhT?Kz9JTtuw;&i&%;s%?wXB z8oG*E;ps}DbKDcYy^Ekrxi>uBM(8@A3)jDUpxb+2c)C;2HB^MBOIb#oFgrY55p=iD z2~Rf{x-pgE+gk@+%-ryF+n{TME}UMDK)2&3;pt+QD#gl_ZC!qa6`kx!c+o^A?scO1Hw1>x(Be+)nQaCo{2&@Ff*Jl!nlZd@3it`@q?Md9h1pi_&(>*WA+ z2QGjv;&J@sqv7iv4c)3G;ps}Dt8j*=TLhhBX?VJg&?PMkPqzoU6VUZ~ZxXd-@ID3I zp5@``Ql7w1R)w#(2s-Cu;pyf=cl+bv>FS^x^F(;MZO~b(!_ysst`)j)dWoqfr@SIO zT{d*jt_)xAozOi{6P~ULy5idKbeo_{`(=2#{m^w>09_|^d!G#7-i#IOKm95^-4y7m zSM@EQ7dkAd*p1wM_Wto%Re1)YC{KIQEQ zwGr~5b3w+FqJ&@kO8g)A7L6YlkmoIeK4*7?;%Y-r`6u}sXX6~nT<{hkBllI)86c z)-m$Z9+{>h7F?_ik4ZItiySGucct;XNP*-JjQtPGZ?wVz7d`UNfB1<2OZg(+&!%gt zoX@~Z%HPTRIPeJ~Uw3&+Ne~0kUlngPlIOqo=d*C&nE&Yin|?(DE-H8`qE6Qvwf*nq zcNY-8O}sZ}`Jd}u;FAmbS>03+{QLE94g6aJ|JK02HSli@{C}W1xsl2Va)`+Z)$XX}#9c32xncH;tsT(5PfwI;rN5?73 zex+-kHeEGa7ni?honKvJO;@hg7nIxfn4{xpSJxJ4^C9YK8>GAoQ&iorc;D7f)wL~m zxVP|_tt~SKyR4eS+whU{z6`uoPw;$i4){oiw_G`=C4wU*T*pyOPd&fcE~(>jPkYbK zNb;kj$S*)q?uTkcMSl*9Wsi-W$-`{08a%5~6^ z(B5iM-u$kvQfW7RTgUNTqsZD{3m_%u+5JJI1C0YCNU`Kytk#(FN#W#zl3sonf%Zr#bQuAwWC zBeNfUfIW@PS1ysM;c-Vt3;nmEfA6@Z@ZaDMrPPgf>?hUp?0o4%NAsZ?hr8U8_rYY* zB|5YWF7!IPx;Arx3q2=#uDwjvy-2$Rr!+^i*s#$LY04$vzZ!IL$GL?XcB)()-}ZRB zy4F}@3cNP#;ce=)B4?f!*G_0zYU`KZ;YQY9!Vg=-?o&OJXfuBs)@+gRa%ma7^4(uW 
z7IX;j|J$LQnx?W`tc<(juTd{q$Gup7bu0nm^>6?}J)D6-2 zx)}F4U3b;bRQsG(d<&mDv8(BP-@tbV-^J#u`Rz<_PGcQdC${QHbWY3VyZF@v ze{52GY6Necx25s)DAoMc)v9^-a8*~$xUg#DU9IC)b62YJmOtuvO7(LzF6HwuzWxpB zv0m()hCkkqUw7^B*V`JY8oiOBvfSgBWmu3biGEr7lEaN0!S(l4ck3X0PrC1Fvy36% zSksI-$AYfNpr1E}5TNHLgna$}p4#2+{zM%Pkfwx9FnprBxx9V~BRXo;9${%IU zNY!lZ8yGX^78WpH<-5Dy^!byUX&>+=WVWZOhBHOuz+v#iN!bh!A zV{-6OR(ryws%{Z}=k%b9I)ddDIqS8K@uw*J5PkI5Z@bnUVt%MW4#sPfwX>`xRf}(D z*;<;~X>Fl@$KqQJSp#yNjx6k0m!jg!^7Qv>zg_TNO+R(hY}&pOTUhcvH7`pW;3|1B z)m06TT41$H$GiT_lK#orl{t;Jfi=sJvmkzit6od}Utf&c%>{L_uHk$)%FZ3{N(#UyT;d{C} zkdOXELG5L(m;n8(OI)@9eB0G7nNQ()qF_@bx1 z(5|dk#=G93U88ANi7lq4igs1eE(h(JMZ2nKSHFKap5l2kTdsqjv@bo<*S=IOy1j_@ z8Td%Gg?TdGC{H^DHow2xlH-GIuA;pH+uu)Zxf+>o2m4>LxQuTJ{Ms>ORMc&wa^qYlPM9rVN+SLV|{&Dgb75l>vA=E;~c_{8KHe4@ms z;M;=jS;i4EZdRKTT-RTs>KZBEIVic~>-#i!PcdZI@`E`{0voc~& zeA>r@w8kStEsdY1>W#PH8ybAR!PBrjx#R4)t~H6V%JsKz4_4o1Q`t{2<`kdwja_{3 z->EZoM|L|ty7W|k=W52g>*z>}EB?g6FSiydSJyx_FIls=at|rj*Eh$xJMJ6eu7|cY za#ABP(~0!zSl8j@y5Gmfd0HbQ8wY2ox&ZAVeNtnG-p{2*D%WA=2F)RJKpZ}>f8*8o zM(ZNSQxXGrMX7mhFDS1AJ$Ch1^YGiHE#H<^lWLo{6kQ(t)>VfD`+^?Gh&y*`2WHN3yy*sy%a)>rkot=A+oFBQgZ-MT!kL*@xuB6nz-|jXtm$>2q{p zZOla%A}e$Ed08Jd$=a$%F~<$y9m6}iv2C)}{MF4`bFA>aO>6G@oo|ja<`{GtJin#E z>&$KcjcncX!w!6{_}tU1H^k@uoHjm$9Y`E`Ie(3|Bgp3-LbnNipBu2vGq=6jAhs!b z&c@cQyrb|(lkmBD_@ioKH1WG7`8lrL_@XJZXXjMniwvKMFDei6v3JS1^6}WR0>7OJ zk5qUxU_*vKJLL0cqh?p&&!W%s5PxR)TKw5oACE0&IU3^patsbH2XY!?x2>aV3uA7T zs)};Wrf>G5moo8@v?(y3$XpPZPsC@+oG^;=M*HxUnZEDFoNx)>4LR|RBLZ-IqWCz2 z9$!dK*Y_PCNSxvG_xL{I41sShz~_l?%9FWhkSeXfkBe^X$YEt(%VJ(zZ;5Ijv9x;a zLxJ*r%pEJybD*zu{GrT=M$9xX$Cx{!=`Yc>;rmT%DZdjA{=d~cP%oyK?n90|>;Dt!H8 z%!}fq19r>!+pHcK;1c`&&u_bi(k~|py8COD%;En2+Fa1xU#klI{bkVm`^)&=-CuiU z4wwEC-R$()%4WvEs-A3R40QDCD(P4MT)(%VyTA4Z&GoBft{1swj#pd9yJWr$$nB3k z47o+G22W&_*aI4)4Kha>^nSe>-~D>!yCJtf_V_=M=Y-geT6cvpcLnq-dc7UJn%0{3 zyE~cpm}Atrvg){Vw(7U>)4}VQ=IRE({r1ibk7)~i>pHDckRzF7EBKZ%8XzW(%ZhnISdbq?cam38O$S?lkZc!+ti zmDs#B(mCNOe}#uUO~>%L&W8otjVmbF|A5@8@?@Tpg93R+@~*x@i`#*9#P<>X^zj+eF1y{sk5`>STLg;O`Y 
zVa!Pm;9t|$G(J1{WLb6o6*1bex{X}I`W<;EM>D@zVjS)wW!-(9ku$LFzK_o;b;+?m zsmoT@vmQp4(yB3!tyQe0B^OXdPN4N_=UNq0R3r1Bc6`z5bcfnEBIXT)&%N+^O!fC{ zPq7};C_5r1|2yz{*4C79NuB2U(){(MDYaYb>qqWJ>Qo|wzs?acJNuMbgMT=h)^lB@ zpQ0O)e*`*+ARiJ*{^Cg5uC-M=wBHw~49`8hQ`O=x3)BaOR1mFwQP=KJoK)?ec`$EY_GqgKfJ zv!8SIG2+daU(oj1h&MMet{RqK+|dTSgqX8(r*e6IJJG%AcN5$Vs$-ljT@B9D)I7I- z@zy`1%ZBB5b^vp^m#1`C$a7UO7KE>R`Q1kBCVg~n;SKJue#ST^CX+U}^}9MUHEYc& z+PfBe4E9YthJ4k}EXrk3%ckLbh*4ARamJ%+mQ5Rhy$Ng-b&z)(y8~JbkNotqX)>lS z0bUL~z(Z&+Bi547vwR9&fJZX43ff>FX^hjlWLFVm;N@?+*1W`9F^)H~g)ZaRiT~jt z->wPZUjX+TKYpDbPx-y7YN#t4{I9`DUn%l5M=Ml(V6A@Y>#jBAXIEJx3cXnu<$E(V z<*kL5Sf%-Ma8_05oz1+Gl{*OEs7{J66Z))lA8kA1k$GR*y(&F6#-&;>Cf3zF*_3TW zj&lolHoW&u*U$#3b3w3P7m#3Ud{gSs88CosQ zY45_T0UuBur^d2I@yw*o`S?D3LusI(2&sUyICJ*V_zSMCtojzz>Jox6HeA_klAZxu*__xn>n@95Wjg0N>T2#9O zfA*j7{z8xOoJ?@!?B$(jiLJ~>-c`(pd1qK}Q*ISJ4l%B=U2iUS6c?e(Uc)LYwtjA5 zA#1mMcO1A=Hx#(fMiv=sHz(~!_PY8@oYPp}*OkW`IX7(Cm*0~0sq`N_n`g0)5j=mU z(MBsVh0LKc_iv!DkJ9EjBb2LcN%`+bsU3fv^6>1AEZS<-qH8vhbI+rV<=9mGx$>u| zFP^_dIr%y5sCZ<4M_qrlPxvsFOJz@i9RASYxi8pPwMe@d-!0;+p>0*6{v_aA{zA?S z`6jOxeK5up{c#4q-a*4VI?6P3Vqr`&9v2c<>6(j}f3ww<>(W?DYZqCk8oy6wxh(v) z@Vi>#pbb~KG%NdEej0%pYwsUgTnZY|ho-c~G~%3(yv*T}=U?D>IFrw)c!#(BEA989 zr_;PkG*wfwRJkf|dHPGSA<w4CU`RHT{Hc-2G_phHmSM;8= zVM~;1vOVB~gHbUCZxhv;n(I;9o}ESBd>fZhr34 zvaRdslUD3!y-m&2sHcK@tk9pPj%OlOQw8s{@D!OPS0T13`zl#lVvRMQIFtR7vIKRq zo_;JFrcR1qDWR{0E{eUD+n_rOFWS~Ll=pnbQ;wg{mj3Ew(oE%Yu-+QBylkt)PKPYR zYOWNUq))B7$dXu7(O-E5U(Ozq;K{Xx;s^TiLwxvjGd|IePxj#@-<{Uk)y@B6<^2aT zo?BRiJ;+!oAkUHS_C{W3j1%>L@rq<`=NR!=7yB}j&xzzSpD|)#KV>BEAE>C^#l%V1 zDgB)zDs$yaD#CMVS693M)-ZENA4D$ zsi=?8Oghhxx|rH_=xy+!HI2M#vaONX^a2*8G9&w z$@doc58>%hx*-So@_oU%m+m!*>|qceXwGSpn;(F_+oUhOOIBBmV4WiIPapaBpCvFS znesY}`ORW)wsQxqvX3nDqrkG*!?mV6a*O@3x8$CJ*KC=CRXp?1W<^{oYlc?V8f~l@ z%D?T(#@Fmsvyxm(@o^)Ok$G6=tj%rg!3urD2$ee!zbEukhxJ_7*!RIV3}nrt#nt3p zr%uW~YMRbm)X}x(HQ6&%7${a>45$B0W zQ+1X24Dyhk@)+k3;tS?kD>>LyyT*75_~o)RHE$;4t=OuD6f?IR=KUkewZM}xZ}>Th 
zN2DJ4{Ewk3zMs-|mpQeQ&pQjg9>F^APsa6kOU+1Ssd+5Mv3hRy68B8iZ+8LjdsCLU zYqRs*6&a7Z=Zs$Lo}1n2t}<|zlPkn-*xP(OCC@E%)fv|sbSp+ywdp}jPvz@W{{$af2^eKdY4dxF~%ue?qp`uKBk+QnyN3{BYBx&iwblXN@vC z+u=@EHuP?*xilpoI_1vHD1cWXymq+@9F~)_(fPe88am8J*A@dmXS8n6%+0Ry(fReg z)Vhd%(a`x~gSOh}Lmizjps1%D`g z%3oNQY7?wepI1rauy60up#E4!e*kM<>hFhT%=P@v#kY-H+M2q+XD7~yZ>TZjoofAr z8Q6e|a87ip-%K!YeCL#Dqf_NI2Kp#HMGX<169-HzQxPZAG98aNY3DNh*-XZJfkWF@ zu9j^or;ONd1HRFzt_|VwiMrNSJu-{=61`z&oX1oN4CEJ z-dA5_O@WGRe;&Q?S@Sw`!T;&ncQ(F1N>xv5!?#6Q>mQW$U740S$IaT;Nj(+xr}(>g z{M|9u!obwv$e24 z`{i2Dez}D`v5Px2{26DOn%l@HW=62KA#a!#v120h-A`MWFQ1U7)JgWDT-xCEYnKq8 zw~Ztxz;~Uw{7rHOtRb4(q8;7}i)xm0R}!1GF5fvp%9rcprVoPvQ`G_TYLfa zVJmWmz+)pWlRT1*^;bZqp9Gbcbx8o;rEB}lxq|L}P03T@pJUiBB(9SAY$tWxVd5ow z`}xf8D=g{+>(vvP%z0T^`QDQ6<$Kq&E;nK`)+dv-73fiU$7w6qmdiQ{`%DMEgR@xj zJwNFC^q}ucgT8MF`o7cuU1B}P2J2(x6}ur8VvdOO6keLl#3Wlt_)w$lT7ehMGnWF8gH3* z_KgUa$g!RJMUL&rBeB1cJ7k??d*-oearl*gonc?vFZ*`C>_Tglp{&d)0of0w`f_iF z^#uD4WLK=oQ#9bvPVa}n`URt5ErmXp%MIS?5aidv!VOUDEX=Lu*39^$=fb`Cm;eB`E5?Uw?7`cjR4m$RISo((o5kLY@iPa}pwzin}jX6u0XA*q8M z^JFb#tOXo###$haJsKzL33PkHdL?IUS?k;KSx*mkco|b(=Q8I6=dz^}WM9gl8Jq8w zy6Q$KuNAuLN0s|=@}5q7BYoN=vN}g-xxb*V&Mllwn@h0wN$#_SlAGoH0CA?lC)ZRL ze4NWvqjR8hqOmrnZDNniH%48O`(TXOrOj=fk>lHf`qi42pIgj+gG%%34cUdh9(jEG zRR^mB{Tu9mf@490hx~mTJY?J%`XA8EN8&$OZ%I7p=I>{*qs`du31e>~Cdsvfb{_`s z#cqAEVyZza?KQ9O#&#a8-eOVXq@GLZe;L=$SjfZ2IHyS+5@X63I6~WH-{@0*S3)QK z>7?Hu;*7`B-$wG392NfzdY0b_{0^+U=pTDvZ@=^Y|KbcoFXv1O)xIA~j-EX+vB$Aa z%|*M|E3$aftNXbm-z|N8iZg6-#^lM<#y(B<--amHht&BjZ8!tWi~Qf`Y^zWHM8Et= zA@W}nB)?JCFaP&BBQ3v^_}!)_*^|%R{$6pR8f+cB$cQ0$RH`eNbz>f%t@zCUX37Y{X|v36vRzEhX8@@%KWLhVK3b z9X@T`pJ;=eqaOC1HVh4ELsU>3jI#bV45JP5dnms}mI~z@{{}kJsiWmuNAnvNtFcD* z`Q@R845IVkvnSh$c>?>9A!kqSf6G|g&c-i`EtMsa=QEuFmOZ@}@n!6xG3T;&k+>#u zeX_&_f%S_b{*m&*pV8Q{z|8y%8pe(>PABGjHO=Au3-i3jY$CNv42RK^+pSmL{B$eEk3>ao>0`+#`a*X$S%Y%lS^UMupKc zmS1jvP{yt`Mm1NHk037Ww6cz~(C??En4jtgf_tss@J`H-1y(x3snS+fzq7R5yrS0|15mu|C*;cZ0+EJT_wSbK+ zI@xb`G|#5pZ_{ppi7#48jH}V^PJ~)T2v2QLm(Z;wHKgOKP{=c&S 
ziG3f@q3G%aagK~v(VvfB_kQr#_x*u9<#+Sg($6HYnys|$sBW=mxir_fhaX)Xh0VTg zNv_FbUqR+d$w5o37^RYHzGA(HjoQy*uaoq|==Be1^KM~%Vx#XyqBE{Z500jM@c`yQ zcpcMi_82XyW<6(wTG*qAQc*SREk@r?8Lu8;mp*URlkiQt;b+R(>+;RV;;Z8E<%#yx z%G{hAm?tHE55`>q>=Aspz|1+>ukZVlG4^}Q;miE7ft>Yo=peT+-`jyKXLQ{-55^k<=~v`963LgM2@pJep!ai7~|?Yx(ej4!`0^{ev-PR zT}kKGSAWI4AoE05p=Dp^{W^1H1a{Vsn593q9LX4D9O@R=hG^Avc#vv7!o8zUQdKka z?%odyMCbkO1pe%CD9_jUJXFr)clRdZ4zdTOd!3I*3?JvU7x%v@+5d{@ZEtgZ@U6Jf)DqJs=IwaG5zoZ`r~@~W*B29)h>M{{ibNMj5jASM#~U?nS$;zebvKRpEWIvIinu` zm9)DuFmPHJ?2;=fqKqRk8kb}Uy?{Yx?O`ursJh)bC|og&l#6fawyM*F*Q55H&S5R zG0{D`cG`-V`Uh8BrEhf>I43MESUO=@!LkWYT`TJq-`$8n?A)bW_I-Vyj@?CIfBmpS z@=Ocy3FLQvMBJQ73>Rpdtk=c9@Vj=&GmZJGYmLlH5?8kBL-E6+3t7*^8!<;A@mw1I z)k@4Md-yx?mD7!wgqV|ctIUx*@OSlE!#K%LX!w&O@JS}t97?PwFf-31MnXlV5_?=t$1KM#t?YWM26)~O|OZYd5J@yVHmsdzG?*`8HER4nPMYxU( zwYon|we@(mNAjD;D0^IYu9keI%FZuhGb-NccNa=!C0 zdaBz0J=crNwSCeqS?_GnCpK>DADXY+!1y}1@J4s%4L2~}edl_9i;Njg&ed5O9+W)0 zrETHrRWT7$^;pN3k&kblT8|#{>km%h4EpYo&!$biq&DrRMSNb*=ggGLrdIKOF=fou zw|LJ>d2#CMl$WN?>{#;Nm4CcyYCm#U^7%r>n`S?c?;c4#a^dV?VHKI=Mv@k(h*TuqtyDf(g)$@A+ftG`jecp zC!e*N*nhh9BF44Va}>M#khA1F2CF8?CA2QbMrO+W)i_TVb5d8Aa=#~geE50n>ts7; zkk{e|*|TJ>;qKvm>_w92l<(K@J-Gaz0_9j+e2;T6AJgB5M~eTpdX`|1@S9h|9@rUt zyW&$~_hp7XCwkIv%yM;|Bqr+Y%6^)C?X%R8z~*w_yG2`` zWq(`scTS6x{ZZc-@;C<2wr`dD;0%>}kTHF5h|2x&R*U#bOSXt{^ktqs#a_h!EfW(j`(3L@V zh&Ej_UAYvwsE>)mAEFNb*!CnNmy&s(`u~^u)!&sn!>`l#|9_C@k0J6Tdd{#1<&!7T zvqj|jO1TrbXS_gUL{E?7yQSaWxL5oL`9f?)=8<4uk#6{kzUSUuw7X01x3BYO%KP*HTp-AC5g+@G$G`i>8|9b2~2-(sthUs#E(a-PPzRB<1- z{HfV&M=B7l? 
zYv|K~?&>+yvBi>D!}*=T>p4fo8Iw|>4?VM(ymbIEa>Mf5{bv@F$QQ+tzp%CMU~Q3% zO_jmxHH#ycac1YvMBN#3e#Af7uNm!0v|j=2R>=d>CZS=^pzayQPC4f|FE7(O);w zZ#Oaz+~Dps&QJC;=7Jy6x50S<#`EU%mN?_i@!vQ%=Ip1&mjhFyN4b__I|CWNGH12I z-=GC9wzWs@z_3Puc9qJo8ML~e)+uwItnYy*qHp7SsgVot{E>1FVn*7uSEXHTlsiq# zB6XxuM?Ua$;H;ibV-GXoQ-ItBi?Us<_*2cvxit3eQdR6c z@*qB4T4M~H@RBp3<(#p=mN*ur;(IE85}zsM#osM~w!(;U<2=@j@Ttb$N2*=gDEo#I z7i>VEw4J+8-FpO`WxI^Er^qyrHM>#gxh^BWD|h4n#Fz+P-^Y_*;_xf;jz;`zMU&t#CsCsn)6_B z@O_nj6?wjZ$IsyTD*Yz11oB_z_KU5QGUrs&KG`c2J((q`E^ ztYF@90xS2^C~Qe+?-g3)6&;ns3%y7#ZnNaFh@rg48Jp6t!Fu^o9On^Yd^x_(Ue5f9 zecy(Cw^6sOhq~>1ct#c*@u;oZjqj0lo7_Jr_TwrZjjDbJIP382TY*g< zB0h-wZ|J9jvOlNX+u)0NlV|W&unwz$X7;0H)l0thmCdtp_Kx$WoU@#l$(h{5e(RI( z!7h@Bm1WJ~AO|k_DPJBu(H@)w&j#*>R(t^a9Hm={0R&cpoH7n(p-agZ7DQy8A4`|} z$HEJYwVq_z06ZW|JhBwPU-qlMD@%NcEKm65kohE^c|bm2LmtU7iAr!{mrYS#`T-s% zPmvFnes?ebu`%YSGcIKw@QwS>vw)rcvw-9{nq~j&ooH3pX}aqzwiJ8@u3`B_9pntX zZN$qJGaR|ZUSoUfleZbSa-}URXoF|@ zU9>@+Y^6<--;>;=uN}ViTc>|KuL~o$*our*rFXvC^A#3n_D{?9HX|CgQ{I1jy?1#*2 z<2>e>%)!oSS*!!aR>lslPOkmJXV=14Y^M;N_9r$tgk7>OJ#X9lpp)@cZ>+sS*TX-@ z9=<_V!xvqbPrQtu-ZtQqPi<9RO$?A>3sJ~>UZ8uv=9JGc`{ zEGIcf>w@GV)_&Sesl&*JLnCEnj+W0hWGoLl6S#MPiZ{1WY)#rCx%@(6a@zn$bMRfO z#M!gAwnQb?BycB7{C+CyM%wjCJ+YbSq=-2;tzpHqs#7CeJLY&entsSfe&(&gTYN_+jgO&H- zbk--FBd-Lu`yA_)!75kuIUgUDHJE(R2Icw!JxY81c`o9yP+j-s!visjv_BB1G%O$9 zVTn*)#aX9)%p+;!31uw(o-yPgW=ZHr%z`hHn56+&AZDq+Ml+eSBtPE4TVfadkyqju z(TCC!YUUC@2_5s+v?TI=XDv26`)%!8o>#6HSQp)ayf|l( zTLW=S59PP}%NI%<18whdjL)a-JRisO?9XHj|J`42y+4)_|F$|x)-l{WB)1!UZzXUR zMeOBc@{+O_EoV_=TuPkt9P=LMf|@zYf2H_Z@lBE!bR)+KbQnc$Bkq!@UzZQe?*P9= zek*?aY%SH3h+fNy5hNav*cJWlHTJsh9PQuhiu2rq50W^k0p5Xg#NzJ)vD#_YbAhuY z%ppCVC9!fAAHPg&W}K-p;v`u|$XR#Un-V{DmNmo&^kdK>xlv6KY=c<5|nO z#E#ed>l8lIIXnLp{&>j{&NN*7T_0R4IP6j0W*H6N$k_%t8)1#mI!0-s`?UeQ$R3~> zf&AjnWxPsk`DrY%T#7H23zrWBRDHP~eYj^ReqWxy^9}aC6P-xDXOzSt>?^dACz9XQ(Av`Tv$)4{{2?h!?-HmNHiCshm&AO)mQ!--G|mjby!S8CF?OKNSD{;jK@B zpFvK&OeIxj($7+F1!vR)a$E2zV)No7&kk43k?^dzG(SsZIExI1Ou)Awm*lcevmZme 
z-PFc9m$|4pnR^~bh{=wQ#HS*c)#hl<(Gn;BntFv-TYu&u&cEOBYISnmOKXzL^!Un3 z=7n4I_~`8xo>}nH)5${@YjG8S|Air|$X8uiv9$%=uJG%22D+uq#$M%f@b#TdkQ{H4 zr-*iw>l&L`xge*3GG9}l)Dgwrp^SCT$PdOYjd@VyKYLqY);EmfMDDDtqs<+(<7%$ z@zH{<((V!XzcMw{=*LWQ+0u`b{5lHm$D<=1?)FiIS(8}HFeaL#bP+Lh$54(YzV`ccFXX*{1p_UhY+Yh|wVf7|u?2>h3Zzdo;dt3rF!q64&g&wyY73X33Y)_Ic=|v;}>N zFO~U1>Pey=i8-FRRk=Sw4>Hb$*X_(3|KxXY{HYNKi;tR4zsMYQ12juGqZJ%?2E}Rc z@!j3HC0fPHbKcko^;{O?_;NbqaAfSxhWmJ?TLpK3KH>h)C#>^V@Ove{FV*ZTuV-#7 zj9uCwcfDTZ9?&N8;!mV2*L`ZtxI(3jb1V08=Dn~;ZJpU!{@#5mZCqi7S}?Qo#`k`p zl7uE#;>N-%)ggC^_5_tL3MyYj`6_?;BFb0!%R^(7-=NgEk=z?9j8{4E94kCE&3YcT zobi?cz6d;J4E!O^?vE^TY`sTCPkvF2jCyg0_TV9JSEEFtrG>XqypRUhwKbA+5%8Rz7Su@`B3;M}C_*I?K3jDb~ZU6SiV z;z!vFEKnV<2h#=4LsI`{HD!oP_B73N_ASV^<;jsQBR}_{jT~Wu?As9&SUhs?c)H{R zpY3PBeerml=Of_d$QO{O4cd>XTjWjE^!93OU7pb?XZ-H6;s2-~{+lJ95;@X04|Nq# z*I{VLYpjuW2%bB~MtcN5lRAaR2JitLE96V-_ZLiv!?RtVTUG&XYoJ|kh$&~x@vZp3`%%dMGB;_4%tvVJy~w}=1Bg9*XU6-|>!5DE z?me&94E?-bZ-&=*^aFlob&R3c$>5b*W~}d)F;`#W`?y!~@<`dkGxqN;VgK%usrPQnnEKYXOQ*iYeVPpKlK0-q*tPa8o_UZ( zxwr5oQkMO-lYw$>`rzJ_jg z6WBrazuv;Xlt*08obNRD?8GO_IOzM?CCYO;@40EW{Oq?68ut(s&lj1=eiHX{#3$>_ z$A?DBep;$s@m=m6%w|n=jQD`FM!So7mO-kP&`zA=9Y9~t#wQ=kdv3L-cI%f`Ey}nz zno0a%W4@|jY*c`kSV!pOY=RY>l!azr1?Rntd6nmx@a$8Ax4>i_5ZEi0IU{f$G5Gl? 
zKfqq3t;lVx_p9+?`q)Cw{-(;g-p%@TzVFI=V@3= zF|V)6nw0OY#lU5k8jZ1K-xP_<~Q;bf`h*K8h%SmB)`Qz75f9Hd1eanpK(Ug`n&$klXUh1xA>kv zmZPU$#Xh@{mzLaH1@Upt!qg!dTJa-R{ki14Mq*3wOVQ6Atc5>eFHwiTe0Ff&lIO0i z`s~JhFXvA-ujSpv{PSz(4J-O8ezkvd+iU%s8F#DJfqSbVf&B~TkRQ{Y6=j9qboLq# zK|2qA(w;NWvbQt%8Ks(sv$w;ua4XqoXnu#itvMh4ykljV?wut2n8-dIdGg7_9Ewt7 z9|xWkWgUNesv39nwdunUjC0*(UF{L`~i_V0UuumgwOz#CQMF zqwUTFzC-s4x^N8iHT~y_aSALo7P%7H8Xz^1s=gjDsK4(P7 zmE6;ivvV^a8Pg&Ate-PCN#jjr9Jc+hSj!bdZ>Ko$IRO;G^PTSb4yA@eRiQK@i;8Sqv z$dS(Xe^Kv2(G7L{*P=XHtih?aH?!`6O; zd|q@m-!EfclbW{zeThAn*c`c0=w|~q&)M0{XE`T1T<*nVYm(Qfr@aeQ?06}|+*q2| zrsjRE_h;=H;VIkMpXYi219yl|@kLqaP0sUHi=Bc`pw2gB9fD4n7ma@6tYoP?Q&i4c z#q&>+oTu@;Ci@BWe>G>=@Jr2qrEi6g@9gAIk5l#m8NWx!{~l)TAnRt|x$xnfmrt_q zr@Zu!lQwZy&AyiUbEsR!ri?QgpGSy&KV{5{oFZSYjH7n;s~B5(ulvRm`OVy7tK_7I z!haZNjUqYY=Wj1E)XQ@rA{@CtW?wk3Oq-`+1F4^LmWTeXrXGCTX5nFFe79Jf&EkXc z;gv14=@|7?JW|mibxRwH(N%Ceq`lIXO!QVyd!!9k+8}&IJ~=OB$fv8Z(yy#PSwHyh zpPjj_2%lSs?=9f0oILB*7Z0Q!W&F_Y>3;d}CGybIr}cHtVPKiaBnMax5Q8fh;34Mmg`U8A@pCf zMv$|qJT1(?b;ih@#CW^dA$eQwaI#+%>3bG^2f44) zmOe`%7bj;uQpi>K#%2of_b|_%HrCPfQLCQf`3H5Ts$uP|jH$KU|KORXSIQhX{M(`9 z+jt)5+lkhisO5KW9YqeROpC6}m-FL#Sw|N6Q`tY*+1XXv0*#GlesJH*$lIj@JA~bw zMfQVy@*IZj3b~){^G$i!GIn+%>!y5fG5tZ#N%Br!+0zz18#sSC>R}(5y>7}JqMj1n z**qEFFd3QJnaAfmJg0-4k?33UO~#$YO!mIuyCtl=9RYpBd2B(ta3hcCLe3eDQg~L%`=Ld|tTxR`7cDpX7d> z*!vHdXI{nERovnGKY_df>SPQ0@a3FRJ-?KB*^eX9xT@r?0^(2G?yK{Qn)C7_nwd*v ze`_B;*t-Xw{+#ytJ4hWpoVk%0Uvg;w4ZV!9OQ|CcJ9!=c?C-Dg&H0k^O7$?#>#kCR^yktjnc*oh6~> zXO@KBhRm$1MCNqHrsTzf_cJ;;2lE`yI+L}X`M*Ab_rJv-U4<`bKxVP6sGUX4Sv(t! zdw#3%y6(dJ#V=zn_3k_{hB_Ir9mu@gVcO$YrI!EKWIF!5WkK`Brpc zb^MaE!u1al@1oPV_O`35*UI6dZu z-t=VL7dQPXE_T|gI5Yo8wB(ZKv|%MVT2jev+R&0aX2O3a{Aa>{MkxPpEW^Lc{~q|? 
z1OI#AfA6Ze-N=6&`E3?kv`34I{slbOQb#Uz+)f=Lb8s6vEh*7I)`v%5NjogG<4JvB z$^FPa#u8idLp7izSaxYkSpLWy+=g)c{~4{W43@u<_8j`EtMps$fmCW2M?bpQdh;Lq ztK3Hy$KL$MD3$x|C`a?loU^N3oOJWc166J}V-s~w+x$kINw7bC1W2%&+{H^}-cc|Q>thqeX{N?ZVm%rCvevZnGvcyHZIn!du z@Kb;JpZm){0^FiUdmi+cf7D<8aew)hz*R)Fdzrudlm7D0_{*vBKuFaMwZ@~`>J{}-Qnzi7|z{pJ7YEB^&?sOK#o582P~yu)|Bf3*7z zKc9E~d_M5=`AFs3xcen}5!nafFrH7J|Mcm(8<_SHHwXtp|M{4iXBkkW1=bWW( z9&B{e7K`Tkh8Vo<4-U7?as7t%&$^tsG3%sdI)kz)R zc6`uoa&N?L?VPVDEit|iY*+qtPknn``4eJZQ15x`2Bj)YEi_#hZDT zh37I8yAba?+;2{_4OY$MIGb$=e6m-?vpAb)v$t%!h)?#FZRva_@_7lL$$Vz3m(NT2oY;MbCvaw9+NWJ>db>L+&&-!|e%C>>zJJ6?(Zh1)=muh; z3fAbd9)1{k=aa9NvoWu6mvN0HX}9df`C)-~|>3Ok(if?w-m% zC+GBH6>E1sjYkUJF1BV6JB2SoOa=1Sh4 zy}@?&3Gs!rUAdyU;~>v&ZS>#4+QeRx=eHBwjlc8V!`eVRD|fD<#4k}k^f^Y;xN~(A zc_ZJQs}f?$p5ouY`6#nqBo-FENNgX_i^TW=y;%Huncjn5a)b187vCFt*^>y4L*@KT zJNzqH54T`Psi(Qu!+4ZDs=%tTN!D7Pdi*_ow>OLD#AeA_h2O-Q#@VPjnwlKV9&QZu z$H;xHhex^OF?q&q8}=`7a$Dq%qnT@zXZ=O$!8GjiFTlIB(Um(HV=)&wa(LeDmG4ro z4o=Rmh#iQHifsqJPs0CYosQ)GwIer^_bBF$O#T<6LbFzGKyDcWV*gP-xyhaT&QF^C zw~VL%CKnM8t*m|C|54z-@nmfv&x4U?iNt%hwab|Coi7x4z&G~9FAIB+zLNTP`uVZ; z4&m1TzwO+i?un=Rzj&qc%(&j(f&ZZj#!qM6cbqc@7M^csl`{s+#h2rs-sO3*g`730 zP%*~;$F1bEhBF5JwXBst&_=DKUs%sZden~`U+JiGj%%U|eqO*EHIHD>8_#L?6Mq^g#an8v04ry1C3jqF2_sd)2Ofu6>-1 z$pIJGM>p2Mb1!%827VQA$KK)Z_b?aQF9U}TJUnNP^GNw_g&hU%mIM!@hT+b?YC-=S zC(ob|et|L3l3>Di^Ye1jQ(G8At$x9>zKzSxZaA{3u# z#{Vl6Utz|79*S=;`x55;H3n)G`t6klP+9}UGf3^bK*GvVV+_|HuE!6y8t zq4;Vu{*zFAg_-}6P<*u+e>fE1kY=j?<4}B?x&IG^;)~7r)=+$#S^k5e_=^6f`u`b< zZ!^pPk5GJZw5j}oP<(|M|MyUQwYh&k3dLud%l~gEzAe&J|A(P?tJQ@6TPVK5jQ=1M zUv0*}ABt}<?PLh+eq`TrVc#sex#}V+oAXdbNQA~e2W?XKcV8AQ$55>3fpTi;X&1<3f zV*a-&1ivK|-(bc!h2pJd{k|HCZ!qik_n~;J8UKn2Z`RM|P<(?K|GQ9piy8l)q4+kN zsr`+i_y)86Z3@K~oAED);xo;7Pbj{{T)*3dH2c{lkV(e1o}seJH-g?BCai;>lTs+V^ivcysyx2*p>M z%dZQ?SD5j?4#iu|<9BT+KGSTU&xPWxX8zBH;@iyqw#%=ll1;;rWSvo;i; zX~x%t;)~7k&B{=Gra8V{5sGh!G3mcL6mK=J51t6cSD5jShvHkz{2vR&x0&%(q45UY1yXcS3pM`t|jp<@+WkjQ_5o<)Os#hN0z-NeT7;erUNbvHWX8 
z%kvV;?;KiQmYvZ4s-fl0iS<_wEpJaOuOC{zFR{FCXn82Hymn}LS>pc2H?-W5SpL#PX`4 z<@*xb|GS~(d5QJEG_<@uvHXie%l9Rg-!`;7lvsZ2(DJgx@-GZ6Z%)MDEkn!m66N#F zL(6@M?SFn~xg$}2-IP$CXdkH@TD~u_e0f57qWoAkv^+0SelHzbUY1yX58Sbp8m@_mWpzjkPOdt&)DL(9t&_s0u|mirRR7Yr?LPAo4STJA{1U&+w&yu|Y2 zq2>D$`RB7k%R`Cf^M{rmOMn~$D+K$KZ%ZKE9c?yXNo$vc?|fJhbYmvBy$}-(4Sx z*Evd_#PE7Mag+{Xd7Q-bq!35xB91Z@|B#5ki_TI*!0{4o)pCC5GE07%v4&W2e1BbL z<4;G8DEX;;qR#-|N1Qc__7v|hgLCXO@~a-3M$Qm>elT=}*%2fM=TXMif$zWa?{OET zx1L-Qb+xw_%4fOiv4~&A0(=AXOuj#vri~o?ne}~Fnnfx2%hm9{&QN{e>?mXDyRx)M z{iv<-xyEee7dp-dpC&IkiPBs4%<=czv%GouPJD=O1vy~S^P!!TJ@{XQ@n7pO8lHg$ z%4WeEYa{stMWb%wFuQr~-Wl=P57GZj>M57Ie#`F?B$H-2`+gt@(= zXhU&R?k@bb;F*d1R_u|=DmdR3JVp7g$-uF+%40OF2NsVx`GD_0ZQ&f?R-39@`f-^2 zvBbDB4%JJc-T?7Di>ar)rOwM-lUGpJnNZic&0Ojo(K1djwE%ZBJ`C4GtDS?cfZs6E zp1~)i-kIvJHr;Rd4%}1N0bWkhZW=JQGe5QUzQW>dWUI?zg;S60{G+v*(UZ{j`wE~>pXMt9`F<6h!OfE1E_wK_v7reV#@^QF#JKYz&YvlkL?%h|iFL-y*({Rc1Uc0jb z-~5I;?paF>m`fLX_HXf|`?u&WVvQO3TQwW6lr|rwJY@Hh58tt!xhg85O~sZ~;ES}R zwzjaM)+0Z^z>?a$LI-oN7|?0-a|$@+|M(H{Pd|7Q8AUGBZIi%5vN5xU?@)0hm&`=Q z-%cLDdG-nQtKc#6-Zq4Oo%GY*y>?_$^Om+`a_!i&2K@BfHjTV-y#HNt^2j$$G6erd z@8`*H`+fVw3bn<(Jq_fOtyEbgzGH&-2FjJw=p$m3)US^pS=r26g zXgKxL@5c2t-g`A|owkjy2$KUSRpTU=f@GNF*T3L9+zf5U=lX3CZKo3d2yHu|?G|X; zVG`#=41{OZNX0)MpUXIRf`8$x9beke)5dDW8xGr6$^~t8sa|-IS%i!)dOBL~81X`h za2z9^G#(Boa5zSO+v+a_4dC~-qmOTkj^+~shkJP+el<8dR#rh1`S>vrD`(MB-YDoC zIywpsrp{{3f<9bj7OkWNmG8?tL;&`5pR@pIAo@N6~i`Il{ot;6iXwOYBV^WAO3qq^y=$7<4f! 
z_oQI;k$Z8UwQk=!z&QK2L1$<0yPh?932So&Yjinl^bx7vv&h>uy?QJ8~hqNq*rlH(lv9?b#mW#2G)&gTDG1~&w8`7=ScIsp7nP0FI#75 zg|T`Av0#U(`=ZK;4Okl2&oAQo>7k#Q^h13AoHw(L2IaDZZv)CL;khS!dGFIkQRp^% z#|z9=Pld5Mw8;o8rtI)S`O~?*q05pw#`NGHm)t!+G>x1b$gV!_Ix+ILjh1g+=&}(V zeYcHNKJ>FEuQmpk{E0DIKAhY)@|)z2vgrTNB4f~km$n7(=g|kHzs3B$ACH4sxXB>~ z62C!j56_iXEB5~n{I~o*(MxC2b{@3o=$&H0-1D^E$~)Klo1_kJtn6D?TCw}R1<=~l zBRT{_2{5eVGbaItZzRAFvUnqsSK5T1kk#xRZh#i9hd-8doW2eHc5j3)W*dduuPwr!gL|e6``z z;6mGG(>x|@VjO0N%ynaMy7UMp={@PCEOIEJ0 z(9=m44tf@nYahI7pA&vMsi=38aFI?M$(`?!zxyrTg9aFPMsNb-{&&V5nXB6@IcAM} zZ^nh=j^+a1Pj0h|j(dIjki63JX&qQ$T5xH6;T5(HYi?GUMcu?wDCXkZ?5C>H6GfM; z{I+x<)>{WL?ed{5##gtGJxnt)pv=krs{AI-F~du|YY+2ftsQGOWQ`}so#s;azpR7j zLX8 zQH%Ft{FM)XO-t%H3{8r^7V!B}B*(oKPtx`u=VhN{67X30LEoay9^O?BOSQj({p1C6 zxqcWLj?U%hnajjIyJ8q5Th>`~k`cU&IbqKltYe)dHfOLbHZS0*jUKU}!B4UK_tg0q-^Z}c7{ zc*&5y^r*39Uf?y_C}6%4^AyX@@#w#YA0OA02LFu4L$1y*B zCUS=nxSctTkGB$C>uy5bizk_10?x(rm$LV=au@HK9GAOzyp_B7BydVMPlYCaJK7xR z#?M)EGMU)qJLz})r8fMj&)@f?yN+g;kvCEL$q)B(k9^a7*;wLjB$hw<2z=Gi5y+3) zB|B3Od>LC2h2yT zt)D@QJvXmiHjmhQ=l#mrf_`&+%|_9z;$@^;r8tZ(@16+(*<`S14LTjhAUT!T>)Sfp z7IS~WT6<^Hq^9{zrrFtK8l4+1HwLSUjKPw%w$75ZCLg1-~h=-!4w-3O8JTQFzy;>C)ckzu0nPL<*blx!-4&>jd4207+rqm(veRtXV%&XKV^_#p!f~wrQv$) z%*35kw9)6+U(E4a`*+3}KSt$PkjzP3lN@Q>Z)%@7H^YjPA#XJJW#4AObT@0z95Z7G zOg>^kcLS4eG%z7Id|q;*kjpu`CY^_gvYo)R8<;l7U}9V`9*@OG`Isx@;aK5K^fv(9 ziVHkVo`S^KK-ppUvmS+_>*!(fg)44FISF#Hg`(@vK1_ZU?O9%&D}8=kaOIm3o4rB) z7IY$S5^IRoV8dbTxCdG;i^@vbV#Vjm3DE=W)^p@!?3rYw^$Zyqdv;z%a_*T*zyY=F^Ri%SIcmw^a zsNGDPTaEmbz9Jc(hfXZHc^LY*>v!knW)JiwT2cP#ec+~-{kitchk;k)kJqtve^)GT zWesgiK=z8q1aloaug)ZN7I7K*j4zbCx(6^4%exi0BJ(}YYXh!$nO+eGTlY|z9*oKK z|J&TF=mW3)cXF@hvnTNYR|#t?bCMC$G1QLqPosTWAou8%0b<70feA4 z@k=xIFUJhUl^H?j+R~yH&Xt*J|0_8*U zk@I|ClCjeRJRiS-&dRy~Y+V`fg7)9!{~p|I#Zvy)c(ES&a4}wd1)53ZMddsaJnApT zdxEK5`Um_RlP#N{YP;39prpuW6wcK>IgeCC#%RDRJ-l1{IyuM;XTRjuL(wwEK3M%( za;q~w;UlvlA~X7-$q3J8dS!!9uK%|NBK#>k#LvNlBdQOGc0||8mt?IU4df)zTy^c8 zum9ESn&vscJ@~b7p9JpX>A%#Xf8D`#{!A^F!~aXb+4xyw@IS!8%D-Nyr^n`2?X&J% 
z`PcU$BQ+;GhM5zCz9h4?28a&+FdZC$+vbnm1v*Rkugc6(=x2%ZWYG%ajUVfN+Lx_j zI9VD$pR!3?Hm;k;VjDPb=Xz@l_r*=%zBu=V_r=BHjdfQ`4(nVf8tbhXZ`rpxrd`EZ zmMsuuAK=(S9Q3pz%hna!+sMu({KdojUEr0?OZG0|N3ugUuYP2<=2G%H{`~;&w_7$2 z-9h8*UwdvYD;dY~jx#qeJ7*7z^6;Y;4|mTC^s-+&O>S4t_b$jK6ysy%K^E`I))8V4 zn+Wep`qy4l=ZO(`73XVo?!E}TU*>&__O!>9?1h#i^K&imUV}~Md_JGofUR_RIrOYG zhSqU`U5p`KFD)fMWS%8&tUOK@uJ&ndofF5rJL{FE`Y(*1yb{+Ps{XL!-4` zWRmHu$G)n33=tbHxmxScVrWP@g98{%V7NzouCd3BTlVTY=>yPsko~ziV&Dbo^V(x< zwSTsAyZw^FeAYkqEyrsH7oGKFwq^BUW7S@UJ<68t3vCCnO?G*(H|)lKy_GR;T|cjL ztNn?2eeg{GLSwKG8|-1XaaQN=vUT@7J@gFz1av1`5j0ffv-tV%EPmbw-JHGeChXdk z=f-5n4#b`t8$Nq|@`;l}d%={si&fA+qmz)jeUlFOV)w`lzDcgQ32VQzMfXyFL4r+;#|P4KIpBO>dA%EeZFHavSAu)M+Zox1xQ+0#6=WU$HF zZ&>!obNdb1G_~)MywZ4_4*Uc-3tUW|KJssSH7TvJL48v|y;QQA3x9@2r=D~b zC(o4gLizl)FZ^gs*1(5fH91?ev0mAcIZ5zY#Q!FbyO{P(Mt4T{jji)O?u4iXFN;}s zw8s1i`qdg!K_4+)R{Oex^ebJc1sa%pQ%Oe=x^Z5rqhQ}?qadrKdD&yw=adVnn)2N* z7WD6$^?25VM%g$c_nQBOeTmkhTFP1LDxe+zY4$7Hf9gDuc^d4+SK|$v)4$W^JiuJ{ zH)H!O8ILZ-9T;=i4;3N9e@i}#x#R@>=2NAfDtlgC4Roh^yKT;XC%?6hNx!XSe@l+Y z10ix3YJR+{-B;d%40M|l@e4@tVpFJea7T;Dvv= zbUS>q`^BmKG#A2-pia*50D=qBhwK13ItAMTdC2buB_ zd7DL>iStwnUr0vE&ix8G4jsn$3i;K(dh~_NioEJ_-gSUhKGH7Nua^eMYRBAuc zi>wgcOf+pSH@*tk(<|fY>c%0so?zh`hquR1;}6kIzUN!1llx)B58`>A=kQEpdl}q@ zZMOQCqH`FVyLm(AU;=c?{6%S0ZS6T7$+;fGZw#;J<^>y$_$1yI5|vZ%=y^SIplShU z-=!TNC0{p?XhRl%O0R9VO)S`F&zYNNOPgE!Y4V(+Bd|x=;j{K?j{a)o$rNZ;Wvu_! 
zzO)WJwj^mU=uGh;E4lwrc~IfGODwyL@Z7GF)q7+dTtX*kK5GV9&!40zSI0r?LczCW4P zxvO}~_rJ`JNj#IUUIzRi`Tj@jpIgCI_3LIw27Z&(ya%7mR)4HTy3?Rx^T^Fvnk$Ey z!99M_cKKgR+macQOAgZ=!CTNojz;brI=N8ivXMK7WaCP_mqhx8#?y|zrn7}}`UidX zFc$4Cwt?fsJ4pV8Gk4u*lH0Ul+3`1qo(05c!tDS~Gp!(G%PDy4_46`1GLEcSrNRyL zV2D<<$Bo<92f&HSSX&2ucd;LbHbUAL&y}oVjv4Qj)Yo1_^Zd?-+%u{?#^6MDW5?<# zV;i3`Jq|P5!inUdFM1F0KVoyyd$NIc!&8QAWoG&;qi2R?k6uk%D)%4{QdXypEjIJ& z>cDvocu-$2Q(mL?rbg_Lk_9!u6SH4%UtbqFZF$%26&*G)mkx9T(YN@^fsUX&c+j*p zm!jWzIu!oYxB3M~l_$wltG2{@@q4uR{aJfdSE`NQPkEsa*-<1DqHCuox^|8M2U~&b z{tv!?O$X1m&#)M*I%=@6Mm#uWCEMd zZgidCt{lCy*e)H`nIb<>llu~s%W(gQfr70gOFH}P69YE(Ji+yu1D39}I(zI}>(|bI z%g^T_KAZS_o6i$`9$z~@e6@YBZ~dgA_RFSvdXh(W`p`3blCPs3N93H{K5jtrd#gR^ z;9K@&&itkXgZ4CXzRm3D*qgR%_xiEB+U=PGJ;|dxcQB{2H*Yn7h4-g|Tgi@{wzgZ_ z?Mn-Ng)`?y#vieJ?`Aw5@1k^}vWHB^DFk!b`z$KG%WzB=XjCF|VW_plj|&+TY*zxQ%I+ynpY zSe^UV1<3u&nYqq?!oQ}U^=8-6%h=alZ7}Z9ouQkJ!86$5k>$Y_d|@?b#Zmuc2ehVV zE%+xpI5XJ5opI&nl>TdGn3V@?lPaqAk{agVb0c_bKIM0`7ei0j4TTOF1ct5eJN-BmI)<=AJ`{&Pl9*lod1Ax z!ZYXxp(qD8>0O>>34(G zZ%X~kQ_V_0{npd(4Ek*}Q|o8&Ozqt*zd*G`pHI{0De^3!F==@oaOHr*$e3vxKkF{~Znl$G5}XukoSd)pF+M-z(|Vf1`239T z1Msrj=BjUyqK!O!N3+bz=IoNf$H)b3wG$nqd{9Cst}D`5eD6bF z^67*Yn$W#w+KgGtRZhFgg_#1ZF3OAX!Ir=IOxvVc71sA9vljF1118nIF1`-(--SP% ze7mQZ-{0ahH)N%o>0S=zsh=_HYU_K#tRm}s@~i^BH71Rv7aY3-$WGb8$W1>Kx7=^G zH-g(!;535Yl+Hgy@|Vuc$uuifUwDr3;&EuhBm3ULIsWgHt22YXpw}J!H{&CDFyC+K zQdg7=>OXtMu6_89^zq+`?>D+z!(!9udIep&?@IhfSi5;1;F<0cNbhIP#*^n;c%1u} z@LXtp8GG#doG~VJG`?b{Eqj*Ayx=vSc0@PrZzs^rUi-PRMaGrfd6F^ykg-MRCi4Qi zk^HE6D_mI*&4hsYV(^c`uU7DD{PANr2wd&9I2wbOVl*bYkI`70=*wYNw!%Y0=qnle za+MTqhQ7YfJyJ0m6Ypxz-8%-~RB*=H??+xwP@A7PzkB8X6`5a+J%R^k=N+;69XZ_m zW_nx5x0iQtw*N7g5tu^19`wsTYks|c*LKDST=3@@{G#!V9yPjY*}L{Dyr*7Z zKLVfbG+VRe*M4fv4KDK45B9M>pwnUFO7|vlm-TVvmF6&H;Wy8}G<#x!r_|VSnsUK0 z)&{=mTfc8|Mf^5o`;;t=Jr}PFHo?0O_$NKrlBGV>MnCsB9%l`T?;~JLEZ83{*WDNU znQy%-|5gi^mqdNqOQSq{iP3dzCu;<>r}tt!i|&1PH-7$Q^l4-^XTA0Hk%1#eMh4zq zI1*iBlDFrOarWorJIwhj=Q3@jUA>1NT&B5^oR&RaV`g1W@0pg;A^jMB9~){jyCiEy 
zFz$=NGU})2VR6joT<6&R8=dGCk|T!MJwFDM2~0iffeEE?|EOZR`qs+<1GEd_bMGj_DBxqp*``{Cc@PD!ph`EmTHNc&0>1DQo7d*__XBgur@z(V} zx{$Zv&q1}}_&EGC?>|f&>&9OKUGG2#>(Rfv@M*8zARb>H#eWUY_7%` zK8^m95y9Uxl za1Nil8lyPQV@-_Vcp-i7pMIW)i|EIqCFDhnPKKk8Kv$%XgFnBe!1v0z_I9n0(FgYJ zSRb-^_i**7O=KW8a>s8;q_*{!t{^@Vj zANBa~^<^U#y=?v)^*bLMzP{=%rvCtYKJCTy`$J@?c&zbnw7H7gZWGrAtuLYh?zb+I z9ru%Pa%>p-=llvg(8R!7>}j5#6450-1jq7qiRnny2aHPH5fx8+AF#HG=Dq+e^s=6@ zNAKE?Zq(0jFS=0`vb#>Y(LWiL{pc38@EOkoJhRq(wTFB^J{}+B%q00pj}I>pPq%rAbZ%I82dBk!gFE%zj5ct zz)feu1>3OYs{g0S6R-Xy(=|TvnZ_8yLpEa-JQiPHL>IjN$qQi;k7#}+Yn-gB;vLaJ zFZ13B4~YiUPfY&AAe+z?ik-xG@m%G(zu ze9axiA32D1+W)b;usIyI>QAE{dzj;TH`bow>w4d@{j}qI-}|d`&Hg7W-^jBI4|%tx zOfDZWWtRJ4^Y}}&5#kJRA37WU2I>3wFQ1BDx7G$@Fon3W6sK{eL-kHZ`kTvsQSkf@ z8n4`x zql?p7qdC|3(I>Qf`NpB`4!7h^qy{=yT@t8UyJ5@rwXIu)~BlCU?S) zGG^wB-t5l$e#TSA8B2=I7@M`GJwz?ZkApk; zTAuS~X+M4PNODo{JIO_YF`xI3O^(>o`d)5b_R`DG+?BG%`^1v7jfcw5-o8n3rsZa; z3z}>w+vb`qT5ONR*c}XUF0>N*{NOV57@or$$5r-A^z_g;^jw~ww5NH=uYEu;+4xq> zgh{^bPjgjZHj;?ZJBryyX9~ElV=3FL_HAHX&;b{t!IgYPww5 zg}Nkr)qjU=@+{9K)a6`oEqMBX{+-Z%Z+~);U~V376!u~-sv<7b&z-HkziZ18%iR+5`ez&M7)6A-O?Aekfz1YDSjNoxPjdUkxtj*ZUKd?FK zdfSYyt8SzcCcXF)#jB#nV-KpwhTw1^ zH;?FU@^Qi6;~N&UOLc$Wm;;`km7ORi4@1C|=Q8GehOvjB?bF1$owgIF3LQ*h&Wjmi z1$*N%>J={n=EHdbhhTt?K1H30oC|U`xYR0Nc)lE8fyx-%qXjFva3ZYv%y%>UxEQR@ z#>2Y#LRkO8en_y(#&(3?>f3@JJ9(SU)=&+c-_kgu`0{d|W#P$8@hA4}$M!I2pX~MJ zpYQ+Cxc;j*Mf(4U^Y?gsIcYxzgZeJx?Co$A7PY7MPjfGn=C16o+~*+teiWW9*%>;N zToi+??@+Qem*ug!B<3d(#(r=NUeDpj7Z2Yf7ve`aD51_Z(RtDTZs@lp4rV=D3yo;5 zd)x3`WRBbLMec`p+pZ%fo4M_yj^gr!-^~k*rELo$&oBQk%qc!ZL+14AIGDSeFN8T1 zH>bMOE{2~CtmhY-J69$&e#v>fOpWVf>$fiKV>jbFFwUslJn`5&UV+QqkUD7hZPaUXQi4_&kn^CADa@zaeC)>eNVv_Wo{b@Z?J^4)tFQ1Krq0*jlP9_RpmRw}d6O)UodrmI$Tyils$D>EE zH=?{EO2dLv=QNUwEpcnzNqqiebYxUt2&3di3@4I{vG)D&31^h-zg*-gi110Q4;S+k z-^U-~`sn$h!QDa;I+c&L#-*{x`moM%fjfrx4*EHYj+1&Zyt791_Jw!G=J|JdX$95awPtJFc&l%^kS;GKi$7_t*$#c>VdIGL z<3n+Mlq|cjk34WH+Lmq<8-EDh5Whfgi2INwcVaU3Z|QRV^v#*jxxP7vI!80rH(c1a 
zWNs*JVD1%B81&!A|NJ;uG#4>=-gwdD&$IszV2b0lg>f)-FTN0_FVnYtnN~z$!lrr- zFMk*JTd;BiQ@vvYI^jK*T9Gum&FNE`dLIVe&(e@X^JO7O}O>jSMq!nl_ z5xkElr_Gg~oxypug=6$?^!aBc`{MUeqKDY{ta9|xSotyV9g`QIw2xvRa*huFE^eGT zm!H=e{wL$Bm;8%sdl79**NM${HFl31k@Z@;HP-$3Ex6g^mO)oKPdPe0LRXW_NiJtx zJ984+>A5U2A8JQ?_#LN*()SAqJg}Gleeei6oc7%QKO~gDm+(IgK8W#{Y*6C6>S3OJ zooANqANrQv-(hyI{u=XR+414^$g`5wk4K+rZHPVF7JXKR%#J7dl+ z@IRRFU%AfWVe3x#{}TV@Z`2&;FK69z%QM+|Cw7@Rl6QLFhhHOd^R7Aewp(>}IAT8WE!Y!zXNTLxZ_W{W_$~h<`M)=@ z?i~JR$>63x(;hxZ>~{w{?2%kwhmqNeN5-d+JUJKoJ)im7udH|l^{X=!!P>)k%ZW7_ zDxeR?4LBXW2q8(nYUr?Ky^RfYTJkSh~kxV6Ls$S-_9d-go+t}!39 zeNzqPT1(5n&v$RR)8#LT`kl%D>zI8~a1i@uKlS8)CO@!7?DfwSbDyfyW%+@nV%w4* zSgM}kzoPc*@wJ-4rw!Y+d{~O{Lpou#H7b~kA6m6|mUATeMHP3D%L^X$^1kw4o+3`( z@lm+af|F;KKN+x#eq(h~;{2kt2e{8ZVU}p=KKmuJ_F3NXdE#g zun>A!A2xRRfGKZ7%s=N0Fg4<%^Gu;pxiV$k5FedXo;ABm3a`Yr9`nzUpIgL-JTjJe zAFo<`K(00?d&isK*`i#1_u0qK60fR1_0yY-FEMygj9DW-eST<-v$ufaj%O<#4L`U> z+r;`7=Jnneznk?P`33F2vQWNbRh%uvzmJ}1ybIz7_(?WV{Wsj3>m{|;*~V!HpZYa ze;4?j`{xzDkzxdv18WuUyTo%*9Gzl5PbjB?>RG?RDfNYea&X{};vnC`0cR7uTS_}2 zV31CJHSz87IQ$*$sIKtV4nO@KnB&WT6IZ^E@4?WEi1Th^o<3-Ps=N~dR$JyZ5j9k#9$qN zO967DQt~kNEWX|LxOVrUH(I_G)WtX7djB?jQ8ecGI#0yaX$F=y-c5XGW86DF{FlFT zk^Y}dPVjQF;2d7=OTA^t9DXGZj&{aWcM*8M824TX_$+x8#dSQ6mc_l7$9tc@ zNPA^*@A-Id$wl7#Y}|Xzytm*Y?~(WB94-35N#RA_^Txeb#(ST>$a^_)?>Tty(u=$| zKJL8`_|sZE+}x$cy_d&(I_HkZTT)zk8S|^XX?*z^Vn$-TmPfhxRy>;*<<+I^^W4+0 zVZrYXD~9dYR0? 
z%f~wv+;={g5s)nXaduPJoy3U#nD~U>pr`$WHeN+%`!@gIt(`GY&-owz)%~s5+^W&d z-UZfIYwHW&Y%m(~SeM3p98P<-_RhlhoN4|Z*3QvpQ`e8rhUd~&dKlaM7<@Zs5Oa0~ zZN6)c_V*%#tatDMiGODlV@-geD+keI$yLtl%~Z^ABY{pd^5|JbAU*GP8bLwxqWMd&!!q3c|W&T|dA z&qDH}F6clP3Mf{EwPN#ghpE4w80z)PzZ8j~4#ma1C&s&XO#3Xc?(>yHvvb?}2Rf~s zE1dhA_^%k1se#*sgA%su8WF-CO7gQv$Sr_a#X z(pXLgE3ONjYY*`f=wjaDGiJjxjBN+DT=|PCF6s#QAohlsKzHy&qtP)VBXW(I;H#@=Iq~S!`2l{R4sw4g{~$D6Nq%(2Tg!JoxRdrLz(!$PW`BtOLU#G3 zM(R1!iN~!#eL)=YO@*j$nDB|`K^2l-vmEv8T)Yje>rz%D|a)oiNRdp2#++LdjbA6 z&}DA7HQ}!{f!Ou&!SI3b<1WUaa^YYbG#V=(plmDuYk2>AZ-pOM{dWG>@?SP zu7lbMC4-JBTW@ffUwQI`J;~DPPeVueUU{dStv=Lyg)#VjXbZbt5I?sr+4DTetcimq z8*BORuz&T*DTCGcea-W_J*jtX>Z3ey;^8Og2{V91?9zn$pI&!PC zOBj34LNjo9xfw7nX}{sfNbX3$219<6p!1`<5ADZJ?fCee*WYE#j^C6#pL2H1(#tjX zACu4K{!0Qj_+Riy=F|S8>Pzu&Q~9qP8STl$-!VShTt?RxW5_@1o)PL_KT zek|$6(o;Vo#xm1r$lueP)yVe@`s(Lw^%(oUcggAYzMEVC;1L|W@9505;)+sOvtn^Y zf=zcXgccgars~d(D7^f4Am0_c79y_e66Uj;*bzVL!BO6ae;Q8ysx9j%?`iLQl6Ms2 zK92V|m)gl5(|T{QeS)i)v8tar*o6#y=~btf_gT+3lMj5y?yDHn<84{{ZHD4l_)NKE3*&dLqdHr#Xde2&C-d%|1`Q@Xx*~fTZLY|>Bis4i z@ay%^IQuUDrOz}^1>P-JaVn3l>d)mtE?L1#G@yprMslTG|j_lmRFK2u3 zr;=}HPR@$#{rFWSVVjanxu&otyNPE>*pKChw}Iz^C7-tLPH}xs zTGo{DhIq}$uswQLc5dG7**Q6PWOs6|FmRb{H_E5#4t|J@t(bS7a@>)9DedLc7VA_{ zV^^E`%Fhqx1>vk_E&-1U=Yv^Y9H3-Xm+K zQHXpnJ*{v)QfC*iFX{Yq^0LDAK2I{`iKp$f_JzpAZr9UczV#Is(%~aK`d3* zpu42&fGx%KAGS#@?FDVnXBoJ%fh)l>RWhyU_H67E)_j+NE6sP)M58hY4X))*fnL_b z8tTT{lb+U(@1FWRJdJZp{OpSDoc;4%WlpLuf9`uaW2h@jZ_7HByy4bj@+%OF*~OaC zrSq&1V<}_3kuLao;5A+g?>G!Br>03i#P=E7MD=gNfaouYU0I1}sV*$mTQ{Y*(&ZKissAgfP}U@gpO zvhr#>91g#cS<;(hbOcXa*0muPWl4V<$TI zZrh}O>1Wc*4)DB(_xJO?n7Vqu8CvVNLucv6*c2E3>z2v6lIFc%{V_+}-LY9?*E|8kPCNTG{m->e zvEJKm!Q~9zWW8qut9OTYgph~Ivt0&W&(OwU#@R!h;vaY?CuN|3*e@@8O)Ee5N#;}c zd$hu5?*W$+Srg+~%QyOLRiXRO$Pi?ya%j}R(+>E^!$&*@T@^X`SnDZrVn4i}hd(uY ziwxPQ@Z%3Y0IpbP(%-ZhMbcx4)!gw7bU)3_Z_b7%#Ndg+(ux1|Z;|DkQFQe|XY&2c zwL7fw`tf5D{ZG<((S1)bzCq>y-`<@+r!B$GekypFGWFdug7@erv)f3YLtb*nWPCTU zQ#&SL6NF~#>1!Wwy3Hx|DV!;lqIapDauGfW+yfRpxPuzYy=TKapQewupOCtxrtdH26 
z8PA1xYs#nC~iP=ewuI1 zA#||02mL6bqhn{TO6vFm_|qK_k#{qLEBW8l3GIL@^>-2&$0N^)sr8D6CZYGqMxpWC z!gz#V^(WkSZ&#mF=+6vZey*=6RxYcP(~QB-p6g3{1p24#8+kSeF4ULyZ^Ca8xcg=Y zFc8Ck2AsmHv*$6!5%eowILuh{sdonbT{N!oWWxJ@0ay53$yTL1LmEopiFv8WeCWsH z8YoCCBgfS2HuRnpWW{g)>Mr~O@2dUSe6bJTob$u*4*Cg-)XjL$19FmtB%g*R~;w$Xh z7GCREtI*LicK;=EZ9{CkhSOY(Z%vO$LB(Ur*=3Tkz7PNXL2!~NIsEqLFIouJ4bmO&*%Tbt}=@+99FIt_k80n=bE(TDadK*C9;Rtu9~vE&YW_kYmH;M{E3w-&c|~vd1_p1 zl9t0?ca=w+58dYY`uZ<9pZF$rjfzRcuCPvj6aNnK z>JSg)uJNZbunHS&qCgK_+YfR08^J`4&ugTNGyPB^9@Pgtm zHq9|M%Lc97(&PA^!#wS@<|#Ay^G_$vQ|1NpL_Ef3*4y-!l=1!(%wqwt5`O}Yl$ZK^ z?!B0;$9aF+a>u;1<AD75I$z3k>E!c2OhANY{5f{YH7xqWN`< z6JC5RjYM+p&=kSz8tMF0_`R6=m7jE*9gQXZgY6g!jPjS8oA8B1P zI0w8IoC^Qd;t|=B0*lUKJ3JfXu^>1TkKIjQqM7DbZJr44WLUf-SUIJ1I)$a?^ryn?^y`}i)H@_ke8+x!4nNA$-amC$D13!6d8aHl0-rocd>HffS$_~>u@hAOT z)hF_te3bP&;iv69UkQ)b(N73jdtfZ<7f%ojG!R~K0cD|Qu9DjAK5!m{z;?7RK zwKv5-#==KMbS&E$7d|1v&r+>3o1mq0bQfLI{&c0Wc{F2-U(1|P`Z}+3T)3891D%Qf z;_p)!s4DG<(NYR@;$htmG500NU+qgIt0hOhjLm7LkRQwRmm{z8U#Z%)g!zzv&mP{> zyp|)g%7Fcx-hi#xnA3c#+DObX9Fg-)U77S_LR*gNPwrzI_>b=+CVLLdh{&FCg9`^w zAP;IKdj{=;H-jIC?A7!yc{-bQq#6Dl&zd5=<#Xh^6;B{ncCUe1|Y*UQjgGoJ&@ zt@6?J@mUESYOWt4H|y8P%ke7u3o`z|OJ>S}V6A7D2O78&yX%|GWBJx9~R)Hd3mMtNFRN!N4cX#a1lHb(njZ7>3AXEeI(OSJXrI?t|m z?PG%LX|tC1J?0p{>=f2-+7%Dc|3U9Bt9FTA@N=5&JGWj8&9$s`eq5X!rj5utPv5b1 zzII*QIuGAfKxe{H)7fxD&VFyv+3Gks{46v_4#L4rXQJh=P>vk#K^DnA=44J&(SPz; zbNYyxFN0=AqrZ-tpSJu>_;ghI!&&d58$F9|^c(b_m(h)WjBey+zB6v7RbNVY;Ezn`z+NxdJ+v(YUz4LI@F2U~Sp5LwfAC3I+p#wRn_ssNt z*g^GPs#o%%oOM;PJ(5>?e;f1G3$3|$1}%BFQ@4ydqfD1|o}*{}o3Kgoyqx~8Cx2t! 
z^we3s_EBEvn(F6su;q()LSrsDb23?ZM3slr#yy!vKxA1FZ{pK^A604 z=y^@#pczij`wa6hJjM~eBCCl|khE`Rp!Lo>|OS&bfY01e)Ks0AH21^uJSY1}3J z^|8Ns$>HG4#kO2=f92qYb>prkcqm=9;Qvgc`K zPqPGjBl17J&z|aa5yLEB#tm8AV{npcOKQ?h}zMOZsyqQtj(B6b|sO9oOk)EwR znU~-4Lvca7NA0eP8M!ahUy22zXZnGx7JNI3;Uk^bct*5jsh;e)71*F#7>AzMVW(|D zChSA6D0?MsX$N+JcGku`*3;>{U$#GO>4uj)emyJ4j-6LKdAXA@FJbHz@TvA4S*Tbip=OvWsSw8sCISlt>Qy6Z<0Ol{M~Q9H2zr(8?-HXBSv_Yta4=r{SYl&c zXu+cQ@N3*De;(({RlBr@a^&vMa^{$x-zlD`>gSx9o1C2^3+}Vy56syYYq#_~*2w7I z+3hU@FPW?h1<2v{2Oiiop6>y2(C_2p#QvYc`Yrh`-0O_5(z!pYUHc%`DeY_e@T=}e zS3Zhv`w05U*LdHZWB=`|b5fpoh5gLCrJp|Vs_8t?Mqcs#tXFqlYwX&O%+p?`mbiTF zVc6&Q_cBkh{Y~td_BlT0B(}fdp0~%F?3{aB{knrXGw8prWY-MZsxV#tI{NdM<{zjx z9S6d;iTytM%cno?N~!d{G{^6JxOA6t*8KPS9eTG;EPsAz%;YlzRVFyLfn)G{A9>{M zWePx0|y|2l9Es-VIrzah>I0pY{swPqB~D zIgx?f5An>&eb(HMciaVR-B%d_Taj^4y8ml?!;kNqi~k($>5Qm&l5u<=dW!Z6S`R8X zd#6rO*(KK;@?e8qLOz|mm^%Km*?#v*Ugvi-+29+>W%pa-Gvoj%2-~$YT|Oo zB}vP@+uVg4R^4)_oiXJdb{FR58VB>Aa~FC8?n2R&+V`MSRXkjMNPSfN;EqE}&famz zMf(;1y7G{V_WYOMzp-`xl0)^hr<_x%^yi|j^2?WOlY-vEws1gWC`9 z$T>|9IAG1Oay@u?zbcorN9@1(*lHW+^PKfgc;(rSm%v%Y@v1@(|EqvQfWiNWMI$AIK@PUZcDHV-Thke0C>+Gk0=^|PS^XY#BysQ0%=sJaYE9XA; z@(+^>oIKO|B;M58WWCF}(qUuYkZz=RKs%BxF}da@myzabZ-oDgTIy;GozZ9OEO|VB zSpJYa=1%m3163*TpbH+vN9sQA2*)RAtNr`)UReLdffyfZ9~=6(@sJNbL`Qkle0cAC z(Q{97)BGbkM_?=$=gpz_vFjf82e5JXa38mcJ~f-O8+5EW`mZ}<+E3h8=$K@TZ9g%& zPt z1?xD~BNmZo;=_2JZe~0e@buRsJe?ZH)7yvP>HnXf|GWHrAy0{)b+3%w`0Cv-herOqG>^oQb*Fxr}eC;h6gt|IM^d^K5GLS=!vUOk>J6J~?@jXHDeT zuhD-;tX|Gs8+FziJS&`lFs9-tjtw&@SdA@s1Y;Erg>$#z?*B7;gA|^% z@J#RMnS5!g%n^0Kw9I7PTQJ2~Ud40#mgeN}TWgHluvv2y#!o~0aTn{`o8YgV{tr|C zFl$Srk<`zC@a1s#3&)1hc54POxy_ON%{&J`v)hBi`|5N^cEBIbvn1Pyk=wI&}$91N{? 
zc5m6kce0Tj6#aQVFqWD|)9b%h+1ZCS+70`GPSZ{f?xxqrMEfZbU7z{dp*wkI7$tlR zYrRS4zv|*A`?}hS{lCF_k3Avdy^gWIXCDE-jtFjGj0ZJFaL~%{U-7$~@xH})t4y2h z$@t&h_|sO#>pA($O;0o4R>nJ#wx^mSz@xEaYI^s7)bmLt>mZmBh#!6W<@eA^i-cXPkqn|ol(FB$7ohV4MBX~RYq8LRqOz*wi7 zOE=;p{`zich*SA2_!YmQrq@*$|72^dvHuJBU*Q-X{ERKF9DV5EGiIv45#4u0HZ+T^ zn7v#j{~OBqPUY;YH8(5Z|3dw}E6LCA@5uL8o9;Jykz+@o5yq7<^$h!>`%5 z)2K6(I{kdVHOhQq)w(qQf~KYZD(GJIa;aC3UG{z2e}{f%Qm=mP0{@{k$xpnxChdu7 z>ummYg~?CcO^iTk({g{YF}go+f7%nOJC(XKZ8rZ@-u3YAJJXY&P`@+RmHOXlTHwE9 zo#_{UzE4@KuQZ$Kf5bM@zkt5pH0}Khnv(s!m$hwKV7UBy%q0Kk@X0EMZ$D)lI3Rb{yPJLj0eetwI$-{SjC+|lx}iScgM0wc-0+jRHWUt%o(OLE(mudZF> zm+W}Qu=#)Wa%r#^G<#mzyJ_!idvL#f>(A~mN;a-fS{Ph$#u@aSEj=WidC~ZLe>T-z z`qy{TTDC~$%1@-`x#XB8Va9?yYBij8XP z8@ax-CbeW^ahh=){;RA>GXiS&&(OY&?;pbVH96Q~nN!h@XL<{?zV&CG!HSL7q?ZI< zWF2ZlUShkd%pJ9N(^PVWHd4<)e{S=}<2^igo7KmwY1<9{4%^%QQ{Unf(Y^lb`)%5J z&{(%c{%^7WhsQhGFwc#vzL`o(ey-aeb3Ya5cvI0keeAv6xg|3-H?j};*aPBk zHrTQLwgESCS7&RnySq-^Tn0WD5odEH2q`y>f^+tG&cGEmVAyleSan!^NDXGH2}B?;FjW*#o-E)JM!; z`}#=(cN_S$jxO2Q@{O#q_#HQ}PImc-FL1Ia_IxiZ5M(XQuk{rw=fH=or6sJXC&=SE zj(UUk5#H1GB=1S)b0Bh06FEPPVBV3QNYx8ztr{v7r;cvv}L$+PnHL#H#aAH%= z`__U@9?p&jCpatibJmAVw+jI_hcmc?MeB`pAGX`@3Sx7xyJs?=q42lI$_BBIamM1V z?i8(j9DY<~F*~2ieHIP5gDc1*xR3fdwA~Nf4r3BIZ5NUh?{zs@~4TFx(3@^zv-wjuyZx#RcHU= zTZ_)tJ=c_F5I1ESw`Z6CHf?DxKJN?heK+uFUqP%#4)0ZQMo`1If2qTj^Kx}|oq5et z7e4Z`r`GW9V$QxauA?_c;;i!M$B9qMvE;TaPebWot2ruVrO8>|7j(CUQJM1}E6=>g z+#5N*>{ZY^=Y2~R`-9Hi;9b*^();w5tQq!!xgP5D+xdh?c(m{P5MN>8#dkUkpK?be zcs@};{0DY~zYwP;9$=21wdUSHr?h+f#?8^axEl-2QhcTtJ_{~A2XL28?bZ=#cXCO2fcss91-2R-;&e>%mO^FF@m zT`uyI5DP{g(8~9KpF@>7p#ppXiGA8l`^}uAnZ!{Uxi#6-S%=*CRQ0pQ$OoTuw+0tE ziA-Y0>}fUGHsZcYp+(W3gLo;?J$5s{DcS;mF7RY}e8ZN1W~5@t++G{8D1G)cuS0&V z(BTr{nz$;*D_*~ySS$I}`gqrvxFUN!bZinsRYJT+Gd|z)sTRzN<;a_06g~*NiUG3& z-!+BR#9Gy4{}bbEU-$)`<>MQZQs2(|eZ+woz^!>rqD}FWL41^hHe+p#z}_In2;W@S}Qu+8}4}Me8c= zWg7ERLJZll|HIw8$46D2`~Q2-aM?2xF1Z2;GKr|28N4A0Nfb{epfx0T;bN_q(*!*| z%>`|P){3A>p!Ni#Eu$1{>^TYA)@0C%6_jXu0BujXsD)~2wf2ypwz+tLf|v+6-}h(F 
zY=}|mIltfQ_j-L_zdzCLmFENzqeix#|d!{_B~_HxO-S{uTV#Qb>rjjJRcb3PhAWw ze)JLY(|LvQSlCYAr=CUpNPyox;6-wo)L*9W_Vxl3zL9U*t-4}GEz7R^& zdZ9h?B;<;xA0Em5j4rDzm$DndebZ=IVPU|10?4NxYNxoSP1#pU1Di&&_yfr$FO_ zCI^sVzsm0@>#f`zThqY!MW-7*MqW-pafua^9`B9W>)kVvcme)r^Zzf@iOT^#?l6wG z^Bww>tW$3po96IdG}X<0RHCD9XjVK!v?V=Z5%5ezW`7>}oV+W2$hHeQ-C5B>o-1Z1 z-~P^s=JDMPFIPNwk=rN~&w};^^Tr9}8)!o=RV<3d{jzPyr1HhXrf$Qu5SdKl>tsy` z)+>SU5OXRRE4XuFGG$%x5sgNhI%-(^bMPMG zUI)7_c!+8(baF1Oq~6D~M5oLX=QhQ@=)d;lHt9KOUvXT}UGzQ5hKFI#x65bp%(7#k z#CMSI7tEwD?U(vbeENcN#Rc)rIhweFcpOkKx!ST1$K!zB;67Bfue;m#(zebik|jiA z;xmp8)B|tn-ylgyH zZYO2+TsZjHuRG{iLm$UBElad7TxtK9fL_q%$8VJv(B~c}28cUy#tnuKA5~%@<85i& z)#O-d1aG?U*@+iiuXsV$8FL#=Vw}t=bH=$GUHsRmFFxtuS-hxcnlZ49eA~U?A(fcH zkKEqqA>w*9ugU}3iyrGRbT$|Vs~K0gCqDPkJ@JP&9+K5w6>X`V!ENZC2(>SMAw0j9 z3=c}c@s`%lUzwvsJKs#q&CA3U3qQ!t+^4SbWPwAYV*xT-1@VH18Q=SVXMW#_ZLGw% zU-WE*@rj*vGM;1n^G?iQW^@Prh(~cRXte$<;tYr2LG4`#?6i|O2W3U)0E^^X`4cIQ z?=ZL`u65v_d0x>K+c?0OG^g_S6V62+mxG&Qj72zCf7zXJJjHQ7QS@Tt$v>p8sVDkU z93e4-^fQZRc1#|4BS&HM&A-GpHl}KS4tLCd(MbLndA$;Sjq>wJ7wGgciF)yIOpN{+ z9F=euXj2TR8-8IL=7{Vw9(wPzpZ zxyB1ssi&UPPb&RLPwT++BidG( z3id+#+=!g7*u}x+&!xQJaPWOL?=^?rj7@seesHQbis`4HwGhCTqp>)BXxzuvhuc4b zE=30e_?UHnQhp+KyWAXS%iTfNn=Na@qe~L9^l2d;ZJEPh(VHBkA9P z%b~4q)|qHabK6CZDdAr0=SAvj9P+ix{9SDADE3whT~LTPi9TD77**rtT;}nNYMO&j z;gqq)V(*SnQ!BV};t!(uV!AkU$j6xYfLEU|ePyG+Wkh~gwP^DPIWK4S;37(aWD_Z+o??P_;3ubCPO9s(Mm3ExlA4~x7yK4YujJM=kg{F%G{IjK-KIy(=}x zG<|^m>^%c`L-E3%U_t0j34<2#5x$u8)lfxm(MuhPL7(Kp|;RgE3`N#5$muB zSQYC~Wm@>6&wSOBW}qi9a%Iy-AJC!i*o(%V!G6yR&?7b?k99N8l7qC?8RG)ULGlH) zfXk!#Gv(iCvHUf^d;I*Ih)_65}U$Vsr4Z1)=2fK-5zK{ded#dN!(4y z@y!GdJ0DTx6?AdKjJciY^Zb8+r?jO#9dB8d|EfV~2Mg(4jov5+d(SO_ZzMEv%+e$g(@HG23&sP~6g3j*^#@mxm{vLMP z$?~o7-*!7=2mPLrcCw6d&dIuIXPo@~W;{Qa-Pez&JI_}cKS@3LyTEJzwqa{aah~rs zzB|GBy~-#VROay$Fsw2b#bLJVW}kpzm9cElSiU+4&hrPA*?t0s4aR*ZaI(ReaRUBT zM(2q#yNwNT9LC3TqW#@Q;|K@N-9|;u$=~D8b$)Na4tDbQ*!XW7=3frN^SdYNt}?zF zA1{6FHdYV9!!HNT*V%*WzH|a7yNy2$n(H?QmDwD}t=+GYYr|8*8n7D5`>o_l20ZYB 
zBJ#Px-{Sg(hN?o=PHiJHg^}LUtTkkqKIbD7JVvDKvgqrAk|0$X4leps{@TsH#yD6o+eFVFmLa~o$u z@!>;HkuQ*YkuCJyTR0o0VFSB?wvV9Cm*2Ja+c0rC(#=0ZOwUK?`ni`P>L$O5EmQDZ zc@`vNRTA%GRxRDETzhTcz^utFC`Jw_3Kc-nO(-n>_g{a*X^>)Oo%St99x@9TP^O4zDD-sN9ec3HLhaUrVX1C1TnO8E0-D z?QBN|v7md#$<+k)4;)|mW$Io^8<)`5EZUrjE}Bf^E1 zJTKw7^q7^jfv!C_!2cfF4&t|bm^Ic;yRu;%=H6AERbfTc4stIGI#j)XpfHW#K*O*`a1>i-8|3m z%#(-(j1f#>{;-}U0nXJgOqG9qgx8EN9lgP;AuAAff2o6kWbCbZ&$ zWE^fIiUv5dc-x(oRj#yI%sgy{Lt#`7(RuYAP$S+>UDw!)@Luk_Ex zz@I#}e`f9D;7{|^^cT*E7Itphub_?#d0O`ifA80^wc`0(frYslu%Lwr>}zX{{%6jj z-zCINuN~I^Yye(c55cFUFKb(ITEEtGE4ggj+!G5L`7IxbwiT{Dt?rASXm!uv(>!B~`?UVf zamE04zkyD2{%m+Uw$@`H7ZCRpEFKr>M29b*h}eAM+Ofw&3r}=Ds(n-b3DU6`Pp}uQ zR{!lQ{@K=-N{&9h^ee!4b^Ebv7Qokr+cL}+a;rLV!l$C?*ox76B=2+skCo0j9XjdP znB1BDS}$#%L%$`+|AMn5n{Vui-Lt%ro@uU#*9`x{Iun`yQL{b3??cFGitXxda<$+3 zNLD0PuAG}A+S|w>kufe@uy2j>osExfyGXg$u;H>MH1~VC-&fE3Sp)c>6cqI!d!X+s z<*tO9JF8CIl~4-b>p9E&aQ-&-U& zy{gTyt?q`f$vdQx|LGKL*Bn8;c)POe6&9G|L^>7Cx`dY=UaIGW9Ic4Z^iU44O8rS z!@Pjex_BAS_99p0F~0%k*T2N9U5HIG;0qTPj3Lhm?<3eIyHm-3__lFn=Zf?vI#*=z z$>cME&v-uL_+;=&dm{Ftw-BEh$@m>NQ1^`slI9oE_A}@n8kL`ddAiT$w`tMy-bBB; zln8wIhKId;pNm-gI`X+t&VuhDCk!ZdF@O%d5uQDhF~nHcg8BAg71N0^hSrS>wBI}9 z!4D>+_Zeo^1s1g0P5BIW#q?gg?6?cCVTJ@#n#Mcaoa?V5CnUZO4j$s`C;bBbFJTSX z_05XuZzTG%c(&ISf3J6I?yShy{dHFT@_Bx9dUfj(?%EhK&JX_#FS*soTVs?=SHI5K zoN$n$LLGtfgv{Ug(EcZTODQY0zo98X5?D6~1 zf5R(Em-r)%_+!kjT2^3rJWYn_-y3ro2M1DG-dK}jOwUZ`PWctcYRpCcw8rMnBX_@g z=VW8G@lE6zbkChr`1p;1pJ=|xJLo??@}L!F4{2E2{1Nx}%d>F5wxo8YTsJ8^5h{PwU;i+e(-P>K z=MkO_^ps0~%-&!czj_V-BEfz%qow&PqmBF{ldsCZbGB?aS<%aZSvt+mi^vmLYHTV2 zA0A`%Ji$C0n%?X3gams}5wK5P*{n0!$H1pO`xxIJQ_ntkKzGhu6uGndI`O5|oalaQ z;3)Jw!5rk7wT(Q-_h|Qh_xo*sm!_&J+xIIBetXc977l3YK%k2B8B3rEjiiHuNfdYh2X`f|7zYww+7 zQasSE_As5D`e@UhU)rofCMkQp?+)l=PZ2t-KFU0L4KiJa*+%2^ z(&F&k>Es!e%>EHN(Py~Z;=-&UG=+@t&gsO0f#dKEIu9Gz){N+X(@*m_Y%Anu`3vtW zt~WOR(Up-Rnv)H}V7}L6UzL9FWJZ02d@RB{co zmNnmcrgx=0?*TVE&o^@K1y3KLmlPdfU+lMGeggEVxv_a8ed!FX^}Utf+QWxo 
zTWEwvBnyf*MZfyLcflR`A92?1L@(_zvMwm1__<kxYQP2@Uc-}GQlY9rr8(YH;TAIe8!5qJyeZg=oij@(`NhsNd#d?imu zZkI2_&W*9P%b0K7?arL#lJ9pDId{@Bf~ThE%aPlL^1zAEvv?JEF7N1b zoa1Hxh`CHV7AmK|?p5;BU=zWP!8%r%try4Fd3S;*7jaH*<*q_(>fr6ud;SIBss6n9 zx){7pJ?u@rtVzyKO|lWc!Q5#4G1j%@TJi7}>`4oe9XjyyyN0+pN|^T)BZb#8Kv=Er$F@tTo4p{LG6X2ip}TKgJ$BhCt$ z(b-iO6=;oWUY)fWt3&plWeof`?f5O@sZQ3h^l%k<=+=O19sfJ)!W}j2dpw`IHSzx& z+6SRsYez)^IL$ks`8-M=Il$lIBhFf~5cS?6em46|BM;l>Kow_(Zk`2}qsfcZ@-N>nYl~{f+acpQ^2w$wNGJZP~EdZE-&~ z_PHL;Q{A^p=9pNv2|ugoG4?eZKjVCYJ&cc527T(8=zNoFoX=t&WN$iE-{*Wm-@XJ+ z>akZJiCmMvhPX@eeh>JWXUU!TSq%8A@(a3wk^LyS8~E-4rrXala+T+-^QHKHa4t4M z@gVf5Ww)bi(%un3rz(2gJ>t$S50;A|rK82D4 z#PgJUUv&CBIR`$#Hy6LHzKYEFdX+9>2J2PpRO>aqRwqVh*A$*utCBNLtk+q_z#-aj z)@nU#O6yhY?LX-Q{*i}IPsb+LMBg#ywglZc@+RkLe{?BpW1`vItT`Ew4utnwD}qsb zbP{?T@&4GKh++9SmV0e_e7ziFt%*Jc<7cScivr>^xF5V`w0}w$Vg+yadQ6W`G&&4i zts(#JsoK36KY^ihI*8thIT5``H}@S4m$E|-SnGivzccdL+S$4r|JfVw52oU z;J)^O>%sq@9P(ag`BTlqIp_gSF%L`j)Bh>!Bh#Fse&O$jz~3_jzSbe*KPY&iY1xw- z(E~~c*oY3W8y>BF{E#169h&pw2V#tExvqOMbdrvK%(*L+@u1^6xthPa-UWY_i1sSkC50f;yJMI?~l)tt=|=?K{p-!(m%C7e%{IZw7TI5YSX?~ zi~F=5nPmcx6+P8`TK&86M~%kTI`(kvGplAUD@F%vpCeWq#3P@ugM(Y?!ec9pJ;#@l z{|s0HY_px5MWHUHAkLPi&Y)dyR=ks(g%{a;-1=H`_9m&*(r`IdBGc zto?3uzWAY0C)E04`DV4#jW3ant7EAVbZl!?AAP*;rPys`kBnS3sGd_t{Z<}}Z3-Jj zo zzVQNQ@X5q8v!}}Mb0zwXYGSXP`+)ZS+^Cf;bsg*Guh7zVSJnm7`0wW|)WNrWY*zu# z_OsCS{LrwUSPp#>=t@btbK{T_hp#1vSI9-Y1UTo4UK_=#IrT%6T!Fe$2!PWnH>IWZ`HFBV!o#F z82}fz1IxajvET6iB6x~;mfdfMS!-gC3>H*HTAi?;2Eg-`$J@f4K1RSf1jwwI6rz5l!MN&@qQP z*YY2I`Z%?vcWUd){MWah&EmKEcK!>7f8yDX5@j#qx7N!v)|GSQngpG`Mbs?j}S~4-E?h!L+kyB5$ zCD}q{S32B}{Rul!C3k$NZ1?eT)4#+2A5Jn-j9hrG zy^M=|-5ub{Zo3Nj)c%5-M;53rojt30SEVyFcrC%#P`W;&zJ6Y|;AhO8iMk%1c@K|? zsQz~B-#c94CHR*X0^1U`p}u*yN3f(F^cM(sm2n=LTuJ-g%W2<^-%XAF2e=mA1;b#w z+(SIqeQ3JxY-pZ0C#SCV)({W(|bJ?`f|Ea7%E z?dyFG_}@+22k2)P{p`dSV{q9Jx^w0&V$U1rDvhg^I#bR-n*jZZ_id+dJ#Szf^ue7? 
zqnZ|ZjL5r!-4iZ+g7)=3Oud(Qw~GIoN5Qk`H^$r+a3jCvN0{q(-J^ih1rEnXN7J`r zcHigsyFB}r=9c{;2VTFPT(dQ%D^y0l_dT?CFaiGo+7Z35&n^)CtmFH9+Smg;b{XJV zsQEYpeK&d^!PP~XspwGjehPQIJ7eV8o`t|ylpQa724&u+jOtIp_vGuE58A0@+-Go? z_od9I-shY>JgX@!T#&QXnES7k*%ye@`FElJ2O_RTwu->im*%yw5ZaKwq^r#BIuMRczdwSsunWenVqsIW(ad zmrCL|&4%lboo*5*N<6J%mQ}ZWhEbd8N-9(SKjjs0atcW2-vD2I1KnCVGJs+?bpPWb z@VoH!JCCUi)jyPGY-&ru^giPgoHLKc^4ge#?o=c1JZMIA&z*?_52hNM{ykAv}D{r2-cq{9&=nuncqu`V^eL#0Ewlo;ETJtx7+orpQ&F9>cuUw1DqpLOD4qt2H zp2eIc)t_loczcQalyvXr8PG8Iqj5(q=N0yeR(Smtf93vJ{#T$6X`2ZSvW$VI4=y_{ zJV@T#$>&%Ry0PC}cKn>K%Z^{M=d$DIG-}K0p7TnV)6d5mTQSK)4DiQ~HDA~0_W8WT z^d44h+em0`^045CbsaFX_Lvq(N1bJ?e^V!yhQed`MLugmdM zo73Jixr}|Sl>Ke?%^zG;ax?p#eeNMgPTA!r=KJPjq8mHrIx*j6cMVG|J48&#@sU%f z6yQsrJZrk^es3TDqtn9Ey-U*Om$#d>CD@zF z&*5A4sn?C#SQ+J~vi5Ijo%aBH6zgeK&h8X@J*7fZ+56+`%#xmb9Xx@3(B3;c6MN?h zGxAmTOn(~uGze`^o)ff!EW(7Gkw+IYZtN3x|gODaS`y|Em=XZdz57+QEfu?6OJPhjdS|_>uM_tySf@ zaRsVav)ELVDkhybaorU<^IUma^olurNYa$TR!b?gg|&_}nq#D=Tq8`ZzC(pxfnF6#cfseP9G3mU`e@SB;8p_4mO>+iqu zM1Mha@ZPY$P4np{R<~0QAcn-{RJ=zwl_Hop9?Hr*1YQWFs2*ciRFdtaeHgJ z-o~fHykkDdyvr9(dbfDtq(^~caGCByu{_a33?H?t ze;?cQFgi#5Z%TZNCL5*4L>{uXoxW=}vF8hyR35wW_4vN2dDj@HQTG9r11{qCLnY`p z)Q=4Vx(uC{Jj?;SCM9TIk_<1enap#=X*Dw5?TVH25I=)1!-?noCiE}bG`LsQq05hX zCcT#8a~BE^q9Jg_ddu~Gvc=w~y5QM8z~SXQGy{3YN1uz}<@e1kD=Q^$z1Dq_=)p{? 
zDV^GKop^LHbm4VvNb&QVb`@_wHM|4gdFoog8-qVyN`LlVcQ7uOJh(Hq>1xK@OJB@u z$O8=AmudU23J*Qw7?bI#?nGX&&luNx9*mM^5N?)et(6pt>P>+D(yoVv49u$Sa) z=Uz3-+YQgx*-~S5@F*M#HsSF`#(M+pdDs*5pEJIluQ{7s9v1QF>SG(v^(z#7f+gRe~*vO zwZGG&SK8kh(aZUsxGko8B9B|4zx`f$P8|8dADr`d!DnubEx@mI+zs1eYj2U9eI`08 z@Wwf+_UG6yIvHa*^7%_csI>XPxmH>5u8LQ4z?1S&_>lvQ`xZ`e?&*^bdT?8nNk$63 z)*+pHrNgxtgRLW6IQcU!b6vhh{KNyp zD7VDN8t`z37x&M8av5nC6#H%4%@P0T?h{Gy8c`9$wN zXj}RcgZW$b7BL9YHz9)v7e<&k?m=?M&#*B_X9Ztf+ejO_M-Saga^=oIlVpS{@?3c* z7#-4C&gQuXJE!nt>$$jVQMe~&FM1TXKku$V%VLc#il(DaE#|OBQ++Mmr&RQ7v zFL&;c8~1f%kCnJbuFEeST3U3}j$Z`Fo7lHJ+#|zX?`6R=cV>Es8xM88!%yf`dZ;q;qg+Cr!Fni@v)$V| zoAX>}vX9jcK1O}W_O=d$J*EUdSw|05%-+t$_PdAUk1;I7UbNajk-Zpr zD@3Q`joP=}si7QjuJ(`7?qOsv&N*rNzn#9?D68kU>D)7)J(hL(UGUvD>*~$uUoX)6 z{bk{S7kJxchJ$A$h$dFgDsk91u4A=)}F zUqZ*FxRc7MjhSU`Ph7{vn55$hLVGt$r;^Zdsa~aYT<0ZaE#IUl^ur+r7q^-cKf2kHktyJL`!OJ&2ui4}w{L=(P2I<8&#y5Li{M{E4I z^li^C^CKNs4fm?qIxe+2M8~y=cRye(qLINmF4@8!Va>#KTob_KnCKbweJ<@E;`h7Q zuT-aPOKj~o(RIas7`JPzM^30QCxrSwh~<8ZaU4jDLwdcTx-RV<-$EBe(Ac#Cfe`~(sl7{Ppx!ar<0S8d`$*rI_XFCwKq9BGwHgJl_SN>tKLsNm%|Ub zuBq7JZlr8^Lf3`9ch6X3?5<99U4H4hejmPb?AAzT%rt7pj97JD2l87gW7Qb2n?xtD zeuU>JG~5f1J_c_3cKZHObi*il3*dUe0aGLD%Lo2kF|V*Uh?nD|JjzvDGiZhl`5vXXpEI zba^ev6J_uZ<^7w3PDFB{_0$;=>-9yOJ8yDDKE#jqL-<4YHw<#SB!$*-xA&11oZW#% z`P#a!3?G+2@z0)i?J-`z{aE)mlmjLe{G~_ZvXSNUbKZX&*+uQe`AsT*Iv#jr2016W zmmV0OBfnyJOq_4LT_-*{F)BHr_%E>qsrdeP)At?VrXP6TqU|H_E48P7#q%6K)(@=j z(Eq!@_*2@fAofnYs2>>Brt*EN-G(<}3y2%H=X&5kEKfcX;9#oy*+p)G%9qc!b#qQ1 zYG3*m>6+SX{oI7I7SpOx`8k}^;^&IOUF7dOajtOc+ImmU7Y>YqUp(ubS?CObRpURx zvkx^!!AyP)!R+ASLu^KZx$8Cb;=t_aEgRn>&jIrlY?vGgy@%d%N#%(+8T2W;ZN23R z`6vy8KMiN!afg1%{JqVZZtX+A0>8HPF4ALC&XOJz+I*Ta_0nVVo@X`>sszWn%S-gD z`l4U?WA<_ukWC%kP})1_4TR@+SA_8ah~FXg&Mf&5h!3VyCw{Ml^PaJ@?_`7=x(>S2 z%H%)9nWa{_(gNtZG-uk=E3U0PCR!6s318yZ(qHzr;CqA)v&bA)TZYV8I@e5Dc6`j- zQto`~Z817lur|?q?v~y&J2`w@ZCJ(Z3m2o0qpcuk@DOM4pzc*8|ElWGp#E&?chOG| zZOAv*FPP#w)ZLx5q4wI)qv;;!AbPX_@zHJ4nI`mT%6%c&XPf=gyU7#hWi0V=pqr$A 
zHo3Pn4nO0#k?{$KL2#(?Sm^N>&(zNw&m3nw4gc@QvmU-F+}Puxy}{$*_h-fXqT^+{f9Wm+~#9uciRWI)eAm^#<~FQs^$^6^*g;pOJN*j@Rvj-@Pk-cRhV-EKhUBrES66g`cK) zQzvVM*iP(VSs~>lUBnvSN-Tk5;SJ>MhE8;C{MY!~?%w1jPB_#Z^93sz1MxZ?J@n65 zQdsM|&-oR(na2@N+JH=gPT9I^_L^&R12#Tcd%>o76o=r;$Dr+?7K{4jRIL#{<&tN04z<^NH^%Q?i8 zUf*F-H$dJJ7qq{E@`ql%Iw<{TZWlOOK>3@{8!(=(^S&HJ9_?5|p1;a?UcfTxLxX0j zZ@y{zdcmJ`Yu%4Fat~n!c}u-D^4koO^ANqLZy9+6(PNgS@w>&HR@3 z+ew`y{L9TeADfS?N|{dN-CH%E{BIy8sFU|yZnLZm8twwllX^~a7|^fgv~v!6PFL2+ zHt|z@pLfy;>U{-mevLNuf8pSA*a_aI+)?@voGOQdf13TDvDi7V@Yh@pd|PK@3rAK7tI|jC>GauagWbnOg7vSvZ{9pRYbzbEQw6z+Nm}Y7Mdq?V^k9 zMh`jKT4d`{4bJ`+@`^|8kRKyxHfC-`AIaXlKrrn@Us!^^@Rh>KNz$8TXLIggJoS86 zvIlL=Su`oiZ^_p)>Dz%h4kK}KBaA%ZQTlMn9`9a;9-nMb=}pE| z$NE0^wDv0keAJF)p?ho}_xL!spZ4=BzNGe`U-{_w(T3>1XCyKN&#Z-p;$Ay;;&-xo zxiObB!a%N0Hsg!}~mRV&oee*S&Z(`5ujE7xzhW_Os_$ zYf8`9!!|w1xU>c|_bcC1o=Ne?tk6;Ve1r38<+%EeN}ttaq=!3_S$7FoqOT=j>6~V4 z{4x26E1^N>e#KqD4X>J;cGukxT9`JB3E>VGbt~OzO%}g}4|JpQV+b7sZ|JvcF4um{ zoOgpi=})?tNAYFF!Sujq)t<(vSWd;@Y5lAlp|htQCuOgns<>{m=B3Z+e?R?vUjJ_< z`mctD$?KV`KE>dhTg&(5(Zo~AOIi!*^s!Z1nni{x8JJEJH>R zTspf5{%Xe9$^S#dO$iQD@DM8y`)|JC7svGtoJaW{NF_Gc7w&k0-`c-A8GH4v*rwz7 zb8n}8t@oF^V+-DZ2V8e@BYD;BJ%xFA z8JdXG-^4O!AAj@R*ji`L?t^|L159BI>m)PF28k?;4hKKc;p`vo&PXhH z&ke_2!7}6^$wrcwc0P-3h4<~8FTAYJz|G_pVgK#@V@qa<>7Q0KeBxq_uf7(W0e;u| zE#1(cTa>3JHB_Rr4|eX2-->Pe9&@EW{|k5r)7r8OBM;k8nru+oKZ5YxKyqWIast`U z_#fnXfW1KLx0iLGe#%)p_2{OgH=M(FiR6+|PF~Cj(bHHP2KORs?-4I*=fC!|_c*VK z-)GZ)GxmdJ_-1O1y{t%h*kCV-N>2YY;vWwpo_(Ry`X?PI&1Z?UrIMD zIF^ky@~U|*y3wA3jk!bX&H|@Lk(X5F2JAxESJDJ4JY|9Q&!J;kr#+4~G*;oB7?6%4 z_BQ!|il=Fx(_G4S*S(ZDltiCh@Dk>9Zuh^X2Y*EDmT)Wnr@n0&`)F*_6_hPa=1f6b zL6dkmY#G(qe`UX+kExx|y<`?=A3O%1Qa(KKAhoZtp*Js6AN7eo)OL1%Y*RkvG;bQ` zk97{DJU)&!O^iqNdcb{vyI~tS{|L5Y?7@?$^Tuf6YH7>nujFU4(1G_b2Tnd+or6-z zp?e=|-fyJV)Wc82JGF+yGu5a3VYM(5!?()!z#Fs=XfN(YZ`4y~k1<8_Z8US1M0aHH zPV$a)Upv6@PV6dLqrYs9J09Zx1jey}HZ_O%OAKf}Y`(*|IS=5|34Py1+&%QI zwx8zC=_cB3OTo7d`*G#+#^xS(9(ELutq*sRpD?alPmfOj8L(1ceImOo-nxdIfiIMA 
z?#3<};6C-r7yO%VSQE>;7g$8&+kjIvXyXXFQqD=mz%(LjXr4XfW)ux-uDz@|Y>|Ca z(6hfly#VX2ksQn94cx|Fe) zWjj0H9k-p;V>=t|u};{|wsR+fbd4%M)ONO2wliOr@6Us5XV|`M+u2nK+u73d#(v6n zCcl2!&h*}Q&e#*Svr76Iz;=f1E%H7(ls0<9 z^Nq-1?$A=MH^boE%^B}w_wZ0Z{Fr?Sx!V=J0v;H~?;_s~olTw@lr6?I$CU**TNS|@ zyGOc$??Dr>z34l@(;eiv4UBDx1lOsl$>cd||DnbzYS@Q}(I!-b88m9J-#i6rBwAK7dwiN_I~k_E;4_3Y@# zmdwBP#@6CbF#X%#oH##|@_IgzSUU87HXjnaJ=mnu;UNw`YIAwYV%V#Wx*IdU!8-FB zSv7+7PS#^3_WVrF-0EL#{f_p#Q^Os0eetVQqeFW}U}U%>E3ww=+~aEW+&+u(uA99` zIa{@F2>v?kV8~z{lJ^40K6(cZYA&Zw@j&S{R3E!{N9^BYYgMO2@Uq{kuKJJr?2N-_ zHZ{7Pa<5~DZ)6=?W_n0%%MW=n?JpT=%>9^g)G-h1nNN*Dyl_1D6Yfgf<3kqk1?q;U z%4VqjNW3Cf8F5$V_eu(mV_h0yE3wHRoO$X^o#3n%J-HCF7K{8?7 z?>IGDN6r+{w38dJi}7OL!@v8~x$)G8mK{C?cP`klV12&NgaoA0*~Yd^q^q$h@kFk1e+D3O;P#ZGL>=gYuQL@oe9tO|ZR|bEh&)Tq2K-72WZ!VX7ne{LST_=5U0Y1uEw0p> zUCi+q#=80}Y_^osvuf(U_Cjvct*-PkXr$1oU#0rY9r=B0l^ZdKHjXcyg)ibvY?{UG zAC0`EU-sdc+uTF0r{f>Oe-2;i!k?ad+rf*I4@>^^UEosoY2p4)@P;loHX3Ne;=Zoe z_}?=*9K09%bPXTIUN+nGm09T?pY|*3oTT}E!0j>PI#~Qgd>;IfbT^Tm`y!#6mHtQ1 z<=50UXwFQ=XK?3*1N(=ZJGyNyHe@8d#jwjY1JUNJ8eibqhGH*c+w%4-Z=G-|A zMsi)1QG4%H{FYh6o#=U;{8_BIfo$eNcyMCsu3(=FlBZVtt!I0%iS-@b$2dZq{nsOJ zamS^d!}m(|!a8$I-=c;eG;??6uKTcU-$yRq`+;K%IfmzuODl*D<_gNc!d|(Lvae9~ z70Ui@6gENj=_|MOVKE5x@d*KvzEx=;RJ(rYzJmPM|$E{W&;^&8$k z7kKe=zob>ave`0?UDj#X{j~Q(mjypjjNmrg{;&Ef|0v^@AAoWZzl2WDFq4|*kT3bl zRwHsB@S!h{$Pc9Rp1LPKXdtgKwlm|Nq9-hVOXr~+#M_7~=%Njs=L6(<)}Hb>aEJzj zo^;>wggdh5aH`@2qhF&h;97TR`=EIqz ze-sDUZyKw{|64IJ;_yX>3w+o{3X7*Sp*urA7LS*}HX6Mbdh+ppR}8s@GS_q0MmaGI+H0#%3m1&y z-OIe=ZmnJEj8X5lF?UC)TkQ`QSiBFwSLus5?yRWfEPV%f=cj*o-U6$pvOwqA9`tA8 z5BtIM_4p?*GP8Dl>`N*;>h?u_v>n3cuf16OMeU3C>A!p#oaf57Sc)CDd>ip7%t;M? 
z0n(2Rw%O+JTz1bI@@dWCTWitLMfCyO>HL>IZFE^p!7k*vx`pI?0&bPjd+on9?xdzg zjQ{0gWA2!mMFsCtu7z?c(?97_mQ&{dJ)5$P z$mZz$b0v?5vEfM`-f96h z#22b##C1U@^|W8q5RVrz@ENLRex35(vg(3T?s6|>9feIJglwIx`T=ldAYWsP*d?8$ z2fovT>@dFewE5?f3+P9Z72HPIcyeyuuk)OH9DDuvP)dwFft;2ma)Z`ycS0sm{9@JQ zZRp(b^hVz%`B3qTvKg!HFzQJTTRQULOv!+Ikry?dIgGP&JU-E!;WSnwY#w`-Icmh_ zQi&|_+lNouZN7^9@E|mbPG>;@=MM*_9QG;sB&)uy|8b8mn+R`^Zua)!MxMrEz)K2{ zVbF6g&K&trW`3%XLfn3cIZD+W5eJkP)Ev=w1#+_X=b{zFo^Ymb;_U24X0?7x-e%?k z9{H-yN9n}zz5hsT?G|Ff+yASjxd{KF<&0PS0iKa2ShObe?ZA2&e&CtzOkX2sKgDf~ zg2weOOyBnn8tWa%cwc3#!ozs(5>Xz#fO~w6Vleit=^;m2o(ZD0l$w@ZXUk{-ye~Cdfem6yvU8)@wJ+L=8jCy1msToD_{)?_XX-- zK4+qP`V^hE>KpoP;JXNTiuH}`SOa`Hz@+|H(sw84J^48V@WJY3|0q4-dqoT+JV1OC zKe0ae;R1{MmY*diNBhEF;1ciZmJE!(OnHviz|+<4N3X}$&KiWH@yL5)crV|S66Ut? z&y|~lId34B#%UFK+uMgvcQa%9Ds4VY+ZVaXM@lBu2bqq&kyt@RUNPR6G}#Xop#<5e64*WP zo^tdG!Q{qf^gdDl%l^&xvB#F7|M@=qMb?;ePPpJc z_NkUGv$lb`dTCU+Q01=R-Uj(EDHiP@A1`@e$1v7+p~W1Jx9J7;@3-8^A?^@6vt<7}zNuS<%oqvNmWjPZr&a5J7x z@Q=rw79<XWHB?KVf~)&&!q(xu12V{wI^0K)K?E zGp=`7w`~)R6#q}*JB+WMGWFmhm-_m?fbTx^+aC0ry>8-Rq~pYY6CEdcW{eK(Zh2== zW(s%O)|sh&=Yh8!&`OP0cY&bm<+tD&Lz(*z-u1vO_|KM_qnc{GqnbXjW!h1;Oe=X- za_v@R+E)@XZHqaoua){|FR9+lygapt{g-^I3+ni;LoVIHZ@~o|dGfLKAfvYHyp7&% z9q+_D#A{pmcFvSXu8H3nJv6_&?6^Tbl9_Wi)HiEQr*CLzmaXf~KgIoP4dTdkj&l4M z?_uH}q|otvU~< zM!!Y5M%wry|7Piix;T520cu zf_J5*zQ}xAX6rl;dnR!)oP)mcAL4`6=n6Ezeh9uqtS;weot1mIN65n8-2?waHx(+HpnOAq>2MYhBe|h8=d4H* z`7Gv;_p!#4)Kp=Tx0ADUAjdP!Q_;?T;PH7FdpG(g*}iZ6(MfsAI`E{eGqd`ioVSwm za<8ptB2On{uR7f`-@#MRG*f6NWSC!?k31N_LJ&!R|q)8rnvCL43cEuS4WUC>?Iv?f1aD%aKESIU~u=F(3WLZSa$Gz)Ls# z)DC1H_-#~t<5J$MPwCgxXFWX0scZKQ9r)NYJj}bsQpRtcJ8F`bXWmYph9u*_@0&;M zZPvL-ZH|L)Yrh>pZn?hBmvtjyJ8-r1^SnoP=-WmcJ=7}!R*SmSdqCqu@3iaq($D5nck-xP?5WD3y^Z$& zZ>EVyp@G3PLBQmRc^ym>ZQx)iO%(kxNmf!=Y_~Ch?RV|7*G!XVXO)bWu)Tw8kNHQJbI(wX6LSx_CfzQ3p-@ zWM}x!@bG)z5RK@6qic;#BmZdblbCz?n*ZO<6aJ_3ANc?A{2yE!=L`Rz%s(_Bo$#Sl zW5Ce`JGyu6ZL(82xg1>BmC#A8?sRXx7#ZM%?tdNhDt+xC?52*MHo~0<(&c?f4DsMQ zU!-Slr*1d;b?q4@6`)%%U{%P<5bjX6YqjyxBWVnIuEbe+P 
z_W0}^Hh#`}OUBsyX60z3<25s!FQ4$vky|g;ejEY+4u9pWe!zI@#fv2$+Q0w(#fvLk z^%t)uZmp8@x8l~+MjK;v?sn*6OmW*tTz~c9WbpK2T%UcJ@bm@v_md|{_>Uo5gxNRP zS3{lixkqBM5xI%jZQdVTsJ7>a3&rydc*8%MmnrmjXw0O3Cr6y*l#p`Hz}s5w@6=HL z=GakP}p_Xpk=G+j*x2{k@bE#8vE0s$&faC$R~@HqM@O$V8t^^v z<(N-Fe*-sBSF*SjHVTmk2RQp5^xV_Z>=}m7avXx|Eo;1S zCU<2^M-$KOAzhuGDHr!g`_WI4yG!Sp#($m3SjRQZP7V)1XHOYCn>i{x@W>~zjpQ9| zGCkpe3eG%b*kW~NRr~)=-heLp4TDGW14YlJZ^3!Owh0b~?x&2=wk2Nki_f4S@hOq0K(PGxuR>q+5k6U22XI$lK?}Yyu#+1lS_>WsIV}s|t@Ic*JM*D2`^JykD z!n05h_io_lGOY)CNt920kGu`|p9xo5`}NGPV>26U?;EP`QQuA*U#8DA+OO{o4+ySw zes8*QWW-B*YkKJ$S_|dKW`oYub#HS(@Yp;eBl;Ebhcxo(QU>2``wmjA#S-wGK{@>v zew}lP)2GhMi-<+DdFQWQyx5P-3LhKrLz~91`iq^l&z_Q7#Qsps{H>{i zSPRK{1Rt};C!VpdSnJMGJ~6ut@v{Twx6J_)njgG&Oyhg=ZUn|z$l$}^!Ssbb$sgTI zes{+9RE14Har@jSKZLh327Aqn{ZeGjFR|U(&%QUW^lA3uS?rnY#fPuNF1vIX_h6cX zY_s@*7!Q1IUs_MkCHkv}SDElt<+{@Pngh=Vj;R%#Ng79lBOYTF=O)DoD1MT2tB*Z~ zoS(mkmT#L$EEap^es_2PUUVS4ZRNc0{*wC?t8s;=y{9N6eXQV6Jnw0=ZDzNAYV7J) z<6i?j-LVy%ORT1fFB=1?J8pg8SDf8|Md#vZJ$UJiq3gyM(<(`we=E7{P2;=v_;mK4 zMB9JoTYIUq7Pr~ByaXNQto9EjZ_J9X!*6qEoc7Tp*Oy+v{>`m2rq>G z(i$(sHvw9$PeER>af&}B^@Q(w@U3>vrCi+zW2%9y!X9q(xAH`p2F|Hm%wxw}8_>n= z{}Ee@&*}#F)dA$MXwJs5k(nF56p{Vyushsw3uQm#xysg@EW6u==0H9Y(j=c?yso8bpTrg!H~ z_*}cu|E%YDQ6%@}+w7bvGbyKgy6Q5q_bfM_ki0dR*Y%hG+O~bq8rFDX{VCV4Lx+ds z^#4>g>jF4-jUOJ~lVN0#-`|L8-8%U@mP6ajQEobT5`I%p;%L>|4t^`W3H*+a{{A`X zvt++Wi(XINByc<;QFmCP?sDewMBS2e5_Qv}FH%=ys&FwLZ~es@8}^lP?V*_i-LABz z@60pWZ@?|AB^Y5(PFn#zKDgf% zVSZQ5{N?0`32)Mx`p=Ho+U4Ibg|`&9Yi&F08b9FZVR8s`JcVEOn9Tk!x;u}%2liY; zpWUoU_6}rsBf5I|$Vl6YO#AM%m7kP1u4HXA-tB2@$4-|mx+`-3p#(hZUE|%}#+^K$ zjUMrN=t8tG9ll!#1=Ttz4TJu|FcQw0PKq)*3iIi%CwF!2A(1RP?-JiPyDxH@j8N|bp-nZ z_rT@s4n}lG>JH~U`kk$;eYL5+p8p^8wP{FSzjE3FzR&CH=e)PaT6;5oc1Es#8&6$2 zP{Del&jW^Ww$1OgCfvt>o4GSSGzN%kTV*|XS>ejN%NtuhiO&tT(b{5omvA58j=&6T z_g8`M39Je9y{j~5IrSqWnxo0g(SG7>oO$^x?XOPEjX_=@^<%YPR!H3Ebjj@h8~T_w zh&~FTkLwcj@%#|_IQ?_zWtQXnhjCUtSOtW?I z*dJrBuJ(2OBOIlJqmlGEK7k|gzE9&Q{Zlw9OTd?nz4|r!Z%OPATBASayvf-s*~=NE 
zhy9|ocG&~u$cp5d-d*^$L^9A#^-)H=1>PN+IX>5xla+^7>-aaE-%`1Y(2br*=T7d9 zgcqhp-+0VkGf%B!oNqozAHd2TY4-g-e%jZXZeMX-zxe$+cz z!Xq9ULIW?jewg49sV8{E->K{Hh)UZ1&JY?9+=F?1(6YR)R+i?dzv{&sePg0gw3jz=vV5 zj)=Aj+t||uuZ2&5)^Mdu_dgf69~wE+<^|U7;zH>v+-Y$?U2{hKjAY>3v)ordZv~&b ze?}ixJT_i(v3ST}SqAyL%;VXbNk5_7bw<_{?mpl;-x}G;)t6-pdwSy3LB{jKleW z97V2!TiFXsl08j6@fPm?`ZW6?fq85IC0g_5gN%+3ReW zB>&pye#3YInQIemFLBxqpjYr*J8r&rRQT`2m8WC_b2)xVl6}h2_cbslLGCbYGy>CQ zD-9MWU(jX*^1<&C)mXto}LAv;UKT5M<)i|FY#PF zdcJjW_yOi3&oXa)fbsmod#t>q93TH}z~WivExDI@Ry&SvOthi1GP0Jf+q)b1{8PjB z-CUB_k>>}z+2~f2(HD=6p9jm1mpAUht~dKGU*me(bK2^OwIBm~YGfyDV872vx;SzJ zI(UQmaLTB^>Teq<=rC)vw*wC0jDz8keh0hjl3v?9&G+j zyV6B`gl^!-H7?H7C-o7I{Gad{vULnPTRUURU`!Tc+rggO4&T_G*yk1VI*z^tZxDDj zt}ftfblLnR*DtvfUUw^J6`fb@*v;-F=-7z7{z+_Y1NnjQkxtQhjq`1EF?s{meVN|B z#(T*xJ&E(Gg>m^en^PCEue?S+Na^M5m=M~#)tLi)_SJt5IW+$#F(GP4_x8+YZ3I~x)1jRz z`~Y>Qi0H#}X-Z1)V(2?3Y~Qtb8}xy1+BgsEve0wu14WYqHOJti$Q_EC{KYcH)55cb zjHBxA`1%x#x04I-Pt@ziZtD+DTD%V2-o`xZu4my;xH$^`Z2U~-jz@RhFRlld7c#D* z$unw1Ct5oytN(({?#n%)W+7|n&bzIW5^$)P&&H9^_wcyy8*)=gQO*q8|B~nPkZ((l zk&B_1dp(3d{DecVq3>PE{1us5_MPo3nXf8W|Fh0KxEQ}P7n+BqiFpV!#$~SX62Y?& zU6Sr)7JO=7&*a1ZN1h4C*gWjHsstYVh?7M}I-|=^*TIkA5zTZm{`+{|LcYGCI%E&@ zDOwG|8w1d2wHsX+{HB_AD?jm-$Zx(}JPZ6fXGqD1TUaxaeIy$?XGi5K;jH{Axo23! 
zmSv`jA2s0Pr|&h$C_!KgGUr9`Qt`D4Vj>;=vh>PRc<=DEFtW70mqAaZ&{GZcgwIn= z4)`i!?zNw&-2^V?I=IkzKO6aGwEBX!khwZEuBybiW}`C{UtLH48k^RL&TKPmyQubn znec*Q^kH$oumHu|#8%%%(A^O_MrK z`wbe0(>`#iZTTH)&-gWCl1wHU#n}^-BcqFQLusPOFcwS3(A;XSFGv)y(>6~SFtbQUmoW*I@?*x z;_J>@<|(oD$;|0r5;(W)`NY>);~I;-O)(76nRr?y{7yW|f*vJ*_>e#FS4_wszLKT{ zZD3REfQQ?2sImSRzdcFeCEfNuZ`u4S$UbM&MUt^ZKBbaDf5|tzZ&l8>&W>CPjjHam zJah0KmxG{taJ3a&omgXx4<5I;0T{KHdKq7NPN1fXGR{1xFYV=)clZ9=7%DMcAPu zrCr>6F!`U_{||M49v@|S_3`8PBm`!%pgRVNH6qbLe(3GlQ{2C-O4Yq}Y<^$DOBq+0D}z}hARt*Af-;hFFIb1%uAF)V%h z{C=sU&KDon)jH3ICM)d0gYIEfq>Vtgn9`e2T zy5`id9(f5_=~KQJCU^e)YUU&Ro^0aUua>=4cB5?OZu$~TyX*Jud5aD-ZkJz%?}_(w zln=y-bEwOx9;m#UZLBS3uTB2Jer8?0-u=ane5AE)1nrVbC9|$QUF#YY zz13dVC#=iMx`s1f+2nLbMC#JQ-k+}_&NT0q4q_U|izf{U+%xgHaPD~c_aeq3 z#(uzfY;wi?kDkF9;H}z??C-lTEu?<7BQxmSl8)!d^d9!jiDXU5R$lm&w?ybTyV*gHw=&Laf!juG+JXy{<{DYbNWO zo>NZ zenaqLZD;_J~h#W<7+5n|19S2xI_PrUHe_$ z(SB_PX72u|g*aO97=rImc7YIa&C(@8#Bo3Uz|2r#vT=UM)_)eC9%#~-4LpC*Wk(bL z)Y^1@4(9CKlV>#D#(1J3>|?R+^HckAa1TDh-fntIRQ+^^@QmH!Bl5_2>5yL&Ku~!wKtgR9CgDq--f5=cwBf6GMcWp=T!~e zwgFe|jb`L$(SKf(=e*t2q4fbXC#DR0M$>w5ueWzK7T4S@5nLIzr8Ly$iKrY z#3!zWUsQ5FB=U+^m`?}rkj|38HYrhBW z{jPITG+@#PI2caOBJKagJ->(ddfW3Y34307CA8=9&oLqQa947H{N&EQZew37KH5#AYLg7j%<8e%sfl~E=UzV%Jf||Y_L0`5 zy?(OreB=c1boRRq&jo^~_B%Qn`~CER4orK4XX1YUf(^@)+3!JnUWUEjMc>+sqGRp% zwe9RP!P4H}+Ot9ShW5DjSr8rZM0;I(+}!K*tG(V^%!F{SJ=Nmr94o(E8ajj4j!ZDU zlJmlpmv29PpyUIM&23oOtpsD&cMw}d&+I{Gh4Eo&L=AdKyq7*cG{z} zfR$vh0_Gd!3=R>;VUGKH=?nD}jy>*{gmIrd$#MT|kL$rcp>fY*+=qE4IWx%q+6Dh} z&Y3ebC*YL0=C!UfN;>nMyzkZ>y4T!OXa1EvwrF5CedtsALMG$rf1MfE_cE7cLp5Y9 zIU|Yj&G^Tr^vbVH7OU^`oBaU)2vDvZEO=dmQ z2GzdKipMusq?+*>E3_XS+k{zvHEg9LWIhxOXw!AvapEVfWb|T|u%hy5vWg}1& z`7j2bj@gV)RstmU^MlSIFU8$k;*7Qso#heOm0J_ye8>awNq%KJol? 
z+XQ20)s;_t+?Is>{{HQ&y+<$I)jr%)GJTyrVd$ROaserf;?8vzOYh zS<^iY|zXH3H=BRO$vjM)eW01#R(ghEE%2pnSZkv4P%+iLg8nEcOM{f2| z8zPAs5uev}#T#EHHkX=#ii0&_`~)!G%l~B$PHJlBbM(buiG9YW;9T}#YSY4Gf5{Vl zfj#{C5YLCPo#3yuU#V1{qM7F*b z?xA|`Cw>XO-in^5-$h4~_dU>Uyr$pIBR39VKjmUO8j9^`7`CI+8jfDlugi9XERXNW zW7>=AYle1q^@;p0yXA|01N+O!m9Z7P7A+&EJT&b+ddaRa=>Dyh!$XKEC#hJ%K;8F{{?R}X1ZQzGk`trSDd@Jmg_J@sFJNPUbkR2`t zX94iV<rFlqRS(pcf$!Or&Co{0bcnJtsJ_u{`l ztwnx!H_jJ*q}*ZRhu>u1T*v;omVI|ryF-UZSlqkaB6>4zPzjs z!9}vIzqC(8ca26&FAmo}7>Ly#if<-XWcmmEzJX^qEegbL_VkG?E$9>32pp#YyS>05 zpY<={|2j|e#TBL17Yn8hz_jk4@kZv`V;m2AHF2EE&i`zj0CiV?D!i|>@s7OdU(_cu z*|nCfv3O(1)uy1AHtpoMty7ydRzG_M7urqQ(kD{o>Z|flyz%ygcAnnaF{b>J9Y5y2 zjA%w}JSp6-?}o9DeKgzE#vCu9O%dbWqw#){Ft5^-$Oczm4bbrA3GK`=6Wal=Z)?n- zyXLhhC9>Vs*Tj$FjTgDvnDa_#Q^|Pc8t+AGUZt#0Fq^a`CDP&QtNnv`W6{a9`&Ya% zK)bcXvK2g48rwr(Rvn8xc(iE;_s_hA`$T_;n{`-^*X~N%RlgT+96^5t%thzSksE)> z-te$Tdd7;xFG=6&k zfi{y5WQD68KEN3u9Y^iHBz|yNDLg5sqX2vH-pQpFPrA$lPwMO9Nm=2J$(~q_&6Aps zcJn0U#!W?@aS_eMSX8Q@TLZEQ)Kfd z-<&SqwB6xN;+>uRl`)0KOmqTO30rb=_tp*`e?>yOS-rJ` z&;N~hTX%n@y|shR4$;m!?<{yFo_AZ@PhjXJSr=Ii9EInb_VDZzL|YtBpDL zgf>3b*rc)Se%5L}32lm4-!9GDJU3nND(!i07C}!3)b8d4cqO(&p6Mrex!RfYPi)5; zRm<7ku2l0&Y{xn`Xbt8$Y{P3&!a3X;Z+u;C?sUN`p-tr;c#o@%39p1UwSSK{&Udvj z;g!&)0eXL!Hr9C#zh%z7o#zdIjyE=_-9igqR=d8RJNFy@8gKmN$+UYn-uTs%X}3Gx zIFokAJNK{pjo{(ICA4e(Ej0{i zhd#5(%USK6njiKMPc41de1?u=pyTL!qFqvP5a=qM?5ev@XP&;P1>w}G!^4Hl^$Pwpmh?knQ=&fR zs&Vu^ex|YF@joetNOym|`8XUHW&I@(%fZhG3_|Kt_J=po(c?+kRg0P9n!gz_>~Uru zEZHvcZ)_$;G5Qv|&B?U^}O&V17_Ct^7;?GzRqHROc3ALz5g*m_(=G^a=A6=9tK;3#Y($Q1{6T4~lf7~w zbUT*6di)WRzXxNhxn}*ih&M>7-plh%n!RI)vAN}eTd*mrt_HqObooqZ!}J3|2e&~7`rZB8*T7i`^B6s6TGNZv z&03G_;_OM)p;nHQz6pDB|5e6{iDv|C`W@6!MjO#@8#Y_PtZkanP)L7@ldGY>vEa{p zN=}&l*HVOCPqESZot)~C_=Liam*Bb1?+ucbX{R*=2BCY9yI*!7b4{#AwoB1?3BSFY zWK>OHTw>u_Yff12iKfoid*qbSd-^UoIPVqP?VaZ&Kjv#>+j2)u#IoJ z_dS0kfM3p;Pb<3Ri}11oS|4x>#@`L)Q5W3wy)MM}ioGZpx9;GgU&3Q zk$6Cse-Lvsu(f0FzUAPc>Q%azaT~EF;*atPXq|$Q_@4fEU@be4?B*YS!B}x8v?G{0 
z@YMX@9wP0*)|@YJ4Kers?R@{qc+itM-|SPLT$Rck9h!XkQ~C1pqCWy}@Q^W@ zQ`g*G^Kzd7qk@w(04UGQ8W%v5i&|N3>Ubm>A;^20+)_N}Ke%Z2OhlJWW&a*MC|_J&9{?@2tdk zNlw8q`Kw}P945AEoh!Qw%{v$HhMsjeVnd0=OinUwmXDvxemc}|%qa4kdx;@%)-Z}S ztYHl;XA+;&zqw@WwYPODE-`ur-?@7;m-UQiE(epBteTif4p)1fWm(E^pXuK=a7ehF zTBC(i7riiAb%9PZMoq?VgWqs^6>GeWHR6?OX9R&c~jTSpbe$xAOQX4}uR{yx?4x56rQ{7h#7l!VbR#x*3TbUU{q1 zu)|L}Z&~gX@H7%TymE3@7HnHpo%Lmhn zecBkZc*a%8hONXD2Y_ik@Y4RyGsvfvZ!8`^oQ`Z;W>L zn|zJ2Li;B8kGLswYF}RtUsHeQxNuYmo&8_nXcRaSjruy`<=+Kf2k@(jPv6b|4&124 z?I*6mPfm>$f{ALf>-QME13s|B3r_VO{ER)5)G-I&P8hsyn^7E|*toc-;@Sk$8(?L1N=Y^4RfbP{#TQR_5W6AxD8zCUG1A~ z(D6QEiCV}L=ir|^&rOSG8Y>8(ZcYSFisI}JG4J>KbGDK%xE+6zL+7-8tT@Bi zDLS86uzl5DPxWIH&#NVeX;Czkl#1MFM3X$eB^t|&KZgbcj{v&kXvT8WT0JzgjWJsB zg@2yb6ep7DztE<&7Gi7EkF)NwJ@HYBxrow-^Q?Y%e3ZF=Hj+~*#gf03d!q1cFYyE3 z=&9gF`o86R)m_OP>85xdcc*o_qY0DU=+`*9w7Igfq# znv)_zTW3VdP8|~FE>f%#AHYn`y;9`Xdd?r>>0^=?jEwAXfc@f?<a9_PxQ* zHTba$@mrd7$(bIq{n&;0u?O>eJ!f14XPl*vn|G%d{4=?BmGpAO)9Buv;&2pqw()dq z9Ow}BXK^pxGnhNx+2Q)LJPmESgGxO?^DIU_IC}3y;7|rUE(0!K1wLOPmf}+Uht^sA z2|jbJvzm3b%E!GFUOG^+1$r!Ji)eWz^zzYSVnKjYE%5-dgNtU}V>P2^Os3t2C*^nY zbAjd_YG-6R;GX>aG0GDT0nfb3`lZS989^0sN1qVL(!^-93UW2ac<^EnM z!N+~PJyGPz43A?!JPW;aoe^{JS8&!yS2nP9>g;*p>+l)mw!jyyvq!)6cJ?H(F9aLK zseKLj1kQDxJ@k7${fZ_+>@6E-{!Q!^^>-(8P|Y&!vpdlH-LR_ogt~}7P)u-Mw1#%k z9|Z6p`8qtqjPmFn%Wl7sv*4Mh1F;ddpY~a3ZaaS3n->LQ-|^t1#cx>+EHB3okV>3# zlurX6?Y03!!t&FCpBQxzV#dCBV|@ZVj&eR~j?Kgt2_CoE@Mu2U*sSN`tvZwIs1xR# z!!MF=p?lEZ*%TyeA>+I%48#!2JHpucTe)LXc_(_&$U&Z z+*L!?@`RNaGn0Iu@ZDS9qqb2qv2G?mn?6^0ghtov41caP_T<3ia3}jx@f_aEOJ69u zd(I2&yDIj*WQ_s7WbVTe_oq8}7ray08FI;#*mj!cL*Lp7rcOPS!>uD<0p4j2z>{s8Sok_?7IxDsAS%tpU zio+CN)w`05qd&6t;Vj094o@*-44i$q4}Y_2s|4}SV4LWe1ulZ`^@;e|C-biGhr>@( z!Qk8ep$>(IorTp~-sW zC?{c@@Wfp@@fAk*Wl|4>Y^itxH&ZQr4zRCM* z3%|m(a63sjeb&Y)u#3)4F=IU(oKAT&-Z%@KRuID{*&=hKvEsr-rfjiEvc;)({1NvxzP6FUG$o~ekqW(IP2S@Xv7vXmav=YF7q_`LN zcx&N3|ITW6fo(R(x`bzuLgs7J{#}@U9K4OEue`MIU{QbIIfS|XhN%);4PcvW6&{g`0$-XyGhXJP|cnGy5>$_^IUVEk6ff!n&>O8xexDa z#tb=g-}lRS{qVLwX`h^+-Q2zL 
z#(lKgK)bHF)7Mhh+?OHW9HmY4b=TZy_cddboVlm}EZ%sd=KkNVx#KtA=9>HDH{y-` zHFxmWWA1xWA`Py&Pr@H7zwU|JEx{-IYt5bhy5>$_&$#Bk5xao=zxX^iDJIH_hf>U{ z6%RGE5}oh|(5UAAdu#5C@F4`}^o{Iw&AsZ?c;ma86ZmuHu6rRNaJbbq_ql(GH{N#= z?V`}&Bbqz?Y3>Ckz4wj0>YDp&@SZiFo4dsao%weJjTPdHZJfX2k>ZbS&&3;m$a-qQ zmowK=?CBwJ*6x~X>L25c(>2!@m7Hqxeo{c;t3ovdCeU9_6I(%6PK$u=yxPxj&5 z4=np|GxAOlUBjDcj7F~86vQ@MMXXjW@ykyWvs^}gsQci%PY&{hTkuaUMV7uDT~ha$ zJ))U`)NAKF#Xg8`IwWlHDY-Gv6Zkj-nYxYGZQcC{_79Y_q5lQZMdz9CD_8Q;Rrk>M z*XjE;yYCn2cQ^9jeEQu;zZuj)tfSxNLB{3``hAu@HzKF6qR;tH24ZSI3w!H)&iIj> z@j=db&8a`{>b;vd-(O8KVi}TKK8rV2ePe#-P5lG0_o!L6-Gd#hzd=rKW2|}3BYWS0 z?EUr(?AIP--?vJe9ND{$SfMW2`$K39*?SbSccLwOBrtKy-COv6B3;=W8+jV}0$JKy zH9vB7FiWw&42H7A30Yff_&Unsd5TkW6VHQjdDEYl8Khb_}R zr)qq2@2-lkd}%^IQ%<(*Pg&77Y3D_LQhp8e+)bv972WIMa?9@7%-JBl?tf-jdT#UR zO3 zwH8=(CixOMgIq&-QOSMOfQ|*Bi%x0_97s+_ze(>{G=%q|k2b%tc`mZ!WN1UQ+K$|_ z58g5V*?|pjLL+6+hc54kY*Y2cU_qifu%H zYiIv7@Lmx(%N~$r>TiDV@jE`|3PqImbnML)G(DyZ)Etw zbvYJ%f;rOPWRuT~w&e7rg8;VX2k5ucvLA2q+B7I%?jC5cMZP80CclM4k3YknoO;EW z@Hf!kOnO9@D`I_l=%=l~S@f7n3}=x`kJCQ!;uqyN)92&EAG_}#*kkZFQqL70P7Q9J zQM)oHh#c5Sjo`zq@iPa1=-xqF)}4#2JNap3R`@{?{mp}aEJN09SOjlCCaeIjK3mS6 zhK4_d2cPL=SX?Tnc&K7{-+ z6aH1nzE$nl0{kt4!5-?fzef%q{2$SZYpuQLWxSi{pPC)fcE)^{F}I*2YMfWG1I~xv zRbkJcZ;vydakes!^cjQLGu00>=@PwFbe(G3tc#${D)iQatZ(f(sjKtRYorffZsS0* z*erYiiE!V}dGYVTeQ?)46Wq1W1b6p73+#+Hp2fIxS!X+E=uGKt=*pF>vC`)0bD{Av z))-`cZL~qpH2avyI;F4IA^Qn0dLLRe&ueF2gs_`B`=SCF#L^|X%Y;qI%@>>Bh?oBY zItU_-4bx~qaGh>k=zqNRDXch6h7;WY2|BXN@kPBiaUr957Ej^;G+(m^%sm`?sHS$%`|Nr(s$*zuwata zv77ZJ!X%76K(r;Cif=l6^*LZu$()5R>8yf{Lr+zX&ib*XvuYnF>a1@4QF~bZgyF6E z%-g}gbjX14kB(5Sc|L35j#?Py!P%B>8UQ-nZ~*Pr7qZUM~ya# zu&Y8Yf0#Cv^yA=jt=if9!@*}1fA8t)>t_}o%)ZuHdYXd==|r-voT#t8#0F=nFLOTv zAA7thF8GYZ&Yw=3dEmWkec*k*dwuXh@tnom?)9N(|A991fM?hGnENvK`oPWh zt~uH3OPEs?FwfEY=%>f}=&RPfK5QJ%s;^gE>$CN}uJsYK`XX&k)Yo40&>yNVlNRjt zG2T-5`tT2bk2dpwXV?0e`$qTrz>Q*VtvT82OPEs?Fqf`cNk2W-XTzZjf9R@rsIS*u z>$7#luJ!%eifMNDRe)V8n9$c=e6xy=wrC-(qttHmjC+0P_1Dv89`Mxq)Mg%Yf6cu< 
zaC2?foU*Kb66RC|%zsRqO8Rluw_D@caOheeeEiF_Lx)`6t^4aN8HPM#`uY4rZ!lK4 z&#>yp;QdE>_FZc3HA~NT`ko7qJx}vAX@k7bcAHmS>)nVfRY;q8z*p;4n|Z9|1J`+3uIevUSXEW!CbCz(vEZ5o6`Y3iM^!!Ttci>>!E^K&p&6RlWBPY>r8?uMm zS@e+Bu~s@QeeH9tDTBLXpU~!bFnxvjRAN~XespMj!D=}Eb#H-w2HWo-g>0pm8|pUIt9!;aT8;f?$<`sb|k5H^`s>@h!MZuvHx+laxl zYFk)#q+7mitZ2z*FW503TTJ+N=PEcfv^yW+8{`-W9JOHUey1AP?s>8&Y$L$7+J-H$ z5%?O94_n1gIIx`#Y&CDidL82L2fe`dr}Tf&gl&!q+n^2Gqf=Rr3%0KkZ~yT<-LP%p z`-glNJs-X?nfQXw$3cFB94Ups;}dGwD=*{$Xxa_mgJyh@2Op;qErX_s&seVbi~#g- zEpx52VI4GSdN6y+$}J+@>wWg>KcMR`0(Xt!z`ZxRK78XP4Nm;wt-!w7rfc_m&`^0z z0_@KRHiiv*ayi8spts}EwemeVu>Tm}k=C&rnmx?l_+DUt1^vfN*ymb4^G=Jde*o-% z>w^7!VEwoY_LY2pl<%ezlb}J zk>`|{C;y?Oago6*awJA0|iYJNB>bKrvSuTebWlvTd<@f&y4@=;$&(W`Je8G>p z<0y}UOZ6w))32#3=J42U#MyszcQ+q+h41_LuDY8CuSzD~h`NFIk|XvKPvltic0F@Y z-xb)QHEum|(GHDW2Ti$Y?*R8{sE4!TEof-V|Ge9>g}Cy@zQ%qMjYJH1te%N31lt#Y zx$|2o_gVDpx2(-gqvLEE-9$X0ey`UX$$VY+t8J7)pg#Dul;Pb!5 z!=PcG$HN>851Slb@%Cg3{#V*~@Sh|esq94Uv%W)q4Xs25O(3=oqt&S!QJl% z^lQ?k3lAkOJjAG<#eUc%`$S*hJ_5M&ojSDrB1wEN6dpJikKCL@%w6wiqwbC9A{@Q${iI=Qn4#wBMKtAZ~Xw_Z7 zkMsY~G^PKp31=HTlukjZ^2|-3?FrTyC!A@28pX zpQCq2n{%9nh!>Vm~0e@Q=J@i??|>np_F124rY zS3nno)Mq+l^;Iq|>v?~k<@e_qcN&~$KAr`NjL~JRy$Tu={EX|*iA?aG2kUY0dJ#&wTD0!Jct& z`a6G#)31BJd+dJmX_rRd&0jcizq60kuWQY((r;aXF*-oM2WBB_4?l6gQxf_;3ao^$ z0B5;qcMG3dvzvB*$~gF4P1-HHz+dt`%OB=fu1oV>?y>0I((^w5?t0#>{-QCu7TgxS zM*bk?TIexK!t|m4&F4FL?lGDk;u&LaTE}za)nsUGNc3^uZysih(z9E8`8~|<`TVZE zQf2=>Tl?nKbdy$VH1|}CCaXtT^Yo1V{5$@~IiEMX=Q+#j^P@eQQ?<1hZ=!7l za{W7e9rrnq5j)~|q&;QReIG2&1Th8WQ12zV)XUKN>J4(+s%9}6b4k-KI_+e_( zB)jjIG~dE=%=uoWzE8LImihchf%$wo&*4#{e|3y8zy3F3_!+YhUz&T&wfJz0+MamB z_vvpw_VzI%TL-`NaUOP)`qPMA04LseN|p02cD4<>B=W5qJI0t}j$*7WxrYvvgH75l-v zA0?4%(6}zc9Agmg2DrQY0H3|VdZ`WT!r=+)W*&KcJp0#w(I{7cciZc8)7FtY*gr$b z-6}Xho5c2(yl?T29qWXXx(m!Xe5`iBLF+e;h%U3i^RVbXo>z0fa?L5me%0e!bFH!8 zz{bAjGT?xJK<$SMo|$9KcUoyro^cai##t7;7V((~ueWH|e5MVr-QDo|4RbU3%RhSZ zt!=c==U!Qbi%0&BXWW14!6Td1?@)6c;%Oh5JS}~+$_jN8q<8e7fpEzofu=h##HKNG(2B%i|HOIq_Yea$tm*O<_4|6|~9n2o;>cyQCL 
zXDIi-zd*dG#{IymFV3Cnj(Wus+s{O|x}T&mXNxZDto`|r;9;MYtaFFC&P|!-I=@YO z(QCfLv+yZXe`&|9uO~$o(}(10@txc0gY$ZnaH!|T6rH(4qBrxrF2@*M$@6;T>Y}Nc z5noe^{3Dz4O8q5WXTfxT$qdHYLHpk5cJ5!T{YoD_){P$7>waw~N0a!$f77QvzsTp6 z-1#tNzmG~|-$bXD4*wTqpHQ)9dR`NEo9Ih1%!jczn7RcrQ5}2C-KGp(qlont7PPG5 z^9|po|N6Mr(;^ z-tKDe%+dU$E&8!F$C!0a-OqRRnZ_KX7Sy`8$3wk7Q%C`;~BZZ_qUSUKw}!MNfFt8%>VOG zk8C1uXW0dFOIn_aj|#mqw?sKMW<6!f^qY*k&W&{72YwxRne*^Xu=cIdb!aE;}UeZ-TFW zq&s#6nZqNq=}Y${U%Sj$p?-r@ek^2vhr4Jon7_op?z1-vIy+iVdqJ96_c&`UkoY?HVf*3)M56k`lNWkO8& z^1?5Q)^g7SIOcoEHL-`<&eT%E#lOb5qTXAh{is&!D`^2ACCH^!ND6Ys4Fqhjg?8 zSEtQw*n6Hx8Wz^Qg2C^@H^6IkQck!JunWC4HCFq^R5M4%c=8*E-pYu9yYkTMQ)7?6 zkzqa$y`B+!@~2Z{2Z5c%6C{gdMnB>{L5rWbN`8}kcQ84$Dw}-8e+72B%QUXcvN!Zt zPmFs_z$pZb_}k$HUIwt*0IYmHU{$*+NQ{e@dTF#94K8;~;LPbQkA`SW@h?Yiex?4| zCyY#uJw#(Ok9e}|Y#!`v%AZ4B%$JRb?kq0y8jarc<`UBe$X?lo40YR|xI;vo6TE+i zaGntr8qxRB!f+>^Iyq+bws*&*$XI%)H zNV%f1*Ogxj?K}N61J@SbPt4U4vh%O7_p9pA`!x^uGlOT)=yK(D@RLjFO7OmyTonhn zkEpy1MH#7Pj)ze^*V_AMYTxO86M4k>KS+G4ndb$(&mvA$IFzpY#1+c_LQZySV5_}5 z*A8$q^AEhqxQ*BnH)A*a^nbA5*|{hlg4U|(&-y+#vf!)u%lPeB_g>B467IdW*mQoZ ze7EM9*e^}ooB3`fCsCV=+$f6qb>{N&d-0V!z_mr2)Y)X8Yt3p?zdt@IIDA4(>u!Ud z^2n`ZppRQH$c&Z)gYVk!il+PXT`@Pj>v#GmPej9{g!PX8U#!>34{`un&9=@GbDtkK z2NnF%!M$*ef1%~T=U|fQUjs~Zcgvac1Hh!MIMCp{dxyCfj_rH868l;`cJ^j)qFl}L zcVyUWww@J7I=^D9c$?n`uO}}vxqk3jEI+h(M8r#eANW}e8_wo7>?iu(MlI(j;fcE* znA~(L@`Z4nIqNI2pK@=Fb8NvZ_5yi2I;!!dmwU~;E*;WicF_)bYk3Sj-@0c+WW}Ch zlU^sW9@Rv_?^Ui`H7)d8kBlHZUSj_iL|-H~Q4?oOllRPba?9Wwabhr$@%gi#6U*7= zYU9PHf;>4u{p_S4_ULH!{{}WoJ$sdB70|i%#|x}q>vq;lp4=Ym{WCCj;CDLj!I%7k zspD&#MT}c-=bYnfTS%^@R^amM!^m6Yv2prJqpv%elk>eF->lj}*b@s-5g1FHe#1uv-tu7c-! z_RR|T>k9a*=qDr|OHQa}aCa)>2B@b}dgHXF@kholSdZR?{ISWnFFEo{eAAj^FvS@4 zN8sG{qe)GYO(trM*Nrvr$a=EYN#t2fJffZ@eA5!_itK{OwPMI zTH=dl7~XRuTlm!Tb-dR$ds>t7BNWoFk9XRBl-g8?JYnu5&Sb5j@aURQF>^JLe>8tz zhA}{E@Oo-)gW=9YMUseW%pn&0d3QST<*ukq@4 z`b*lVeOk+Yd>1+?+&!}7So8T3^Ks@qpLvgh&P;sa)30W&TUsq&R6e<*Odpufv}NG? 
zv}MRP%NO$|vVr;DzzOC(QpIY2nRt1vSv>qTe&_7X8EWN0|)u2s!!l6yi|T><}6>~W5A{weaPMKkLXu$YCG8N zqxzWrBe^5Q_+4;2NR4RXheXrzll+RD1$)@5qEq=vUf}!R_+DrP9y9I=z99d#cVP`W zubJP;%%y1j_G+tE`$gB8pYcoNQ%Od4?St%^g6!HCIf$G~_mb;@{6eNrMKKb{zsk_Og5ZaY}89=pLW@UGoDCF3oGA} z*Q0r5MMJ!kMt>pc9_Jb15d11Mkau{`oHuPl+_8{fv-T8gej$8Jpr_OX<;w$s9l#9nN-s! z5-uj@|CE}?MsW8hgTF^gro?{2U)3Vxxqo^3hksI%9(#0bAg2Ex9c#oMy?9D&6Mv^B zof6)3ae8d|mjdSh%fDpAmX`7xe@kh5YEpW5Qz^fX55!*goU%Qg?@PxUv1h(w{r=2X z(qmH|40JBLAP~#TPkwXh=>DshUSPyj$J&RU@|19ju0kD|=$cE6*we&BbnbS}(;-oC zdeZtw=x0-7A@U4}_KF%yV}0S#UJp3ux$gV7p?iSm&3myGh2f{^KJIl@Zg^oT^LykH zVz{vZQ!ZgAIziEKM_?Fc!Q_JVUswdd5=KvL^@Dw0q}u5}zDdL%$p8 z<5Bzl*DlUjy^MFMw^+_Oo0H!!uyOb)r5i(Mq(*ArON}g9pBf3zN#%?f96iHReeneT zX7Ok6mv^SoG=#PLme_DvY{5k^dENyR;Itxi*0fmd@M)$TW$I*S^|y4g)2GEAAKu@5 z9y+~$tRT5`;~8mzjT*1ICVbJNnk5%4uBpAKrY3aJnx9bT41VN0_^^eC4<9!1pz&6^ z#;Y-LFcTecX7_k+<=HymOmx6JYrNO<62^O!@g8BkWsJAzhbIr}3rXOnD%O8YyX}#Ie(yno{q9Zl#g;&OE$6VPx?uA*_ zxaVIPdwfimHSYO5W88J-xP2M7FXQ%Q+`f#P{kd^PM-L0H#Sg*f*k5Nn1YWbEnnOX? z_}S46jUT-_R{Nu?yT^|Xw#Hv~b?osU4YtN#H`ulAevIFb@%u5pm0PDbI=)fk`yRH& z_lT~n@jb5bKbXMy!>^0go_<~T_``=;(8>}(t7wdvR9=3_2dTW)wO5%BF2NI z8BMhz zlJkV~R(J`$p46~KGCk+FX&dB>l|4za-_d)^kXbH6cKIqY%vX?QE+r=u@rmTAk_s7vG*#l;uhdlPm)v-d(3KM5TCYZKQFY-EP%cdas zE;?;OY>Dv6`5;@*q=E^tG~d?TN#HfO);H8Q5MM)4!)oty9@!ARWhus3x9#Lmze_tc zH|^0oy7p*x^iFU;t_vMmuAY`9+mG_iUIN@E0JqWx&F7wXsZ|TFm~HYZ>s@ynY^hPT`{y@^ ziM&L1g5G6Dr-u1KJ3iH_Eo;SD&m`BT?q@KiodCwJF{ z0J7s8)^xmn^uGF0A7T2?yYLLNAJvOv9CIEiu6YO^$|3d-<{`KqTyHdRKeegge8p)D ziAw)jGcd5{$F17tthIWD(%<+v>Jq^1=`s1=K@sJzSOXNk31ZSKcsVS0Oc^1nay3f&Jb^ zwX2j*RP%0Q-agq%uTMXV`Uy~Ly)$*^V6PU=a}j@II)oif&<{HWeDx0`EGepGayv{ZX7ds)j~>e2f% zQ^P^Sw|x@pnap~sSkL79(_5ZlJ>!9^Ij2R|d_>EP^Z!#?F3RkkmIYVARIu$0u1kRH zt2W(y`2RBa7J8EZzX)I8Y}Hf~-6q0U=Z5(9-?@JpPw(+W+B|i)cour8y*MQLrBY&=hsd8jBwEh@ zPbDQGdkvNyDmseq-Ll(Ym+Y1qUGY^THV(Ub_3Wfb(pIZZV+;O3>#S0aF6%Cf^4Cr_ z^PXo%>xcu8-8ZqukyAI-i@Yp*hZ~m9Lc@u5FdUc{0&neuE3t{)!QUS94!@7HNwI+0 zen+kufS;!ho3T@W!l^x>^U=qcIwNPBGOKg;TXlP#e$?K`R1M1G=LYRzGeVagjqOKr 
zmCw{?Ejeop^OYQM6j)ez%#OC>8xEq^R(9#N$U3%O8$_?Y&DLv!=(TtEofc~%7eZ(q zegxTFxIZkLlyXuj_BS*;CGym|l*mf%F0LBhH?o?$ZP@n-MIk(m(LMNOPb%j}TE`;jbJ;3#x=^2DDLD?7Fel~tRKBmJ=U+jl<1n9JsU7yR z9_{!(&(-c(a?z;%V0yAWXZl*o?;o(&oH;+j9RS6`?n~-!^NiUhr>hP3{>}P>Q(XPL zYPQMkYBP-^5p1rVU-4 zSCSvgo0=c{9{q8LC-UxN#{P;k@&EFh4_z=v`dV^m#$Z16&iP&Rl@mQ3`ZC-3D#*uP z5lc%7&7dxYRR`6VAM<%aGlpR2n~5(|{fMuADw`=bIP*PEh1J%>9PDSg1r@PX*w1|4 z&irvD=tm8FzxBvV`aKzBWJqmt`?ctredaI~*8Uufmy zFI%=+&bGd{y1W#%Wy5Bj}Tt_Q@PCeT*@C*rVMr%Zom)e6h^&7tArAn9|`|19bQ^zMH&}_UnM(zoQ3UO{|*6 zNz?_U=Q?`etJKQXI7j|$;cJyM9{76b82HLRKsH>akCW(@VDhTXsU_%1xP+r0Q5_U1MJBxi^XPZO4BTCh~CL<{s$#(b2=WG1pe zA@tY^&nWwGX#=@qWBT+BFk;tqcpAjp@#RFvs}1mcg>{`A-3sRx?aulC6V9`3oL`)P z^AZOK;O@+S6X$;>e(OXyzud<8LL29&IpYCi-!X8`-MG);{4sF8M*A52nf4f4N3`rQ z$Ip@dKM(6J&*x)qy6+Av4obEdPZU3VY|@$7#!R1$Y^X+TPqKK9e1wv%ggfHPz*{qM zQ>w-7{=EfV-2L5Ye*^8+wvKoe+3zIh8Q5|DLOg?fN9zBl$V)BQpW2~q>5*HYJGVW= ziGSHPP>1<0p8qp&WXVQ@ zqw6jP2J}&Oxp%eC=K0b!#ovAK`vChyJU>-?kasiL1LEstm#41wxp+Nt>~iH-D@x7{ z7o9h$g#8xt-hIyt=|x*}M|io<#2JF%zFawyG@j0aRB|UBOiuS}Y+^2=*h$R3uRS+) z^^xT4?c&Y*sNGX0S^x(=`W%1ew?D1}Ua19EP3aEaJJsaPj!kMXxXbZBZs5;PwrV@k zh8RxZG<8IH67P|FYyh&$fF;DHb&xkYJ!5isnb{vU6YM7E()M5E@&CY+vF#?tURFS^ zpA>T3uKe~-i#!?ONuIQ5^Lbi7HeTjcPt27!V_4X@yQEX`!Q+#&J7#f@G)6wIgV4Y3 zfGMWL$EUu(gMOiRhA+H-m*Gj;?&00@@UI@$IpW}AB~-1M;Y zX5rR1GN0e``8_|j?oaWR#OgJD%3La;vC!WFu>xo=Nbca;z5(J9JypaLlp7hIa5FNd z7vHK+@c`uZW=>5T!EtY~A?_GRohwg5!}-wl0rseunryN$N*?jjhvd|J=&Jc>eDqxG zidC{J+H#ESezLihG53Yg)v@i277nG;WS?TmzE!Nvi@pDL`p_8t7^ChmV_@H}l6^lx z$LJcvw)Y3I_uppQ`-9l~o%2y+9PMw7alExp{*&VPlJRBt#s1q5JFpkKrVsmOD!%MA zV)~N0?#^;gXgN8n(Tm6hajp?7yziFS_s*U`emM_&(5j16%bk_Ow9y&V2Hb;Vs6!yU zeHlK`JnRl8xZB8^-Fl4Wlp_2xyf}QbVBlame(jnx?yp+KH`ZG*>>2ldiz1VlW z)G0cG{-JsYZD&&-L+{?~U40lCESG&C`*sO?WIZ_fj99%-2YDiUxI^5}v|$c|e_Fir zea=2=AVlMXJSDMYPsu^-pr79CX#f@_WrbGV_&^G=kl;{#9j_0)r}y=4iT!Q``@4{N zw4LE8;V;sD0e{#^+s+O&sV~`f&*t}5?yhJ~mR&X1lvCmt@D4WB7{6`jj+o%qnrm#< zy6drJnl?92ZrHK83Eu6)5V|4cMl$=2>|3NNDpzcyyH~JvxaPg6z?0tNqO9 
zkHq--Sih+oaCS|+*|(Z~*5qAw%`VR0Di1ly?lSkv;OOP=5WCnr&bsWYqAl5TESk!R zYCk;?vt-?OdEdd^_Ai|ncfr{v-gX9OV_R^+r4HM``M^c?(_+@!KCL)h#QG-z7tVkI z+&KWIss4`slfv_PUIbip#>o!7v|#J1rJP-Y%RJz+@0>t`b#|pjvq$bO6{M+S6tGdz4H~`kqupYP4Z7YWB*^;(RUHc4chiv&tvQ-^)?VKSykuemTbOm`riIG{Z7#T0M z&d8JDk5d<=g*!|-&n2G~BID#BpGp6g4Nr1dc^9n=js~C=wN<;`y>@BFUcoh3pZ5){?4LX@jjJfvWcc3k29ecnj@~fZvzbh5Td%J2iICDV` z@k3Xo8Qy7{caeAsvWG7tG{dNDzv$|F@9s3dyL*~Z*>F+y+wqk@1Kt)rs8-Y9sAQj@ z?k5ykzRs2mW0ZJNA!pHS&LZJN?IYibuly99+AY6$nX4neG{7fJ`GvUiMEPX|``3|2 zOrNJIk7P%WFdu4s{GZ4pAE!%J$c|n{pOQ!JFy#^QHt`*Q-2aI@@{T>mFnix?Kc^wH ziXUpE}G zWG+S2yd1?d>K|1p_Mnn^6rIN$rWS@r6DLm~H+;g>CD{jFzv8Td+zY7ZTzB7txZ<+J zdk~gw+_9mXaHn70x5?yw%WTGR>U28xcEz-+{gmC^_Yjg4_iNun2onG4+(V#0^BzJo z_7=@geuVRxgBz~1`Azp|oZs7sNA2sXCV8Bh9o?KCc*d z%Fda|oGCqXdO10#R3Gv{avzh=DsP&2s_HeG_I7amDLJVY;@__UKZ~G+wsQiHl|dtM z>U4SWTbi&(c5Sokbd5u9{R3zI66)hjq{gIbHeW@Z$y%Q=DolONdB`cn=s!!T)AuC& zak9s%r-|ROY8iaS><{1XaD1eV>Bd;|JNye8D5*^+=eqcry%*}a-yBM)7c`PNnfads##p8ZssaL$FAngR!op=BwnMMeQeDWnV|_9 zTv_eZ{=34mmsq|i)tMaZR}6RC72N0JencDg4dpoN0nY>d#zcH&b6gLY({E79L4v;X^K8)G)$7pU4YGO`3aKzeoEj7py`oTjzVrno9? 
z^$6rsWNqrarf&Bkr%t**ef4-xTGiyEb9PnTaQ|ar?EmvP7bG)JI$E(SioF8+z_z)_ zd!Z!alogx6T#GLEt@a}i$ez>8-jV;`gjuC;HF89g;@7pOv_3Qb2pcJKW0S-0f_lf1 z1+c%$_Nv(C@${#A1hxIqZ;(aw-GI(RKeFtn`jj5K;tfxO9~h5k9M#xy`jU*2$2-{3 zk2!W5wC%L_onY(=?(g(A_Lmvki>*m<(egRyeoa@O*#8aWDcSpOAMTRBkk{nf(B7H+ z7WY@u&?9o;{X;qD zhjGrIM!vCr5oce?7At;VUo*6mb9D3V_!E~%iBw;O&}*BR777 z8kGHP-EUCz31Tjrfzijn*O9q5ci8|lqj{otY=BX0fE!#kz;(#W>$_}#gF5Q%Si^c^ z4WGAT4eQNVL!C9I{m7q{Dw&%YPSz-yd$TEXXGZ%nMgSXN6dT|Mmkm%hq;>8wY#U%b zHo)g?8(_U@1MD8-h|d~hy2fz9qM7=Zdl$u5Zdej9>(xj<-Y~&cN9(~r>^1V1%Lo1p z{_jT@1!Bwbf4^SPCnEZ~8N8*z+qd%>!1o=7*6K3S%v|Ea=~QSfpWN5Ff4|`|dkpyL zhQ)ol>s2vEhVo9R%_>)$m-=+qg`&*|_`a`Dn{}=>Nv`@!nk&BVNoo^ywVB|mb3~hs z_`XZk=BKVUi&MJm3DIU5zVC5r^9xs-mr}ax_|RrPa>lu|nZ^F;nKM~7I4f`O>*Q_A zCztb?w2QwCpV)514&Rsy%?*Y2hCzeyiF;2+C+HWshWnvq;6^rsg~)~VHMyN_;NxI& z3NoXWt5jz#Hb#?|h%ej)A1NmG!Z#=_Jc7OCC+?!qjJu$2CGQrcK=<$x#a&=WH2KJ2 z_=xgiC?-hbHPl>V?u&fRsP#5~>4d*@F!y%mol&FMiqwupR}66|=Kr!i)Wa_&AGZ(->&V4p$iunDeipK2 z<$RvKn`Bf;_jxfXvqNi#$Ck(W|2^@CWRnl>aPvXwB3GcROD}SGj`*^}bGGuHcus3? zz6N-VVwBwQU1Z9z-vZ~~1oziL1J`1Ag7<7EmYujG=%b7O+^+a6_Js7!25QclaKYwP zM67G-aq3)8@??c45a;T^v=$vlai!>V9c{p{kl07BY~S?dwfjo5YdV+NHM7g8>AaVk z&cw!s#}fDG;J}AIw1#+G!R`|3X~y|Kna|&L$?m34Dmocku6x}T#Gz+ER}LRF?^M{n zl-K@%T_H(vXgy)oj9mQNYK!-;^O`l7{laOjd9OS(`hA`WpKJN9+6+tJ!(kU6UJQ?{ zaq-B|G45qJ@`KKr=jl`S0WWhBAAZ8*!&%Y);(Hr1y+GikTnYY4ZsDcn@vh$2)rRW6|Xh`9u73r-I`@ z#v9+J-SKjc#9!{a(~QGA1>Gy;O34SKv6o)qe1`I&-qgX2CUwqh*0b`JJF;adEwjQiIq$+98Icaw=6JA}DP$IOe~ zhU{Y`8)J^(FFpbfI1Im^86UsDMY@XpzVw#1q};IfG`ab=W5-z{UssUY&E`Ib&uZ-1 z)bl=q-*qOj(Jzxb&P#0czu1!p8S@L|P!kUgQn$w8q4)3($x=4o#CqrR_d0fgjj{#U zK3=!^?ZyTg&>TtD@Hch-`dYs0`{z1kg8O}kQR)kyMl&A#`(&a1PGu`TG4!7SB? 
z3Y2JWX6;{>jiCX3`L%&Y`CGY0<7eU@@G-@(c*VEjJt62;?`bW+vVS+v1ZrD_PO@%& zJNs5~>gp%YlhZMQ`AIH**Y0l(=M8y`%0rz!`KEB`gAe1o+{yj#1%>FP?Vjq#3gMmY zChzQH^3FeEd)&t!89xZP!>cs!8b1G0U~GPRy`=*b4(h1`%qkG>$iX_iTL-A-f9U{8 zs*4N01PiUTWkk=l*20?;?+J9Ar@@YxRyA@Y_`dd@&Z|e-_#SED zn|Y6}Pr&v3Vhf*NPcs@90{>QU0*nXBcGT8Sx`NU4>|yot@7KV!W4C#dXH)p@OSn66yz^T&ha~o} z`qEx~iTc~}E$BXuL!-&m4x1PsA1Qk6w#Rf%W6$f@QxjTMX7ZZ*cI6Er7gBq?yiXTj zQQZakgR`P<0YAawAimIe(vXfl=Rdv5nr~)wTP60f&lc_)-!HJg5!+V_HpxQmBY4Q2 z@Pqgx-&L4c4z*O3`VNVp#ZAiA{QV;tO? zk@!lA>%zPyHqum7aJpzx{&e ztOOZw zA3Ads$B-4>fR9cz@&gyF>LnxpAHa(9DNT5(#5Vao&cQb5JZr5HISaVu;L9}WsL|v- zP4Ks>m*Dm0Gw7|;p|}3s^!5pL1*0=A;4UZUN8sj}&u&X~|^XEhxO(`@g{@j9}#<9^<@W1%8>Pk*zExKnlni^W# zv)YG_{aZL?FZ#dNY41boCS5AL($3*^;&Zp*XK&sfr_T@H5U&%j_d>6sf8$&m&bg+3 zlNrAV+l+8pK~0)M#?x9gH`QZG6~BW=58lBUJ`jP$Of57YUY%a2utk77;vAM`rg1`2-HW!VxSZ$7D zbD0Wl+^sgpvAK*q5O4erZ5$b2bR_?VWB|wJvS1STbl6wlrd=_*iPkS2rx@L2seAp@ zAN&pLFW%s4gKqMStIbqu+%>8Vx=B|*=q9ha+KdEe&!`Q$Nmm8PtDVPaEcK z-jlHIM+~#&ealvK3G;5<6mNW*cH<55RFO|e=UNCGiqZ5HGv7=bdpM7~UZyS8&j0M> zey(JV(#`dbbk$nkDjnCH)`38gzy;x9GWM?%<+byt`3 zJ|fwq4B9J7kb#PIPqf5XP_^5bzQj{KqvL{b?)Zz11x4H`op@etu4JT1oVU7n={ll(0+yjzD%=*(-ZnFUqJTBF6_Bb>{ZU;F+3F^@T)vwYU~{&|1i@6zaZmRYekf( zFS}PQDE4@@e_`!!-i|Gp*y|e_UFzuLVv?wX*2qz`3#uXc|h~oUL(XBFKOsyjLuigvv_d5cwEs^{G(@3qQ5 z(sPwP^Ud~tsOQO)FSN=vdam--R`~~d&Yr*bHmf{O&sF|`Ri3Tq#HaJtTIKuoT;*+6 z`MY|aK)Kl}->v5=Csv#7-=XI|%0sMjg`TTC(JJ4r=U&P)t@1QISNVrlxm3@)QeJ13 zzpm#h@3hJ{>UkH+hpqDUdaiOxjoJRSdQQv;?+B}0q~|K%V3ohD=j3nqe#a{3>AA{F zt#YoOdni9|m9NlqmH%RuN9(zp@(HVaxt^=se}UQlrFu^6I`5TMd9a?V{0*yoiJlX` z*ZZJV9z=Nx@z@?~#pgbesn`fVl&pkIWc$F_{xaAp{jwMR_`%Dfv*oKr%oICDj^wKw zfyW@U%!-%NCn8;5@lrOP!bhfqxPgz^-;6c)#@YKPXFA{LePsXExO#JlJtaJx&-;IY z-nB!UBICd>`G;q|+&?rmjw7NoFAy7KGrn`;Q*FnziP+04o^a(n<~V2k}O{6*JKa1Eb&L)Xu8J)p99x2|8}I-|1q zbzSe`I;*ny6oLTt*EN2`u0Un+?{wXh>+33ux9EBx*HbEsU)1$w zT;Ew){A*p0=em;k=ejQDdSzwtMqN+kdUIv*v%0>E>xRnWr*%D-YhzCF8eK2uIxwd= ztm`MZuAEb>n155A<$Cj+;wN?elHQ+Nyi(V@^v>MkpXz$A?$0Y;rt8DHKd+d;7xdrF 
z{fCN|=(?w_A1)5*dZ4Z!DSnjeQ}>SxPq|Fx9~2+yI{x_le52Qg*S^Spodp--Ig)!a z;gy2z;lQ@O5x=<=9w5B^@Y8Y+2wsI!>8=>TPnk+?DAh#hELh$ zKdB6#vdg8=>f2a(fvdddlhELh$O)A5u?DDTwhELh$U#SeA zvdimLhELh$r&Wee+2z$L!>8=>FI9$5+2toyhELh$6)MB0?DBHTCfvcR?0bT{$8y@h zZyUIJRP{LEs#;|STs@+)1Fq((?0~BWRd&GD_f&Sk)pt~Oz|~zUJK$=j$_}_HSJ?qq zx2f!atEnnG;OZ8Y9dLD%$_}{tipmbS;;_&ea5YI~2V51Z?0~ClRCd7Cc$FP+^(B=Z zaP>u%9dI>DWd~ecrm_RBhN~+$uw}Y#2LCY)<Cmn`gT~p9nSLMAd+d1%$q&qd_X|~a;Qf4+9eDq+$_~7rqp}0Vl^uA$P-O?+zoqXTc>hwt73GreqT%u?)3)brv#;FV z9#kT2eA*rq{+Efe6B+C~=>OAlecHJQh$Dp^$%-STa|rokbWYQ<<>6m@=oR@f4Km1) zl^%JCxk0B~Wa^aMvvo@I-Z)EVG-Gn2*DgiA-orbxnf;pYuq(|<;G9_Hcd5WUh3(5>Z`lE7@b?f_-I^WNMDohGdaYdpXhTN z)z?Y%RWU-kntQF*b7#yd#R_=|n-l&$R-S_RS&2$~Ef$bFO?7bAH+m)Y@E>VF-_*QS zFh9$gL$mDZANdaDe?ynC$^#;^DHAKvdY?U|Qp(n|w8))Y@1dRszMH1|=pm)_p>nP} zR659WXd3*-{U7)VFb*#Yqf>=9l$X*-!J34 zcMUf1tMT2t3g5jv&WxVUpJc=PNn&GY&f&95w6}MzSRE+=H@R|*h|5sCv=r0N;eEb; zOlwV9d%uS%SACH&bT$sHhW5xuZ4dX(MSHN9KU2Y3Ssyk7ogwz~&-c#^b|vnpx`w^= zcYe65FtcV^OZK8&;l<&@E$_d0IQ;N`9o})rYlpKFS`M$S+&7P#$za3oHYp%L#jysO=vGY6flRtOdvjY5Iv)K1nT(kU@<8ts%9I3g< z0`~%jD-`u~AuoI9$j6>e{^QUr*_RY^;Js90XRLJPH2l&P?zdey)jN``{5+iF??;?6 z`SZxXPO*)=UnN%SW%)JoeRv5t*t=EZP&@|hAN4i&z=sXsTWnv?eL^0qp7`CbbcKy% z#@*A1${+K)-H3(prJJM2IHM^VDgNVwr_=)T&A?b=SA)Dpz8bZuoKlMU!M;s2-__qw zyo5S0{W!DT{*m)Cj3vv6De@fq&L2Z(-oQs9$llg$Vr&c`ZWR3~38v5&`VoxB?79B~ zS6H%~#;5bj9}xrVaZkUROZwusdnspLyvUFA2mdC2)yb}-RiCrn&iIe)gGR{L>=&8X zK0fX`dLQfPd`GdIX7-QM{doMQ#uHsfHKyV}bE50$T6|;KGg*X8`1#^E9q&47FL-%+ z?$aZgR~bu2v6rYEm1dk2`lUGx-jx?cRz4#(sP1_)O&OYaTB)t^3(Wh-v*!E6@wfL- z@?X1WcX)hj?AF#ZEe+uK5MTyd-U#7a-yzPk?8NXOj&h~u9vBxt5AJe@n^=e0*eM08 zO`juf&w1cLNAVo52foeN4D=!D=?r{#@_z`79|gwcQ|O_O{qO~}`q?{jV6Z_P?!0&$ z?$nmrct1tA)X!RCHfnBvk$RdxS$mF(0j#rlt^1{sG|sy9WX@H$?6SK4w7NT){O-)U z_MNi$*_dtm?>IiA)8-|eY1|0qzHmoe>>aW{7M-vC!&BefT^cHAXt@)A_Y4{>n zXwMV7>Z~37r(bm2VeIWCvitp=_>r0PIh#0q)%d+?&(ZN+k-n)WpMc~#y)V1Dbh+13 zOkbTvUhHMrZO(($(uQ(H`-lhOLkCj4lI-))chGzMNk%jiJe1E#W)CBporLcOI#KYJ 
zK=dABL6~rI3$Yn#Pwhw^Ltc2@NWRA0i`6;2*n0*%U5L5j%Gg=y{{FL>$9sjw$M@`l2}X1&XAytG z^Vfhe_AEyIj`gtOK=cJ}QvJ&5d6moBn;68rH>L;bQi<1H^`M#mDY{~>E2ruvZ&>;7 z*7Zd88EAxJOT6MyE} z?Znsg?*IO-ufvnF$GXikWLnok?MLIESH`&s`RZw(`v^SgL(XTwll+Q7C43bwrwa~K zq5I^b?a&91J?{ef-N)uEX5SmX`)u}V63DCKs|F90OZs3Td(oWZ?J@g&&#df==BE{W zt?YQh$xS`Y_rev6<)?4H7a}edW$|If(&$YL=Uim7mJ}nW#be}%jx7b2Pau;E$M>a> z^TlI?+?`f3r^@e#AEnzoLmRobld*9o@7zrL|Mnz#iSV^q{s&?=F|H}ZVafktU`%Zv zV`P5!Ku!U<2AP{4_%i3OB|4UgG|Vu_@iwC7ziXny?#7A>v+ zFHg9B@3{InU1dA=f%^A$n-9A@L^;Kk2jK6^fm z=zX(U^9uYNEc=yf(;IAB< zP5`ETPrJee7u~klH{<5@jj^8=d+%9!7kobA#aBECY&P)|9Awfb^|`&cn7xX*Kyb*zY~)(xDz7nj7jt=dJlBov zQElQszBGQ#%y?kr2u})K%DUh1RkM6h zIim(so=jQ#b`*K@cxfl}gU-}&sUhE^6 zz~}uo-N-<<{2K4)fOi@6`?tR*{{nppC}t;luBy5lBOBS1ZHni9!xx`xV?;f3nnkQA z=Af5NBk>Je($H0UwH0%01nV-zQNDw61MxWR=Vg>_e5)efhw`OLhwsLGsD3Z>!}8sb zuB-D3$|um<#O2M%Cz6-5c*piB)^q$dcqc{v$L4#LJWz`tt+@X2IzASIfL$~Qw6L$C6QE?zInsh2RG?d>0bcwb6h{v>}(ABG^hS<5~ z&7Nq*0c-)#??UKzAux)~`CtWWPyzi$jvHAD{YLIADTfZg!$yd&RhaZ5p+|o|Ix6T2DMg&|T@zhcB|D`W)qmN2Y&IuTyH<8H>LIW*5 zqF>~1S;msc#aD&%^Eh|Ox23MWE#Wx-3&a0(FTB?i)|sM4Y*sdWk(eJ0|_vt)DeOu1TLqwln5MO|=Wg`=3Aamk}7+rO2 z%h(sLK|ep77tO%dw-PzD067Kts;RYPQB&3cFV2uPe88&sooGufunPScQ7brQerkcq zoAU6Yh-*O25%lb(@cTknpB;PMz8y{O zKFDzH9TxngN#2KM_K6g%Hp2PX4wBK)i1oQ4UwuV4l5DRz`8%{j^r3BRloPj)$2K)C z3>=v>%g*KD1tHG1lR%7Q8(e=pA(wj{g z6JHehAj&pE+=Qho8A~H$@j;V(Lj&u5UgQ?uZHn8GRx!`Qn`ZEk2 z`;g#7@$oHrT=7bzGl{QW9)}&td+ET=X?d^T+4A1I$a~N^)#YXG>!EXv%y&I71C5E+ zv+kPz_VN$h_xo6CCU^#X_R{|r$N#DOH<1(42#tpZDz=em`U9LvfnOh*4~>+qCL_5E z?Rv2_ZZY*nr>>lTGB+e!z1y_gNQUSnSd$-_d~S1sz+@Kmj=tqMbx)@+)7|J+!GU*} z@tG_itZ`xawulzJMqPR)`K|%qW7T0li*1$e)ERoz1$xvK8OMu^<3q+FKIlAmNP3dw zPU1SQlWe0{a*dY|qve-(=GZi{7JIAuCqGW+F-P_|^;_))xwqF-l}4`jb>1iM%;{vt zAyoBV?W`U2I5^vzx^ZUBHwcNZb7anvE+o5$QAHM-Wk5- zJfq~D8#ps&z-tGg>i~m$&=ujGi;VM9Lb`Vfe^}_(L2rSnbLM1qH%5Gyz8%BPccEwp zdw~zF%L;8SkKCm-+Y4UqqhHSgk7{>w{M=-^t50(y@Y2A!6T2V%?0!HWx3=lWrZ)Zf zYny)5Cm184L+iZ#u8B6eQ)-~+JKl1qvJ-^cV 
z`kXbt)0tl{{>SK6hqiK_LbObCpt(3b2cl7ZBAS;wnFryrK&9z_-=C+xG2jg>~bUfX(Ngnf%|6vNHblPua68CO+fCSN50)S?W>)4!SSCs#y`GIf4@Y34d&H`#hcpn7q}cL zx)`2!%W>&*7G1n1d}Q$H*jFcrDfk8Il@4;0J~(v`d!Mg%-ucIYrA6L^{Or;hTwP_#1k9%s25&cq{vRqXomy2*1*C9jHXF7G6$uj4P?lxV&w~bzz%v zWwEZJKeguAzG05-R@T6lyN_yo9&65JOLV^1z?}b{xbGBU&R#GNwSb&F*h#1tBp@)5tE6sWF*yjg0lJ< z%X95Kik|-AwI29ICwQPMWW!|xbiI*%=iSVeXnGajd0myu;D?JOb7uAH65U378KT!! z=ZCIkj`L+NjMM6T;$GTtWbfA*>{*XwuFUr)jyLm6_%kYt{geCN=f!5555JV{$y}@8 zK<#b&GjFzN)Jk9*n1{x*FT|d`$0Pe|FaMLwyY!_D?y0BL8(g zay2@RdB5`X{XcWxuv#l*Ce<^)^5=L^qy3$2ZsU&C%}XA9u#Xl>J|O z0e{pwKw}z!A#6yd+^@F2jn3Jb*jt+pr;oAqDUJPT*E~~pX{)2D52cHYS)yOiyOL7; z4-EbvV~;F+xO7a`Mc5mA8?zX9$;LMK=AozL&M{^M^uLyWU?O)T_Cx-|$SYZl{}{X? zcMGr%f82IKV2po@F}XC;m^GDmO2I=Pa6Hg8H?1_onDvJHv1Qq;+g8@Q9(oJEt~0Rx zDfYrqes?Dr8*kvb>Q+uXyY2w~2>rF*+@p^>?faGFK69SVl(F~%swX2m+|Ujz?e14_1H4?yMTTx zZf-XBchH~d)V1Jzqvwi?kio#iiY>;@+3=y4-~~nO56{NNy_GR5|4y?fF>(YwV0vel z_06}8S;!|P6@RzBnFrmLtTO%K)6c8){2MN_&UuVi^f=JD`pr%5K4vbb@0>Mf$oHIY z@=?znJj;iEP^USc(vKvMRk?q>ENIH-XZrLDwo}l@Pe`X{u6}3gES->Rli~B-;P>6( z`{%*`dqAgp!pA#>f>X%D3NPO`s8cwYVVJqHBpb!pldlf^lxZ1x0= z(AKfGFQE2j2PJtYojl?S^`oy1-7Ck2i664}3UXYhR*ua#kblXKEt(p9%NX$%@rcGV zC&BVjW5&~t=2h?7@9kj@kzM@gng0F!bpGuo-fP01enRi12F4sGhoxwR&RI_ZhJAC4 z$+@gW<8Y&70yJyEIPxwV#(6dOmEBwDORNdF(%%@*x#E$0v!6N9^=Ph}gKV~&PYCc0_kJPl4uHHC4s+qDc z)98^4{1mvz_3(S*xep3N)!%&R8_z=Ak3#?JG*5yv_YGAyHBJlEHQ|GVF6(ckKD*yj z$ZG+P_H?%0c)3yHr@q09h((#XEp4!|@-FbH++d71F-Fx}KwmY-*^hxYJ-UPyZ15Yt}g8z}%E*^*f7mBRg4R^~HP2 zm|RgnjE{Eu)8TyI+J5^7-*Z3P1DR;u2fYkD2dHZsV>9F*N)s>>+4R0T@oa zq3X?Rfuq9i)`^EMHUtf8`B6L3?^1| zjJkmLqB82s?qSuZy61QjYYLv3pB6NZ|4OvG5}eA!kFGCskZq8c;roFf;RDoi^zy1@ z@P^?H?9tJ$Lm#2{{;?l<20TV_RX=0Yrk=(j_m$cUCMFEHIWT0DJlaWqQ>WLk{-*dE z=J?v3v;E9LH|Agh5Jf` zqjeejNU7v`63yj0LuKC}Huj`f$)|Gw_qyIzWxcO>w?{?R8#uM$2pSW}9T)T<$ z&;CyM<6`bI<{uy3m-V}j^{-0S-d93Iv9Hp@;5zn)bHT*g@Y(emz=KT-oHTF#pJOG+ z^Swkzp>aFi&_vN)(e}X3SP6T0ie2WHJ}vm%3w&0wH=w+uQ`~)O+;e`iYy|sbqUAwP zYJ&8JZSKA`a~RtM>RiZg 
zYW8&BJXZTPipj5if@b{LyYSvB-YEn|&HFuMH#ACa9nmmzVAiT`b9h!{TztW5_LXuD zrH?7PGO3%3?`t@hdFzNhGc$Q-u>HKVku#X*HK(k2LH4^lKIu9t$Hg9#^Zie{j@G+_ zPx8!XneZ+N zoSRWLeasM7w?yF_GZMCi+42xZtaDNNLRMZ7UrW~XjJ4duT25rG*0PrOv6d6#Yq`*|mP765tmROi zFO09H{q8HQ<$aE|G~Z_}_3lvC^1k?5%7+UZ4gTU5{M&RKx)_p;LO5a3@obZh>%QnX zI+UrmY5i^8%@rb6=}Bw+g5?0?5IkEvtZz-Z$Aan411(ung4qn8UKv(5?)*F%1ePU9rLwc$87=L4(tFEOU{N9 z$wd1t*?o~@cggKqH^E4PFR=LKvhp#5UCD{W^*kgW2XK1lf{MXoh?`a-y^`3UJC3uL zxgC3xY=P1%`{SSDW$y$TD6U%yZYRzmZ_I7 zEA0SYwxGY+a`m^-+djKo-Tz-kuHJk}`*31#zazPNIB+7~t9q{o!9!7IJ0VW3lAAyxLgFmsuBE&fAAffIb(J{wH0} zfaYucrk>XITAz*Mp5{f*r#(V0B6!}vqI|e&;F)vs72}46f2qEMlP8A z@tJbN%r@m#`^a}=__Vqrd-_+7)fpPjRYVdd!()Fieyx?mu{XX!*s0<1y0#@iF2{&Xq@r#MQoYUmM7AzvDHa+w~)BhFdp?o-QC4YsJ9?m;}9f+|=9+sWWmWlt!+B#+A2Tz^R?;)RHuk!(fq2Hc zT+)UHUDJkUz1gNbg?`~%zf<#Fe_7SC+2vVd8u4@VvUh05ydFpY8^+$e_DRmHt@4aryWu$b7SHlWnjAiP?RDf&vikG}eLC4D-({>T*5a+k zPTTLp_AQcMQU>Q8^5vU^KLYW&U(cS?8$U?!rHjsJn!zr0R6_uz59??*o7 zFCqM1Y5Vh6!T2Z9-&fV&E|tV!H$v|-pFZYqZJrTY&6x1z4K3Kie%mF<;cbj#|K&;H zJ)UdUHo|jS7-I!v%w=p1gIKS59&=3O&y3vTn!jw1r|a6AG&Y`T&st-%=G^j=Y8@ME z-8#0oAH;VB4<3Bc@V7a2KmIn1-}XgIzsEgGzBc$3mf#cpI(_kHXD69^l!Nj0H}`^# zmBrellz+li@STk0xy4i1=N!ki`jV@A(2lYE8|*_}q5sNCLLc#u&MjMgLpruSl`|@f zFV{b3_J;F6g*bJW@*m*)p}LPx@L>KoQ{G!h?0K$7;NO$UeB6O8rXg|nc$~lz#fs04gO4mtD>UQ{vGR} z8NK3b(Kcr8SHQt~>N5S0R`xc12YyRC8ThNqU*%1%fq#DhAIjeBFKtX4WcrrN-+MRD zkWV&v^VwhTf&U4#%9wzyiF0qXex_NfC?^XtT2rl|(Jk&Lm={-Lr*t+g+e4UV) z;>Xn@}>jCd_>fdeG zUt!h{|CpySSp1_AnkIfA9^r+iiARVh6hhC!gT54AJ$P(bd}YD%rCzg(`Bcik(05cY2?k%i@lXY`p{Jesu; zJmEvPA;6l+_w*jlM>bQ3d`*CFKR$es>n7lTH`R(&Gy5x?a`?mu4}Lzzab}~J_=)A; zmSUd$7QVLiK??x4Dfs&c2L%h``L>EPZ$a?wu>^SYz`%WBbnf|_k=C`&B*Dj{wY*bB zdnvT@nA;VqOqs^OUzdQbgF=h(lX1>r1O3!I^?GRB^EbRe?!fq5t)yLW#hjk z@ctz_Z=29k;YuocpYndnoUtpKjsFsRwrMq8kB?5Eo}%-K(MCH()ElInf4)Jki-d^m zyxYjpt$z4vw;WkZIgbl?E?c*3`(=FB!haLKHPgEo8}wc?xG;hL3DmcO`f}-``Z<(7 zPU5$gKEBFtldD_He)@HQe&y27wXSYO2Pl6)`BhiKZnv@ z^2ykSFy;SZJqq|A%KeGlC$7PU2xUM0Q*2?$9M*iZ2f@7O^DL2fi{fJ~r<}mM8fO!+ 
zLqyLt9*xVVd1gF=`A_Gb)*%}nn?(OL4$ZU1vBH(yV$ZF3qUP5glg6`xF|2VdNL%AN zKH5inLufyVx->Ucl;5YVEc&pTx$RB+@6(qet|fP5@jQw5hs58XPua)2@pqVGBU(oP z8nN>=Gyl%MZX}>8sF(&fosLGtO0>&eJ^7}B4ZtVfw7^S|3d03rHyppYBoQu%Zv`!C9%VG zS&(vkG-vIj(`nDSE|hjOlU?gYfakH z6>#8xm98W*r>x;Q=}H0fi@ z+zaki4!SMBY~e&uv7Wdu8WiMTI6a-U5)G)KoXA>fE}Dr=kRZJ0`+T13zlxYn+2Dlg z)%Sum!CV5cwLMN}MW0IfB@ojk8QAKG#)=noL}Q17<61M(*ktMw+}2XId4Y7Ce9Fl@ zchXwX&*}J7OBb4e3=!npYWk+}i1ui`?Ed-5EAtUBIX^y6*~o;FAMEkzUvwpVixJAg z-!_}^i+1SWrXQb1$IHm4(w2_<+R}06?Y~RMCwvMW7wwiTCtBAL?S_V&lXhn_Pog(1 z_$dBw(C_NR&rZKZbNB3ym6$Xa8YVca=HDrIh~|o({kP=~@2As;ZNR;gJ_MN4j_AX5 zWE9bdX4d(ifj)c|-e<=7t+#Tu3a>hpOUQlne;YiDCJK)~>OSCjTl5}%hBiBVVkMrky>%g)QwtEKi0D=$y&tEJHmyR5uCwXgC|{*_Z$tZsqQ^|)=al`$lNNpw8&$wJ z?aA_g=DW7Cd?IyB;OEqJB(v`LC^qWz;!{~0{!j)_PhxyS`7L0KD?$@nWqjoOip|(X z7P!wX&)2~JWn&Z_$Nzm}KdzJLug34x8Kf_~d@45TT>Ju_a!!5`M%I6qIkI_$`tPUj ziTn!be`%BOAr~jXY=GnSIBRHLoPMdzp2unvO zV|>}r@9e&od@5RAjvO#f{o^_I1XEuykPBQ6o%yriRAEQ5EpyWmPBn1ue|3*f=TS{< zWZ1{g!J`&{+ZtB_|LyW9`t$!6PBlv3MXoErr&c;-BLC(5+d5WmTz45lA7}7u4<`aX zEO-gWM-OJrq)Rz<)-q(oCR11PMaug~m-0oVBT1K9%bazjOC_TR>b)BDuXc4Q`f>-q zSAp@jLf5wHQRq-69eNcy4(!Cyq4t1nYi4Ju{cUB)bH{IDr zEGUjdwbLZ3z2AjG8^G1HdZ}PpED>dJmEwo!XgwhktIj>L5^*@vWq`Juhy8y(^D6l~K&J!kSq=~`jd z-j;8Zu+?attEu0%t%+vXZ6uts^r((_=}^Yh5igyGkP0mhIrW)5cQEJaQb#mrF!L&%{ap6jGUm#uNBxv}wc)I-{WgjD6iiFs`)A=%wynjBZDc-p z(Vnd(pSXqJU|goH1zVSGYmvOw9<5!|5*rm{-a4|wY{#zC)(%s94m*r(?{2ko$PRN3 z8W4bvB}2!wb{*MU&SN~u^uIk>c_KVYGVcGx7L)J~u*DRxW^HxX>^8D?fow6%m1t&r za>W$pYRIR^+LEsoH?1T2n)7Psl&?R{9^>oC9z&lylCg!i9rB#dX^$Bfx5s$F&jj#u zdmBBm)gD8CI+AJ6MMqYD2K}+AjXh@EIq1lOxIHF;|9=)b@>%o~)Ascrv66pB&J%uh zWRuy}j!owO2DrBEGHv&^xvzb$Rg%q<#b`j;%{ZH~GY;Kd6)821Z z&P8{<_T7m?;x>9V6vUQhQrDCx(n6UlyM+d_&pcH57I;4UHcxbRxhHzhn_cR@eOLb+ zd;i+5%lPv(b+X48!93G`Bwr=lPpLL9kQ447sEb^RkHtLV__~aq?0eKTU+N08S4>Pe z<>tZDFz%mn@{SkqhjZZny0u?k zOFWK@sm6xyJb^C}`}g>*L@#;Pj**!fNnvbFt+Dn~S{mcA_8alDkgs>+tL|`?hkajc z=#54AZ#-~Uv}+4-nmoi`zp+brqdTqU`0ZW8CoUzIbYJaPTX~q3bL8*IhVmb4|C_T= 
z#_QSaeS6tc%1ZW{d)`gdXZnmnL%qaG^b-3o-POBgpL_;71@3El^O`#OE6@+(clj!? zvEnz89;y9a^{)J^Cl33Jb-NPq^~$>IwZjEte}6de*w(SlcP!o&Jbv(ScCSN+BaJ^l z?ESuXx$pbF<>j@1JRJF1(_!DKTh|BYK3-hA>V>y%gjbeBPXoyc`hn(uGV{ge6r_Q#L6 z`@Y^)y{c1nsZQEX4C8B;z!`V%`GLjRbFK(^FG^T$xLlzN@cUUpjJ}P)&2w7=IWGV^ zmQAo%?W5{9&>hd=3>R_u5ugE9(+>|3^yOSCfvBy$Z7n5TrRnsnPQ&9x_`GT z+&s-4dV8Nc^v+gy=-nsXq4(xlxmorSM_c(aJ}fn&Ck7jJO~7dLAR~-F$C4A*8+9v( z7$f#v8;IVDE}%8Ox*IX_uO}w}wh?R1dqo1=+sB%zz26I$cu)1puK`~YVos+=Zh}tN zrJ8Y1*z*nKkgscmcT8|}BmFE%1is)A;A$(ZO>f3`gJ%XdmcGO>?7Oue{uO;&D#x3? zU6DVKJ5SH?&4~JvfwS?(hGyC?S|12M^y)p#jW<#V%-#xY%{607rbn*ES0jTMS^7@? zBKkI+co|cGFZKCj>dXh`=MxvgKQiB6=n5>(cip`nIWAgunK4@9+sn817_E4wX_2YK zqege>)jyUSy5qHb79Vr9fNp_SOQmofZMN z@q4@)-;}HH$I1i$$3q9k;TQV{)4%hJ;L%N2jE~mG<}Kcd-B|J2b2*Rk1!A)|=7Beg zmrG6@FY(!Z#Ai<@K0EtZW*+*c%jc)16QA8n%-}?PAoGV;rIlyaFWZq2Fypi5UsS)$ zM|}21;AxwOc$>r|$-g$M#>f0*bq**WRg0UL1KbZOF0uCO8d(Qt%+!_K(y zg~Wtc+ZDuZDkBd-HaU2x@ACZngU7wJVaIaK|I*oc1^kRBm+|1MHMxd*$;p(K%XqRG zPucMK@p`u{gI??mDwhHA<`by9;M6s{8@u^x5^o@;Eo~>zcGf%J+g(JA+UA$iLKOpy zjYapB9XI`?IS0WRMl+tTXiei4zy`6}!ki0UvM;u=_LJP5b{~VB={wih=eHSq0&Sm; ziEquh9TUG9|DSs1oW7fLE|^SUK1!j7nse60oOAi1RE<6}7Bn`k;<9)we&<@-vG|>H zZq3VfbDi_=C)YxJ&P(wPsU-$bz9W8a67^P5FF8}q82_4ct>p^ld;xP_F?>N<5@T9G zj)y|#yxNseU3k%sW%@#a_{LD=v@F=@)BWDB)8H4&$NV^jlLv?%%PPl1r4D(HR+_8-ME-$#h z{CG}$ZgM@omf&FK{Nvf?c}a~Yn|cSw;jto)U&k)5X@y7fU8HHfmH%Kcu|@3s2cxLl z2Rt?dj}w4L3m58_2@VC5#1^j}K;ANJ2kpY-TkZiB+x{La`8VjyhhGmwd!ciR{$B=N zfqsn;kGO!mB%j99#XrPfZJr`N;vMHR`N;f4$vgcb4?K+@+Ks2%=;vso^Z&4oJL7G9 zlQvG22BP14Ew=I3oU1A$ub1X`4EY|?(>=3liP0cDJjwhV+|((2Xb^rclg88-t6ZVt zC_EB-mgI~1j7PDf)5)vTh<{2G{weq{R;+}k2eGrNE}cuNXWo!4qVw@$knSDNe7jGw>v=Yg=~IzMN@v-vM{^{pmXWQ}--e7UmtZ~B4! 
z0DF_WF413ddF8TPe2Q*LS+cB*_`kaU(ywAARqd=r+ck)w2bZ_A2Er$eBmZM!@Bw2^ zx*jCX&o4N$(uka5$`3EbHp+Ke^1!N1v64@CMobyyid|QM{F?N@ykhyM?PuQ0lgUkk z?`Bb#%AWMSWoQO?qh(z@-tUb3L%Y~s+n?s-=6 zk+-d-G-mWMjqr0Q2y4wA*Ymd*rke)B1tzNSR}aGo=dTEDaNsJ-hB*F%r!&qibRUD02^7hAd+`JA<3 zKeKX|{5k#S&s<;4J+EsD>!930X~@dGBE8X>^qx0*_hRre8ocZ4IbQhRzu}|3A|-H$ za}Xu(AzQzDxy}aZJLRoX%=naMK*d%Yq~o^g^kHBSfEa)t`|-ahMD70A`GK`Dm-(TF=eoT6!EEc)huv z_@B3~XYWWc>v=jS9KITH9av{2ME`5ai}%89dC@nq1=g15!8iJ}Y|}Xp#;bJ;pu5+y zPT=4O;it7W!mYmM9FbRy_36j@^d?VC8o3|R{lZrq=9hTkU18?bo=dL*e`D<$;pLWJ zd+*4Ltj&r`j3xHmr80McW3iFzS(~1$O?!SaBbECAnaqwAT8l20a4L7FU=Lfsx@_`( zHL|DUl+`98Q$>)eHnoxQHz9AXMc$OGSuuQJn&eGv&pYu=sIEcYEJvn_Fki@>ld~pf z5nn{>c3PKc#0SNPObBl_Wzo+gyRrT|p}(ds(T8@v+pSMc12!YU|JRT|8b^WW=)ghN zC!IJkE16gQH}GHei*eyBa_Q;c*TaY|+2sn$Z@T`Iyxrb!J+L?^{cTm6NjLrQoWjgE zmT`_{yUzvQ$Lm`Kj#epOCblQldn;wxp@P7nclZrEg4mp6)x}8*N1Gb6lK%ASY>O&O3B6@hBg7Jqdp4+_`d7N z?}D??tXkxm#$5i1f!KJ3S*|0#Xw=*IgW~&4U#EVNM}Wsl)=c@Ur*Lmpck(m*tTbA- z?6l8R@Mo6KRPe{8(R-FzK2yOTr$?L6$&?5CdweJP1v^@_34Pq=Z_5AJ#Cffz%i)#4 z_}kv;{X}(_%Svu*70NLg=pc6Yp|t&|27>9pr3z5 zF%9|lV{Bbn%3bivxaiI7?Er5kjc&X&5dCPdfj>y!n#NCZL*h+Y@NQsnr)14}$drD* zD?9OEo$}u4ob+H$HdN&(8GBa&ZRkDPTr@dYnF8*~i%U zf?2lCh1_@y9(+AGehGQq*h`D8cjv^Obm!EH*9gzHU@xKV3=`jbMTkAOWF^mntk>J? 
zu>)ZPk`GH4Y$6`>?5T3t^^eFt!Z|N`vvy}AQxT5{ zSWAw0r{BDO<}Zz~cVt4O?Dw}VPUig7f{SijjO-gpCKsI7m4w`r90_`isPe=;hznijZxEj&Z@hJ`E|mC^1a@~IOM}8*b)7En>jV%5Z^t);cLiHL45bh9KL(O zxbI#N-@SX{zI#D@_nN3L7eAp~{DjJ%18(u<6HapuWkVtRAm#n>d1id3FA4K=e{ZV{ z44C#RFaAR6Lx0Kx>Bk8A7KCm?%8Q#kS$=f==7s^pWB`tX=LbjsC%!_<;bG0dS(9vZ zz_Q@~IC*rM2b(t5o^NB%duN(E)C~MLd$954VdEPl8=nO)R&Lb(5!nY#8y|CcaT4b) z`x_(P&Lf`<{cJmL!lBj5RfNszd#!Vw7P$(()c!oDMTk9zJkT%l{%ZIGeft<#Ay@jw z@AEylvq-ojyb+(%GbgS!5L4xT#k7g{`DdJc5?*41+|U-Tg>!wuw<&S>CU$o#eD4A8 zPgZ={^x+bGTQ*MR9uhrBGhtlvp4$0@@BV_nPi`E}8{*$u@|F1a0KQ9$Tn~-g%pRfc z>)QjszW8hXUt>%6Uw%!v92l>{u35o4F2HU`?y%i!uAaGmMk4-8+{cz!mn8TGo>v3& zRb#yo;#WnyBYlwqVEw+Qjc_7xUhhf-#_xJo@GUskILUvwA@)h`p?dgc82zFd9i;IS z&Ml&6G^2Y+PgI`k?dTgxaeYzwt|Q8K?MmA5A@WNNdZYBhHLj$hvTWb-`RIk&7rnep zXP$)@-!}20cjSULc;Wkp<3(>1FKz~Y?uCC11!mTwf9YJ%>g2?lnQ^^Kx!9zWy@KBL zGJ02-F;$~qmAaBm{c8RL3H8+u{i+=Os@apULw3haZ0vxY$<}@3T)@Hn=7}WY~{_DVE2cY$$ownKkK@WhwuUW-6;23;Ng0Q#8E9lSF{6E}0^u*RzWw-#FYRG&ezSmq%FuU%8eKEbU(Mgx78hr@~ghkSn@}3!9_gP_Rip1xkO!&*_ z^Ige&YnC4+x(*sjoHp`9QzlN@9?Il}-}&LSiwm>pA@IJjeFv}+fjdJ8+!Yn&}1jAZK*wzx4i*9 z`$zLO+gD6H?ijJUF0PHGc9ic`^4d{sZn9;|mkXaZGv0{&xvHRZKKw>}%yB>FAV>Z% z_+@N1Ws8)Eo4EpCqCRMvSw(%=(UD>LwM;Q-a!TYfvmK+SGj`DN(sAKv*Q<}q{utY8;QQt#&lLLUv_MX6 zN}%o!hS4*Y7q__$V$acI)M2Z}?+u^0t%^xo`o=kJlkvg41BAQca4py*c3!pZ;a0ev?r=(SpzM&ludWuAuArw z%1mQJ{l$UsTKc|*zMFd|Gj3iy%$16~edpp0)LTPcvSC%Aw`>2dB{rvcEjs@l%-2lh zkD0*rK~Iu@|K08|`NMZ7Uo&%k#FY?9a?M&?AX}E}Zfst%W!=Spas*pp39E=^>3!HcX$GeKlY^gXZ~vD?gN{Q@T*tPT)ZE>;ehL&^*d;P82!W# zBQk`3R=B#6H|;JnrikvXb)`noJtB?2>Kxt=e?gCm?B)NRL9Wod9#`l>;LGB51Efd# zu~$X+|Ef!v#JEe|*=K~_Wxbv6Xb)2F4d?ku^v&O}2YzA@_9p%t(^(_^_eZboZm(4z zcqqPFtYIp4C@(rCcCrUf5HI1g;}Pb zV)x|<{3pt9@qzB9zvA?`zv3GF6)U)>Z(hb{v4HDa@n_VxFXOwIiJzi;6V-<0^O&?; z?RXzZGV2-4Gx;+5slS4Is;7W|yAFMC`!}i%#*w%by`>vIj;g0GevTi_y&|OVRhPH4 z+wwA3LZW;gNAaxT7sQnE^fA}Z%QyA6cujxEH@WYm>EDse;?P%a1qN)IeiHfsUnU0O z|F=4eXu0&3_4G|KxI~xi{emssBm)y0Zr-{5`3_LWfw9-E{~&K%7{90JE6@(k`9(g! 
zHf6@}`TK2)7l3PPx%YB{!E+<}XlzCOBz_;H1fuU>kr$rvVqkIiFI~5OKQ!c(Q!DBf zw__pyuK-`K0$(2hZ?8T)u@&AL;_$Yk9e7*gfVVXcczZt%Z$sLJw~Xf?Xvf3;M2fPuSj>D{RbXifo9c0vm(HU z^rvnaf#@&dbZb1cYZ||Y_;rScZH9IcKLA~|SEPXNkC6|#7yAZ3hF`n`FMfBL5gKID zX6*-vhRUv)?%&NG&pUZMAJq1Fiurs8&nHqiMvSg43;8$awPdqEx3N=UJd0)5kKeIo^++jO99gk2l#mTr#p?Vw9 zA8MJ0GJX-}CJ}#%B*rfO3@Bp z+wm_l{nUk@1LOSQUll)p?BEBf;NTk${A|4GG(Q;0vzz}N9nIall^@jJWZ~vlxUY2u zZw@-o74(tLik;<;@qLWcCC8sTK^fhYt^^0Yi*C) zwdy~oT}v{Mc!ZyR=nRPy-d8rpN?w}G{U?pkF@D#i8%t8r&FtLP$n=Yh%Ko8D@Z`V+ z=!vaoShkqH;FZkd9_ehASA;U(?jLfyD~pvMdpb#ml`s9|9d3NbJ@}A!f<|?QMv=e$ z{;tH$b%nkI%-L}bt@9$T$O&lDbHE*j*&y=uBOx#X(=vWVLZmAzdanY&w5nN_AZ}4+qXRH;rxi?Gs$F<#Uy`i z@ARWxic4Caw-J2qv)xZ#6k@5?fZH`X_n|W%I`5JFaKU!(!};5F=EFYgp>rNO)PVG;AU-`c@{Vrv)9!;%k%%> zx-M#OT`y$-_|?R8zvxtUK}*OYx-&$+Jf z;3YrV+jX?!NZYvFcrZ2s-Px!d{~@AUNC^*rzV3z~2hLmYo&sv(3b>YX^Q^Z{gP^U-<=` zBWS@_{w@5vUd68~S!W1_Kbp2S^W|m3fH&gpRIg!Ax(0u%)%2}QzW);~-~Vj z5zaVlFNdBtBg2*A`%#MT$6%gUpnq53^D&qhF)B~znz(Zt=JdA8)Qen!Y=O^5CfB}i z=81Q0$i|+VeIENGl>JFYG&9+Hmf3^%lc1sB^hAA}C(Ih2U*ipW%yWeW_;&pkJ|TZt zyWchJ1<9@#+v*itoIt-KrAU{Z>?5W5vzlLspAKLh9Xy;nz2g@y;A z;kD54J)X^D72nzGO51KA|0vg&eV*?0`Mmy|&+BjM4bJm+r=Jht`GB*Y+xn_#cR%QM z*0j{+eW2IU@Ba3rg^L;7tH(B>XTIb-otuf~x>8yS(K8b|v)2Ug@?{!3#p|<}TQB#s zxYpcx>t-)j&KzXl64iMLb=q?}2m5^w@Nh^aXUc%76|BP+-kb1{&Po$UqSA;KvR*~B zH-j}FWXvCRe*R@ ztTXi&1v-U>0MnDM8&dPi!^H4E72B`?JexrIhjRkaMXpYvS2H_>)HZv|(L{K{4*mnK z&0{O*#|HMdE5}uiRX--uj}M6-qgbCsi{foCA4Qd&LdpYUw~>sUTl>95KXJUXxl?Gm zxbI^GF%4y|##vo;#r_=4V9=Xj^GbLeHqI}6c4 zp3ytcJKovcIrP5cof({^uH~Io$eEeQV$$iQFG&Y{l$guX<9ZQyWd-lVwszfl^7b_5 zzCUX)fVDUuyt@Fr>l(UI=XKdLv3(|`3ubV>^D*gVd2RJBOUF_>Goam0oM{ICZ2ckt z9=6pnq+i&2gmjC4w)3u!LmPD7Rd%2pXoC0lgjQU0ffvZQBcy{Hw|ker-wC@O*Iuqq z<6N(^aIP0H=)}3k)q${dG^c)M?p3-n;%6SL`QrgTAIK?m05M*Oih0 zJ!C$;{xR&Oj4jb1{kP(qr$-b^ zZWy%eh#8mN9obJijh;^aWB4j-k4p10j+~K?j_(w@l=%u3Ut05pM*`7x?D2j8jr$?x zz0kOQ(753PJB1eXOsaW_HfFLunt$yB_&A?8449~d-n|Q4G(hV#kBjKX#mwDM+W(OD zM6)%HOvaJlhNdpWAE!N}u29S<~4YpDLI&u^RaknERD=N;&jy|x7hVjJc&XA_~#>Z^SJ^V`s7(Ny{K 
zzDqrV<42cS?YemP(U&@f?r^kwkp1Vqst-D@`t(kI8#+D_+WNlgdBRbTyL0G=j(XO? zyIxm4D;@Q;q4mkc6?;?lL>%>u?;LvCQO})+VjJb_J`sM<+MhQ3;1$}cS3S=<>Y3X) z^qQldFMJT&xJLEBGg|9u%QI-}Io0!mqn?*KhdyxBbC9?)%Ty11#jJ<5%WuP1Ceqf= zc}IL@5V-bpbUf*K;xRf0blj8dkFuWlokTuN!`9~M>wnwR&#!gHUp=C8n_BmWse`yw zCj6_ehaC01%XbTS{%JB{z84vVycx(WJJWr}p=sTTiA7yQf#)lMlP|KiSFkQ)nA05Q zV>DwK#duTj0q+K#T`;GAC?DLB9bg*v5u0XQ8mAe8?U-;K8i)KIl^uXHeuo-8Rm-}` zhDQ6!TXlDrK*;XLTgXe(*w_6kIPe8%MYJ@$(ZVNAGQr7zHryh-6@Vn4st@9jYo{44~10u?GG0|~KI_}SK++S$k z?;rUq_mds>*P8eHMkFr{b=-f&yx%V(|AzhaTVqqZ`_22Qk!{@9zS92ag zV}z%fFl0a{0%6wnb*Q2-ta&c#9VUPvV)6@J%quTXQk+G}vWC z=dsT63H%%19p#)k`>N3UWY*nWPwqaT)WAdQu2G!h6KkhM*ziGFTHg_9F^bz1~_n=Op+uRnL&F5t2P3=<-%$Q{0iy{WA9eWEWq{ROVB1)!o2f#4|4E z`6~i-bz6zogAMKkeg>Qqg^qM0mx2-L?Mm6P@0YIdCf4jNeldQ~vS+O8Z-c4-A_O+fBUK-V2#j$Ur+x&mKxzwzc=Cc2-@AEJ+a&hMvboG(@>Ia!%Q!D+n6RQNIR z7bjPJeU~qE3Vu~}+S{$hAC1lb~Wtf!7IxtIBDrKxAid?tQE zsQ;#zQg)`zm!>NE2DMt-)AfH zEb>g|>G9RJGRzBWY-R38w-wno9$hR_S9vG@+qN>-pks<|I37JKQigfuD_fcKSig&G zTZJ4F-Aek~r^GhM`u+FhX&3p{+Pb}YAJ>c}X5GHtT(>*cvGG^Vfg*X{dr*>pOmdEmCEr*M*Hzd<$B}m|m+Kj9 zp)P!E%CTF1iCuFb{|oVxc?SDt9eY#L`Jaui%vEB$VUH?%z!T>^gY71VeJb&h5&s|Y z>k)q&@xu{c8<%{8&rSe4Y!*I20qn5ZckVb({Bd?-hZTRGTuGN zCM*k%1w+0c_?dRC*l_32@3a8}&y)Tn`@nsG(;IkVlitT(Z`j?ZG2>F~zP#VaoYP?L z|IVR}jXjd|AoI7;A9>f@AKSLgD%%pUWp8eJ1YQPb&6{>7-bswlN@T8Xmb=JS?i^dW zqpjDXVP?7UpoX3S}M!qb6!?$}z#KC*RQp!iG6Ie1UxOI{RW zDmdR%Y@EN(S2pU}29G|DJZ0aCeVe`6P#JcSy!lg`Bv)9@{LCi%IeN0rjJ4;`d@imQ zY(US^)&6KvD2)EB)>#bN?PZyk&{z*XK>GBwoKquX%bU_S<}ct>6Kxo-;Y+ zzw2>)$|#3!c)mW*TN$97z)|o90gwOCy_A?{1^X@jm*RJ_oqOSt=#-@?T4)2$ZyZP& z^*lZJ68AoGXqOK3)v}JARZYLG#-_Lv-A(k(Li&?)-DTP6o%oaz`>*QhhTn?~27c^Q zUL;qgRp-Nkn^|9g7=SqgwOV_5;)|y2L#FNRejIIP=sg-^Q0&oJnrls#rme~1>_YlO za2~LPKKXw;|L64+eX209H@T?E^6lL>5g4X_?-cRL1vdkptTD-(mW*BsTn)GyaA(?J zl4A(?`1k|&%EuVT#G%RAtpa0bvcaSI@CY1`*G1@YvWH>lQ}Uhk*%JAVHr>vB8UHgi z&Yk4{-sBbiF5un{YrXh~!z=rk5W78ea~88JIDqW00#f)nv;E{o*DOg}ug3%Nk- zeO_R^__*j08u1n_@dO^j1L29qYvIueHl7T$@noRElYEQT6Ku4e!hgmw`~RB1KmW1u 
z4Truh+T&;V7vR^-*jTKOVGq{jD=2np`F1|6VhlU^IS{^Fbh6=_B5mu^*vsNqL$8(J z9Da54ndrK?T+)ZFyjdE#rL1yZ-iZ!xm6vbLXPd2O0z<#U^;p!scLO@LQLj~p=H0Yh z;L5s8e&wv*{b}qo+0N><`klzN?StSi@+1TJ>-pxl!#FqgNnaqv_voatH#~iPPIk&; zvIke{#+Iyf0sI`Kzp;K1dc-DR*{b)muCUI*mHKbu8++OHjJ@}Y;WcqFBD<>jR`LA; zgYTvEr5wH!-x)m#-*;j-^)BlRGRbSz@B#L5O)F3|Rml0n9{P84oX%mgQtI$(v$Ke? zk(@8cqbcMMpY0;XYCAE6#nj`L^$@?%R`5~}ZSDH&e_ng#a^nn9^5(BAM>b_@X)5N| z-AB%DO$*+AdU%=G@JiT!dYJD{>cyOw1&p5BEpxhQ1=TiLlf@pxBhWjoGrCzfUxCEt zum)T#aGfTcOLXf8&m>MX#6I_kZCt`rk{~}&Q4!;C@ivu{nO^Wo3=QnEn3!4 zCEHVoUlzGhj{RH9S}k zyz!5c7^W?nZ|xLeEU$9FGfA* zd{ueoJ>lHYF8co{)@0M@A0k7&Cv?UxkDRY+UVQNxB2o+#+CFd@AhE@{{^=6Cm*(^ z())d9V5rKMc1K?=P;;57i>^m6%!WoaMoc-d1zkjai~U>L`AV~$SDNiC?_$Y0KYdH^ zkoiSuBj!0G2T~mEyq~pxLgm0wwDZqtXW>cZLGjsBdT!pa$lX_HZ)<#ixh=e`Cv9uB zeFOL1+GuOxm9%Yt;TLm)@JXYcG}X30F@vYX}AoFDDK&_w%-dSxc{CfcY z`Isk!pEsIqS&p4g@W#I`zmzi> z<+;s$Vg`L;VP3O5KucYA7x%I9;aPq^Zu0FvG|)vZP1(cE2%ZBEO5lOWoztLy;S-|+ zvfis4sD&0X=U4VK)_;}1^#m%hi!o-_9mIaQlXd7Q@(`|<`H$xk*KiOYj0?lznj6m3 z0&`fGTII#oUW(qckaA_{vaA`@JA9EtH}FjGc#!Aw;eptB#V=Hz-68j^KkfDf>&!{# z84PJzF2BTE%6JkxLN}AHmC$wQyYRArNmsQ=SG7r3HFQ;jgXFfOzp31LCHz(r?*gqM z`4#!V^~a6akj+f$d$ znufe<0rJ?%zRfOAPEhp8h6?sS6r9_)a-Pthx(svebF#7aQMx>9@P4i&$#L1UAx$eF z=6~J3WUWTz5HYxm4-O-@0PChjeX#j_yc{v`> zRO^e{MZBBJclhD-)E1s|YiXyksXFJs)AwiXV0~1HeJT^5%yRyBO+z;_VKuN`UJsoM zxzD}BH5q+tmgK^RN8t$CCw2?1 zgx1RA&I>&_u$y{bd0aR0T~lU$1?wT5eEx>tG07-Ze!3vGrTnh!t6@va$L~16m=`-b zuH$1>)>-)Jniaq&O6%&Xg7@om z%~#7fK41EWSJP@I@NT8-OQDCzdPUaYF6edI#hgB@(oeCeHCD6!t>erC)fe$G*YPp; z;zyp+6(5#kwb?oNYVUNFFOgU;a_JGf+>0C>`Xb5QI(?_C_cDX_x_#(DpPVD|FD*EM zb}eU*O3qVc{jSz+J6OlbK7|hMQs${w!^_0?!#WE9KZC<^jHZCkVDK3QK4ZvPx(j^B z3%`7qCzp0kA^wCh${5`ES$N7|VD0x6 z$Cy}J&pz`f#EA`LyiO&4SmYY}@3ykNf^#Ux*cd7AE9h%eIhRAmSS54HPVlhDk)<=< zoqf9)TZ+}UccL>%->#3*x9{T}{Y&jXMQpxmeRX(Dg%wXBxGY5G2`&f0MP#JJOf}%s zBr$Evc`jq}{_XTp^e&zFb^89gO7wuI#8(Cxo$N~5OfKhjuXpzCVZNWDXhQB+%b6g^ z{S%P;I(`8=DlB;%`tW({e~REH7`@Hr7^Y{8Tl;lMHl%m zw!QuMVEl$S)c<0$Gjb_wd2jK))tnKdlOy@Qg&!hgiFZ>uBc!iMn;T~(xgs%YLc<8& 
z$sWNl;ddtd&hmvM2CjiT1OjK5^p}SULt~lu>aLiywdoD5V94CkCj5j7QXi-w=TB-C z&p5vWn;-KbIqLFcF9ZFdXTw%>e&Xjwdb(`>eyis1MVnYLbjM>q-SMgBPexY|x@FGz zT>1KC2ESx{d_RMD<5aCS>;cB-s;z1SR?4h9Rh}XF)($Hc96O8)fk7UgbrSn0F+lRY zRqv;af600Im)&ZeIwJaAzhJ+w!pnpwrL?aV_g+Fjzi)JS*?wTYirsBgvTsigaU)h= z%t@5`|FPOVS2f$rqsV4D&Nya#GyR^;! zIVHSI?6(cHv&?-`PHgJvVc6x3b}@2R^a@_b7!cms`LLb14)U_)g-0j1nhQ_0os3EN z-kCPH+HMg2fE=y#oluR~aN=|0Is{Eh1kBTXwo>(y@D;+T+}k z13vbSDdQ^4coGY~>6h|bDfgIBZaZ>++#6<0iCIo!N)AYwCv9asW=x4$M*N?@k}|7p zWk#DZC1x3kDUq|8Ia|tzqmjBQ%$O3hjKq|PFXy;F*~+|O#*~<4B&I~>ad6$8{YYWR z@P`mzhQ;Jr5FMXx1o3Vc^K5}(DJD$ETyt3pJ^Y)YhjPDo0+5UVx{aEO8 z`iUGO=HvToFOI~0KvRS+Sx;r&r_HWI&KZ2n2)^(-^QEo667<8L7loIJz9{Dj<-ixa z?z)ii+l5$2(OpGv%;jvQ!N|%k#7(l!nq8dDxmKJFbnj)_`Esrm-}YR1!is*qUEWmT z=|b|uoovag+QN{`Pu@<(90GqsW7iahGP6AIPa7<;$fLH&cuu;*mF6BT^2NvnLH?V( zzVIoBb&{OBVBxBxOC$nU^xah9(@t!w{lT?2a!<8+jmiaG8)m)r1pK%9M#$ZPnio`_ib~V zM0XZ>|5L`4#JG8AqdMquZAek=#Q4nnj6QuVdXTi!UnsMAktu87-DYB5hR~PZnPkY| ztMPFb8T=Oab{QHa+tw)lzr%B9 z9zJ`0;)^b8C97W1Z^f4JB>GK*tS4Cai%q1SzAC?2th-Cib1CFk>c8O~S6~SJyp(r# zd&990McW(ngHC$``IT(;hKFqShDg28tLlXpvFa7uQz`a_%FaS}T4Oe6G_Byj?tBhE z*I=D`nXZp{pvYi zP;94qQ?zX-*8G@C9c|5z_@|@_JdL?Ve(nBG8`#Uu){f^ai}Ie*E~4*jI=~(U{&BK) zwcB}1HoE-tMv&|4W3dsjUch&%WWfVxmn^uS>pre~xvIDpaV_Mkt#guFU9WqR-=!V=a=vk?n{{^vSKuoQU5;Lt?n)Z>*td6VEgi+W_m10_aBkq{OvPd>39fq=MKa>Xi6ci5D1hj}aRy@dEYi z%ZZ=+5Y}S?SKtdiF8an0)@3KtpHH1^#0xyz6jt#9Cv(4cesjElz+TLHZwUNpDOSMZ zQwj5Mjnv2bOzMG#DMMJFO|aGTH8`=}?mdLHS)>eWv#V`oBwk>ZlwoZaDdREX1tR#O z7d#?mSer%4j5gv0tTGZW@PL$IZ5An0VZ;kqWh7oe;*N%}Hj9*b!-yBK%1FGx9h5QF zWMbp$OMf@kWlPANYqgcdhb-bjZ=n2K=CJhc+VFYZwAw@Txl2x$wT&llKz8}0+t`cK zJ^18!Dkm{dE4yBCcfG1Nvj0ZfN%Zp4sRPiq%ayx-k3NuKBKXHJrP zZwY7Um4q0R*p_-Vdc9fV-@+b`!1h-4qWvs*UYAkM>&gnrS}mJ$3dS);S<#^d4r5!L zSAj2AZ7KaP#2Da=#JukqD`$_+5#K#@V2LsA9enCf;bjG^OICRLHr_S}pG^AubZC?D zF-H1%*S=LA{DX6t--JdT8Z%wJ8#U%{DYKKdokzjWDcw|=RoAux=rRBSO#5|Ew&8&8E>@<-QID)9}MobpV2qd zIZLjqNyi0`vqy(-!*{fsG1v8W%yqqj&oJ-a_$_|J0}FF2nGc9%O23hHrnIRJKBbbA 
zfc9kl#W_N~gZ}{6^|Ci(y))bGPy3i3t!FXX&S$n=3T2|&kGu%>_KUzMEI|kA9n6(> zE6fU|Q?Kwf(7C);;=+pHt>$uXc6Ie`zAu+|%p2=E)5l!tvnIfYbt0$Tv=egGS_@0R zczv{-Rp0FY<~(=$k)O742HzWn9Ud?jbrDS#vGJ8fZt#*LRGj7@5JGM6b)R5O_ zBCn?+uW!%dygO4?i=6MaQuFUaUX!0z$?GQMb>FfZn_fa*_oFSYVZ4c~UOJHdPItPJ z)us1nP371YgdbLUk=28d)m~Fp=OL?eOj+$}OI9z?{I_sj!#wf{K7$s1QguJmY_`f=gKS)d`Cva>I8vv2Tw`mOj;+?K5EL7(u;x^F)9 z=ZK6%HcH)=Y?QU0$k!{exrzQQcpL{F*pz{jNgjZ{n`Ioxc>_|ej&gmF!J|BVA~JXy zu%+MUDjD3TN=F9EnQ9A(*T{v&QOMvd>IqP07d#k?oOnDLTbe6nv&3-}&dO`D!xR~O z#4@*^DRVv1GWT(t%x%ISq%D~{@kL~e$lPAO+LmP6ML8|VH0U~7nKt7m@O$F_uy!-z zAtWE7*w^Jukq2wk-hOR^>9<@dep=Kq^F&{6?j61db>yo?Kgwa8j&h~UDn-U#O&g28 zFYz>L4a2%Z_L)nOxhweI{!X6R@11LWeNM%s$(mke_z9&c+peth2l`y0kyanQree}c z`shIVX!W)Up};>Tn0^4g8mqT;Rek`~?@S22^pCFUdG$M8Lre2qp{1v|Le(!!P&rrh zlM6$Y+y}T{$h-7h<6Zj6u25xOp~{=xfX-k2Lf25`0+2tF~wgwjQ1m4p@nC< zLW_Rt3I$)95b|DKF)83CJ_wmh9+}W0-VcUHRg^7uiLQ#x4qdf!7&c<=4LhshwB0XlHpe*plboRc_?C4o z{M!`aUj{ir4gS@^zxPf4)xp0y_#}1;A9}U?mh)Q*pUO^wCU{rMZyEPF@b3orc>}!N z1aC^=o%m?2MBdA9J-=n}VdjX!&=`18#_vq-$H1Rb_*Dw;#E)KJ*TH+KYlHYG@ZSz^ z72`A)zHWM{tMUPx#{DP!7dt{%=$u#N!Vc2RW9gUnK2PqX-IddCQ2tSRl2#z{PVP(5 zWu$#r^DUKrDr;V=U$?q`tLXf$mFvn2Z1xV;%DP(CZMoaW*UCOZ4zg#IYk*pF>OY_d z+;r;_@l~oDsP+lhI(?PW*=tyVPtpSP7m=x>I%UnqRx%|W|D*%J--c}8fbYZx_8Y{< z%9tO-ej$6OnZbP48xz^1%Q_A}BygEbxtpHQ{EuS~uI@QN*^(c_9=sg;CwWIzP9mND zTg={v*0o<1{_K%8Tt4;Zesz1)S+cG9d8D{fs>lzzNZMR{66EZ0JI?c&KXqX0+JYrg zW?#j^{jVxEG3lX+q^>te1~$bJ4c_rV1|@<4!prZy#bw@>mW ztqUyBLIG$T3_U(*S&dB^U#n0a{Y=(yqC?4ixSx4N=A}C3rM}D;d--+_JgPJKGZVj( zUCFFXfIlBOC;VwZCl~(Erw#u6aNX84Xc4>)aB&F1(%sJu&=c4KZzh~@=-3Pnar`U4wm02skRt2U%Tpsp+Wy^`-4>uiK zZYA@T$PJDEV`vM3#e9-y#S!K3J+WAm@u!;AfZc7ITOTLBX!Hlx3%zD#iEINO9e+3% z&qiszW|bhfs?QryB|Z{2vd1qn0-v_LFYyy&ek|CTEOlzL58#WciytNLWe@IA_8hJM zGA_^I-tv(Xza{2_n(cgRz)M%UlRJ!U)O>aZ<;ioH*ML3KTDQzsIrcO1+B(D9>p1(l zwec!`S?P>dkvHy-Idh3K2h+i0ALF!n92)*t)1X=8iNYUUS^U8a8Z-0K7^lJyd0)N9 z@Xab_OesHC^2i@B=@Q;dvhhZ8kXtkhpAO-JEcOU{-RJ>%e3w^9{mh5Tp8R{t1lR|X z@wuJ$4zLfDN1uLXQeIWnUF2@8C%+rIN|5^r^L-(bXZr6YM??wZ_hjV3CdTs`#^XZ% 
zuOvT&kNgla9#?lFX7QiwpTmbuCVcVJ+hoFj%Ywj4tppQU@uB|q2O#xBL39I3)T@E1&*!E zCbNtMXEn0%6M^%D4G#NNi)?VJhyzQ7ZE#Xq^V}De+$=K8MB<>dnWsA zuTf5WCtOoOxwd>BEWbg~E%1|Dn}UC|=oUHX76%_L#P2~1deJX(Hk`$ta7tqxayjSE zXGe65(qm)m82fIG)-l8{vo3~?v8n|fVJ-K7!)w?OHGXwj%c4{C5}g7(1eZI& z#r|ChbFiWJ^lO}-c4T<@O#PyPHec9^eo@X^$I>qz+IW_qHC$)~`xz_HsmmWO|H=H$ z+7DAs2rpZP-q66@Ue3BmbP-F(n7}(r$EcdF`R^T;qU`XZXWUnocg88`3GYPd7u~A5 zxiYG*L0530BkZ#27K?VIw4hsLDBa>qn{Hw06N?zPH*NEUZi1J_yd-<38NuIRKhSNy zDH}|`m9~6d-bcqK$I4;z;EIQ3Zq`)JyKLl$)HRuYB=es<&-vb%|K^}W*5NOk>B?v< zXP?)=saJ3b_*nl1zn^gb9{ulV#({m_p1vbIB`q=^(zjyFhvOPsnh*Qh=EMF`^P%)> znGaWTrt?@Ee3=iO^KI%^@x%8ugljGW{#gaqeAv&J$70NfIf93p4`Y{+`S52_M$L6* znHck7uQtlae0aK)QFEOM$9?#GDD!x0Wn@0gmNIItGvO2+J|BL|T+mt>nGd@Q95vTj zv>!en)(f0g%E)~9gOpKooyDKS=fihwWn?}KIOa^54;v}Ne7Lx!`Or-+|MgppdAzl9 zP2?FDo$CYOxwK@z3;7o-rfi^3^UJ=q@@%#n_-|Zjmcz(5l@A1!YJik)G_xLLu&!;K)9{-b$=a(w@9{(GT=NBmW zN&ZhA&nGJQNq!IU+jjiNDELYKL5}C675pUsXvg!PDEOWHGaS!HD)^oJ6^`da75q;A zCmqiREBKxKZ#bTxpx}4*f9iOiso;0^dpyzj_f_ya`v*Cmrz!ZI{i7YvyDIp}{uz$v z$qIh5zryj{rQj#~pL9I`4&S2b8l%!V*JEjNMAL3kg_v5>tX-Bn&YysN%h@>Plswp{6` z{gfW|C_3sRN=M~<9&}%^@!+e2KGvsdCFPYqh9AQqa3+sNtKZ9)SYOkW+{2XX=)Hlx|5(q_SPDWL~puSE5^%ve(=n-7?L@ ztGcH_(H~`;u?5lxZo8Q6PFib;IiGsrI^@1 z;j6~4rRV{ALq_BlqLW5e$;2Y%QRO+6g^u=7Q)4`R2WuPyvy@Na|3Km4Z^|LV=Qq;1LV zFLs&q;AHmh#qZ6s6Rk(TZ5Sasrm=>&%7J@1aY@QEV>Gj4M)NwVk%9a=d>V6L8IDa{@(DE@aQhB#>N=-tiFvK zh(&hl*W={8f|&NKJ~sWmUzGkXvP1Oua@tPvhFb8q_dFc_;E&^nzkXY|=0njJmA$NQ zqYM8rC;nCcX$SoGi9@l$&n92H6aKgs?R{8kQ4sk&*SG$`A6ZJzBc&tiN`z){uk|l|8H{*qt;!08&@GSowmxsjp3R? z!J(BhtPyJ_N;|evrV(3QSsP_Gv-Y^VjWVlQ=lr3KGFAJ+HJjTgGmDtb9a2WECr#QN z>&bDSn(f<48QQnzm^R>~a;{EZ8)X_v-*p0&E--?t=s!5f6stp-CngdTyv+CQR{89%!24O`K*70Yi^S=YQ1fi*%-av z9`|mz=6cF(Ag-dNwe+dPDfu>qYf5c6VUJ^+N81t_fi*|?XyP5K%x3Uj+(wzz$lNt; zl&L~)zSTyVSsx->+bA=RKIFCWQQ_?1< zvpq(c)HlO5CAKmyk>ifK8sYQZwldi<%4~+uzoSfR{gwUUrP{;L|9$J_$iuyKJ>|Y^ zqa3kYOY0wH{I^l>Z=~Meg={?q+^3w=!) 
z>|JX^UH&omgqO|Lw3^osjGEnm4lCbW&f2{k`?%PtEqp(r9CZ)3aD5Y81;^HKV?Szb z&Q0frbae6*_B8Tt>Kw{I-`oVg;^!>+7fSK@5}zK4Es-<-tojB~pFHcrGl3&C=6ycC zHe+sHQ?bNs@LkU5U23iJy(usLI99&W0~Z!?#zA4gI$O&bTOxa6*0-{E{Ir9<_~y$-+yXZ@-)P7@I$y6YseWyf1GD-m2b}r~AFK z$24I$d;a+6(_Z?WNlmYVJ3gc948JF1O)0TZ;{Wup!oAtY&&lfs_9XRb=Z1#Uo`S#R zZ^|st_ADA$w_MNl9g=$S+1j1+{G#O#!9#8R>Hh2?1N=pl7kg6m3!YFlbg`}-S^dIj z@+6Tzfmq>9%!N+BDEecMrNF5CWpeF`KjXKB4NCUzjPZ~bbnpM|)D@zlKrf0p~P5l-FSrS`eIieC9pz^T5Ke1VXM8(&f425%$(FR^%~uW>dj=K+cT zshnHYXO+u;7dd|KOwvYX;DfTCGg$5TYrm(q9bv7%#!F)sn%eo1_rhMh*9#oF7! z?^W4(r5$=_l~0mf&4akCTp+~0=KY@bUP(S9?9O{?o^bhP&-j2EKfM?~qv354ye$k^ zwrA^Hd804iRDi4+zEbkpUNp#m!<)t&BpfLW6mA5&0g1f{CEF0l-wJ@ zvp?5(eu#b^yC0Zycl`)|En}kG(o5>|{ZIYZoVQnvXnEeAKO$=0&OokJFStqkdqyZwV6@rew)L@`;$=-N|?5uabYcyS7f?OZoNGvCk^|Rb`~?T*{Icd!1h5VoqK# zNzWm70)4Aaz9ToCk^#qP1#+Jb--(H?*%1TImB3kvU!uUtb-+5tXp-MgfUf4xq8~2a{l=DFL zvKDwRgEd&qg(66*}({I9#y8~Z$|`8T42^+(p%d-?~T>m%n);46TOIkTQMjOYbY?;Pr_ zz4h#-O+Cd&o4ow!4Rz!>4MC5^yC1U+ej!ho$NL6vg{F}ntx9-CToCdsGY>v)Ug6Oj zpCn%kV=-OIU1ydP9=pgnBeG2NHeKl9rJW9;DXe9KHWUq?xvHK8p zX}gAR*dI&$>%oJYnj^wf5Bbg z{1^EkZQD|L;kP}Pf{br;U2Oa9R-q^MU$h zK2X2R2l6X&f2{SLV?H>C=Ym%;GKe+a7Rg5<``RhsIT&B`S?p;aS30jZKup#g`zQzm0$J9E_Q}hI#sY!?#!LM3s_DpB&~&R@9tJyyb#P^O%!; z@OvJ4H<3+KWUijc{5=XA%1+Os<-`hB>CBfRCw1ma4cx0)J6O3vH!z>u$C;JuA6Q$o z>cY@=^h@?Rlz%b0VvW{!!KCTveymGE8aP!m#-#6~dl%?g(Pxi5(+_#+Hgeb=Drary z*7EIeyJ2e(nkqw*_gwt2GV?B>9qQ-{{o$8dA8`If4(r16($xJHO;7e8&;3~L+d8AQ z{vJ26W1hUvM#)*D?%+!D+#o~MSmXTDVCDm^S46Ig?j_?+av6Td^*xu!eQA%J?(U{B z54fAkx_g>dJm6{iq+3$cy!(@y)OcsSitZ+O&xY1Y`tQ-|YBS95dPj}T-c^^G&+G3g z3JA;{$ZIcSAcb6#G6zd;%+Edj(e*j+8Qp*9V04Q?T4319@G>=)$q(8vW5JR|;53f- zrCAqT7#ebyuW15fDwCWKRr#rpjYF5T-j821Av9~ug`p}}YSlPoN#yzSJfC1bN47+s z*YN!O3qyLg`+4;Z@@5rliH1?W+Dhi8m#Oy=uDPUh;l||0M2L zOO9%NRuSX1A;@`i&yqv%IdTRL^nkxd@K0@cR;$%7<-1E*=TA3rxP)(KpjQf<>E!NG z_;|p_6TxSM1D{g`pKGQ52?jpb0OJPkcbWKH&wZl}pIzYdHTZlZ_#}Z(l7Y|H;By<_ z-3mSnOnh$T+X@q(1zPAhg-;UrBt`J)>A)vd@OeP+Ip4tNeqcPveO{r#pC#N6E^Ovc 
zo~yRul)_r?sfFOv34A&k_?+UZ)gR-#<>2##iO+JrebU6|3D(`8NqcnypH2~czUG;| zz5dI+{+!@*fq~Dnz<81SnI=9ja6ik2&rI;S27Im+d^&?qX9J&Wz~@hV_bT{oH1T0Xq41B%@#$N7s znfQFmeWMMZUEuRI_E;UQX*|m-GWt+Y?gtmy`jDGGR74*t0w*sxdCflLuGRbV zT|ZzAG-37Q+d(Ey16`pqg_9SYyb+u(bKvw#!Rb`s5P!W(#g{`HvDf2&Be)-F-jhGA z;Y{w&GVjmi{-@lJGVg!NeG&Jg&HEzeA#|5g4SnnB3898RQs2!Llf1W7Ov26?3=zBE z&!bm;N6vu(9&goy_~wf3B<15vYX@`Tqq=7s^U+LKN@~i-{cH2~ObDfNbqP&SyySUR z%AT&FT<#Y#uX{sXL+e@Rdp*6F<9b!))l#0T%bzENRN30Dp*;SlZkP~SB;Wp-d;Y6; z8w~pM{>pnU_3W>_-^4way5H0_l=}9BP#%}Mf1CSvx#v>%?}E$w6GACm#Ee&Y-|rgw zJ3JM9azErbmwNtTSI%GWRaMCK#HYm3^7{~%p}%DWALf}MUqzqG2tL4lgr?8IhpWp@ zv(C@Kk^iY5URt|IXxhm=|L2j@Vk7JOln?vYK7%f(qZ@hOziW~Xzjwm#UEM2|c;NNe z`P%cWJ*UW8r3=^9`QtrOxF2KQ|D;Q(`;W;Z@0~C{ zbT!{Rsd>Y3vO8@-$jJdns2piaR-6fZ{#P;^d`J9Qu%ur(O@gXn8P;qzU{ zp)0ts#b7TcX90a7UO66cXymtzx6W|J;j*u_I%VzNUOIOiWu1yn zY%1;H?O$Wm8B4}X%#Q3)icD9zEwG~rOX`iM~R(J z?5f+<*i8#Q6v6-Sy%cM&*9-0e_B^e=*@=Gn1+b-`KFRN$T-dmRU+gh(vG#vO7Fhp% zl#P=AwmvU#;6qJkF2OU0`|39`&%obp-Q_|P^RD2GjNcj4|AJ0E1|Cj257VUr8SNTpjqHm#btz|^iEht#{txPW{{NuPrAJU_d&cMOG2s7S z^xI2f;L*{2=qE=|r^Q?3^;&&Qk(vk5_Xt&(YTVVPfk2yysEMm-i#} zCZwJJdIa@bx{w;%dyM+qGp?(Tpw6hiH7zo(+ts%g9YMWOz?t~-2A*wI9B5l&HzL0#h^>#{3y*qmxKHi;upuP8U zCJ$#%hU{nZh<`_beP(M7yc2uO&+v&mk!w4?-Ln3**7t9H%Q~37b6Ni?9}Vn^;#cBh zjc>%yXXf3&dVPvzdu49TW?d`(;4)sYxz^ghE9c!YtlJkd&x-BUs4p$}N0S%!vNA_o zek=P^$tr=*vaGvnPYf@!*7Ft(GteW?;rb10-cOD(?1`&1Y=HQpi@j55`cY^BCu<#V zU(*|Uo#Edd5A0rhj|{dwo=0qN@OF#X%Egv)*R2uzIM4F^*Zo`B$G1e=$J6ch@h!UD zK7PIyC_OY!|xx!P)Ys`7| zX1?xN=Q)W@N7nv#k;CB%t`SMTP~DFuTT8Lo3O`Q~`;1AG)rTfJ=sGhdT~T)bXu7cb zum7!Y$@>Sw%QBAf6{0yroT##Ab9;44aW28H3F>Bdeb|`@!e*ufK5#{n&GA zu;(p%-vJ|_bk#CRl+B>j&3+QHuV3R=0e4Zq{`ygPXpf%7~0-zzwnXPK1M%C*2w z#zHP*P~=n|_HXth){W&i#mm`6{HAu&R1SD~SIjdVA3*6Z-&RQOmTm0G8~|<&zE0KV zn17!>I-W8A0c~{f&BD+K+N+*&;)ir!cld`-W*+0aejaOGu*Tt1XMasa;# zmz3aZZtOqMAJ@IdwNUn%*e{g*M0v-273xDBjKAHw>WBHNAHL5SQO$5Of@#&z)|5O_ z`rq{rN5h_sudvuyqxVqLf?q+exyOiK@Cbj|Ez)0Pk1=-KS4(Ygjpv1ood9@R<5$k^ zI0*muv8O0;L=r{TDvhO3~rx)W&>T|L8Y}F_G#214n@}Op`%xjAF5&r6% 
zWeiCCOH2LA6@19Vbux9mNB&;%o0mNLR$mi+#yQq{(oS3HcV9@qd&Bi^2;bnQsUH00 zlZb8WMBH8Hzz-F!NZcKJ(IG_}_A=&}0%DAZ%h<8-ly=mO($#85i_c2_811-{cuJl-kU17k>NQ4L$n}L(jedIcxu> zoVbdQ2ATdAQUt+1uG4cs@zl?6v4Y(59+GmjFFQNTh9$$!Z z5u5~{BJhzIVe8v1d`lnk*RkFU96?OLblNwT{-$EdOnk_frhHbcv2dZIO^RZ+N$-}& z!DU)N^fful>>)1Ir^NRI8V^F#Ui!V&X0nz+#?-pu#RrDJj*_zvL#MS?lo(&;xjakH zi?xoi=17?v514bK%$HezG~{BPDG%p^1N|eEX3mYPxG&*dU&@u3>tfMKWd4&m-;znz z{I|QSwZ?DOKdtq}(cqPOgm@(#Azp8`hL>YM`wz6q-CRPW4$npIc;Lrpv`eLC%p2R4 zzGb{G)uMgLqIu#8u8!2*yvOadHAU5l4&G{=yl<<{`;VZ`ze1zzp;yb;{6oaP)-K!9 zbw^OIoz_ZxHf6seioc8ni@)+7TBG(XEL+Md(}!U45!4w)E9cFtwq;$_p1qR%Bd9lu z);?{}%KNrx?Gvlc(XsPMw5FGxi2NwG%_}|r8Eal?T_>Ak>OIrQ4=8ITIqOM(`CQg| zzR+0Sjp5hJuf!~_$CpojHFUDPHNL=1bOiYw$GR`~56DB>LSm`HZtpA?^OM9$p_gzj zxK@*foMBzId)t!2s5;0KRpVv;uz%}g&JHurA*-hBBnC?-hFW5;1Xd3Az6+i)#hTKmwQiVxd&*7@PU^k-ud%b*$`~T2;=uKvKs9 z>L?)x|6i%2M|ODG<|RfwBDXg$Nb*ZgA*-IRh*|uT;IiCSW@D27DqESi$>;onlzGBd z#@)$(udU1))W$!%>wS!zCheLN zuS*0DGJcce_nDm-0|(VUg@gR_j`6hEKAl*Tet|xea}2SP(k8@jDm}rk^~kxneQlE$ z11BH-&F)uur1!DbE9P9kI943OU}TUfgL$9t@3l?I;Ki-jUlvELA+f*YgX_!i=V7h} z>Fc%4aR$;>QF0O+n@vuJL{2*G63zdN@3V=dw{Lv( z{?~?k@RNAM{j2by$g<#UURQN&D~-j zJ!so2=Jtim;WPpr*a(HbdE~agtC_-I)}Vt zO|5i}Xgge5aFZThw#~4iD!hNDXpWsLv9gT0}#oHMzO1@>QVWsQ-g4SxlFv=1?q68FrSJFh?aNCt4Wd0$WE zB)xmDs(!3r58+R-jT}IqxO+FQ(plWA7mQv|2{P|d26Nt|L>U*`0tzW|0VDrF!5j3jQ>qG z_|pXbrQm;|4gX6`{3nC|g(m)!oALj%3I7d&KkH5d{~PX%;D4D7{uqJ3`TWqB|2@AM z|IIxN{NFe~H2Hsf82G=@BZB`QO!%t>{?G3+;7`0O0$=){#s8rK|Evo_V@6)k4F9YQ z1ODk3geH&7FyNn_5rKc73I9QXKj>}){@}YK@K3hE?_wN(vH{=9hY!&}q&b&AJ z<RH!9v*=kC-i_DD7>=!H#gbXA(R_=8<{62id78^Vx*aqRbI|x*7iBf*z*IOqQ~*|ir(1yLt?dYYxIVJWgo}8wj^*I|0ce1 z{HlZA5ecOC4F|o?CX(LsVt4tft=ow#}U9tXYGCz9R^;-k07LGS-0klt|)dPgLZ z-pk^nx3h!Z1xHD5YcjgKgWf}bk2QYdnU~!DkKkRr^U|iI=y~a9isr=3OaB0tU1t3` zQ8Z6;`+wICn(uYctRE%Kt;y+!95k25q&d!c>G}BPrC&g=_?%mF>04JNPHtW7p!b|a z(tB5Y<2b#U-Yv4mTO1#~>RYGYacncaywCUR2EAKaSs!j`ZQl_6a!X|2aCUt3{?ij_ zUxo9O1k$@5Smqd3^uG0ul3R(P_on#hecVBBp9IqT2M4{26G`tK@zGo2p!eUij&A&# 
z^OEWt&h=86gWf4I>5ZC~qWkmV>m}8nwL*`7IAbH;d1(+do9iWo_X;X&&OB zxp$&yp62l%1XjE>f8ma%dHPY(Y|cxHW~bcx23YoS{L$M99LIa&o0o#nYtBmw=M!7$ z*Up$(<}&q-#LT|ppzX<+v^B@fDw%7y*%324k39AA+=oAzmA`%(dndWKi|UpS@Gqqn z|4C@i!6&V(Vv?Ly;bL41tr9b5*##G9Kk zy@U3z%Q~k2-{PQsMgnO6?h7^c<8JrU9kf5!L9`b*Xg@vyw7({_4~W0r_i54N{kjgK z{Sa{NK{wD|Re@p=FKNZ@KkDvBA4%)vh?U?a?yMy*C zcKTnp z)88pR`VWMEwDg^|Vt2T{ee^3Bv2?xffoqO?%H;e1`=%QAYol!tQS*G8^v|=?-->^0 zYwiD%gZ^P1ME`3J`kzh!{iE#kx8m>G8vSz|^nZJ0yT*Tu?SH$2{wotefA}-o_;1Dk zwKe)jIOu<&gXlloLH|h!p#OC{{jK=JwnqPfA06YrgXsSrxKZPOQv$}no&Hw*V_T#D zB?tY(I*9()9P~e(0QyJS>2Jkfwl(_aIOzZOiVhn84*IW50R7=lZR5Wc|Jl~)AK{?? zg$|nL;p(-`iFH8{jWLbe>ws5kFwMM ze0=oJanS$mkO%=JHT&GAne*8hJ~<9~50>;E?CpJ%7P6+hQFi(Z;+y~GIOzX&M!Uv;i|v2AgZ?WMK!3R2HvZ#m{}B%QUuc8=O395= z*fTmmL<@O)Odkft6UoD8-~W_crz0Ia85ol%QGN{3W5V=fIDUfsJ#1+|v@Krj1Fl^@ zeDc>=^5N)sadS(&;Ox(Mc(K~Si<3Ht7tcF*@puAwF}x*SaPDb5yeM_>;`*TH*y~9LK|pUpaX3&uJYa zFQz+qF(CoG`20UD$P3PQj)xb09lUs`gLskY;6+LTc=1F_yx>ghczCg`DOz4k>L6bH z8@P6P@xO1hUtUaYi5H>xc=4cv7u`FE7mqr4ac2T}F`y-0a7KAN{o)r6Uc6P(G4kS< z4qg-{fEOQr+=9H|eD-*F(beR|N;Cd@VLNzX!HBgUNi%t2l*#x1xG}t}`mi|iW*uC; zv-_mRSdTp15-;||$BQ=)8uG%TrTA-<^=4Dma5uFbXpw;aDjix+>2$%|%xg~NDZ z`fJ?6y6l(c+HA6e7qTw%-fZ}4K1{z8J4HOcns>ny@maO#aVeg}=d)_Th{cmC2T$fC zfG6Ez@#GeRC+D|=Cq4&HUYi=9T|?Hb7EfZ08R>uJwtg}+CQpuT%xvPEA=%eBY<=?K zr7gzznrrx&KFvQIh#oU@6)##|t6}57&TwpWj>T4N9If%^N9wZkXXl3aWQzDXb4G}= zam=^zN9fiwyjA*`3*(ZlFFANKtPS3@X5VOy57Phae0Vwmd>9pr4-?|!!;KC;?EZE9 z^KEN6zk6SF|G%n(=sy75sPX@LeB-|f-Qa!GFMO^^zn1OxuZN%U>ebIV zcrdtwcu?cu!O8^i;0%igg>L`6cJSaD2M>1rs$=B9jSe16jgtrVdEg-D`9$PEpyhZ^ zQx3$`olkMlAM7Cdhd0x|MfS%t4f-81>rwOImbUx@UbfReKR!9|_1=ga0Iw1LneC%r z!H6{v?gOqV2NeCE{Ew0Y?V|r?JN;M3NB^@9`tv%7{&f!eS0sS`k#_o9$0Yy7v^{x>@4pPB&r4{{bxWc<&Kul-MP&>!p|`iDE{&y16P`#kuvNxzou@&5?C zc;~^dzKxavWgWzWy}-50fdBkCJ{e%=!5oVR z#Yg{_d!l7PX$R5&EpY8J;N#cglL7Yrf0Lble|+>mp#P5@ME_~c^sg0r zK!1b&7USjFe_q>`J>W$<{m;io|E_N$;~%_6_^)Um{R&1bd%$ksn&V&5|F2io_-_~e zH`wW~jgS7IgZ>jci2k(>`X7sve)~Llx=Fv5-N}C|{VCo#pv=L8&o6A33}~?rT^dH|r^grvM|Iq}{Kh#eD+wsv~>Y)F#Uv`WPxW+;M 
z#R;H)4`;eW^niEcqd&(%|5F`A|0xdo`zC}3A|;Ke%+e%=_70SfZ(@nG9aN(Qux2iI6UC`|U(w1Wpv zIe3uOK|BaLc<@l1Jh02blkMYRXMFSExxhxSmkR9JV zc)WxDl^sNXo`e331kk_6PJh?<=>Ke2vg} z`m;KS{-A^YhY~>l$#(kt#z+6<4*KgSbBV#=n-W`-d~0;++Qv zIe4(5gLshZ;6ZuTa;z0v&?Q&r2y7tR~StbvJc0IQ! zuKmF$9P}U8LG)KU=zlN)^bfYvUl^YpnC_teKj(Lh94K|rKRE&PH*#J^L=QMQKKchb z=>L5O(Vyd>KP>_DueQ@aG(P%wG&sh82hraE+^F&YVgkm$o&JLO=zqdN|8X5ef3<`D z2jisQJ`WBy>DRKo{_E*a@y-F$96b2=7ab!5u5|EVQUZ9ei*tCCE>P(84{ZkzvK%~k zyd6BCpYLwTE^wmB1EWm7zXv=Zes_0Eb^-Oy9l( zp;&gYD;)Hnp8)!Iavo1a4>&(Q`m-GLKi)y~pXi{!M*`>%*y+DGKKkpwaE$*BqW=rv zMvedH;~W3>d9ci+U&~JMuZ5rS&Vwr)Jm}v+Jotly2a6NHgB*(og(?1P+rfihJ9zNX zc^xAME_d)?Vw^m%+lBti89fm>5QuLcJkCM?^0w%IqtmZzz-Fj{1-~mazR*7Ml4Si5-o4-B4(=tFsGRwj;wqn{os;C>KFH+O4Bm8QkXF0z@9sc&uT~H~ORMF(;n_cgw-o&lUYp$~-(OUn;jgdI z{D17KEfsk3F8i2t^=uW-8qV_7%C|c7Ht@ZAf6)o*{UDw9gM786=qeW^Yx#qcwe*I) zzJgrZAou*^{DssZXK)tts|PatZr8%)*2$@ib`s$Bj_XKnUi+GRNJmiEh6 zu(x~CgF8HF!P&ss#`|gE@RSJ6!$BX%1rtWsCuP-!9(y7yn5#y?CuL$bPEq| z`dAw-{Y!YhkW2N=V~+Q)s&K#i|Jr-^_^68WfBbAVAvZ1o0twe-36~@wA&?svWs{&p zKv6(Ju_YlO3HO_bq7n%z2$elTixn#2V$DXW+k%y?v_`RQT5VlE`cYc7CM4=60SP4` zStR85e$LG7o^y5&VC(Pu{r&;3*D~ik=b7hu=9y=nnR(`t1BF2+Yw@>3>+q{T7-nyH zHPqfPUh-6w_HOG08?8Jer9+0MLHJpaC+Fj2MTSGjTMJrWA>Y@i|8I~k<>lWJB&Cu5 z<3glW+fc?aC?l1#*CTN=*PyJshKlm`M0ukt(vj~CUS86@!VCJQIv$L=z23S>;QZD4 zPdCv2+gH(lca=P(f5_vqkp5Q=R>~eud{90sWTmz)y^ti6zW49c>XUR&wr7VfkIe7b z9bc>HlG$+6&2kR%kqsY0KGZ?7T9thG+zQuYYnR(veov*Zhv9X${%=K$A; z=08Cb{B2aG1=ve%88t!9sq7+emco=gdb|5O@V&a)5))8{nWX=JD&T8d!Hz%`P= zsit`>FwI*?|EIilYO{>GMa}`P5zRwPS1zSa>xKlBGl}&736oQ?yR6=F4seZV?oi8F zhBmFUL9&x*(*MUQITxhKIlwid`6!c9@fXsTz;@D6(6PJhe^ho7l>~hn+sQ&M=NPi* zkNxF50y!H%v(lxiqqQhIaE;`=3pA-6*2CvsV7~rtYHwTN*U9%R4wh!!1ph&{WZ67M zq0^W0=zR2b_y~vIM~8uq@LA*|EYrp-^n*D4Ex+^gfe!dUhmj9#j`YzZc~SRG)aT?R zc2ek`!FMVv+OInfG<*uG`4)7FO0s5vtiZVu_-5m)!c{;(Tw`p2hO_mar5qJtM;O6T3x!(x6X8FrC z)>lT>Z(KH`jK*^96SPXk`IT{~k5-f73m%J>bAV@5#_f!jLn`sty>g#qkd%&kc1h8h z^8SO+fy#CweY{9+hXpt}pd;$$72vZz%B1x7s|2r8_&d=DgXGh-zPf44hi?$Pnv><4 
zQtkI9`VRZ#Hsu&X?I&@hoC8`f1D|--f#x^JcLKWq6MpvyJ-s*1kKgBNelLXh@%xzb zLpybl9uywMwG5K}i8PWPDqveoZi}j;b;G@Ot^nnyP z2Y5!b{>*4KNqINRdON+-M~mcYvXqUHbAU&*$o|Oo)MK)vqKwhK{pxYNR$nel8GH!v z6uAP_<9^Um?CbIN}7PgFb=CI^rOaxjl17FB5d+RvMK7KAKYP_Exd6)B}b`O5c zZv}Uib3p55;1fT+-x?;$*Tlv=>UQxf(Bu8IL#Z1ZbOk&ix4ykbi@NEYVf+s%G$e}a}) zlf{)G=KxQUD?r;n09s<)nhYP)pc=b|hfB(QTZ~ti6K>$~ssk|vn>wbTn$!A?S04bK zK;zYWIsLp4DV@)=T|5S2<1rAMJ_d684e4ZVkZ=MJqWFuh|ucNUF{Jx&%d!Sje4EjMm z6!Cta#w+T!bsFPRbzhOBrb}az0Ced-(~>1Vy5u)47?)g)Zcj$HNs+f?h+I?XFYgOP zx7atnvdkGI=d`i*TL=7ux_<2n;$hGMqj@|q%~H@TTjZtLWLb~#c0Ttn0ch5PrWN)n z1xG6~o;Lf&+bc;vHa{W1*JODN^HKP0rQ88%eZu5p^&Sg)M@)WoG#51U3t8JyXkNzL z;tU(Z*8x9QSLSzknH5{Aqp^WM&D%)M0&kfe7Q`QNma=}V4)}VSuYo4nh&%rJ+tNCo zCqT^Vd$sQcprdZzicgz19OEq3pIlrcC4INVl;kN4N}?F%9OBF8&J8PkZEPaNEisRD z$L#Cd&`U}i2b-LL_JO%;)4izA3$XM1`=!e*&}R(V%-+US`R91v_18H0lRKm3qE1pq z7aotL`lNaB&7zFpv!;_IE3r|~-xw6$_-+qrR#mW+aV_7Y(87E;S=AD_TDd zvB$w4Dy>p_%^0hvjK0BEYYR;>#ZIa1A1RG#TLzr?iP|03^O`~Oj1RyIxP_<_%jPbAyv!P2y_Is+{|I&tI#(-KCV1s%FY zJCr_3Q!Z;)KVJCz3QBv(Yr?t!>I?qOrRW>67{o4=SPjL(-01&~a$I#g;Pq;wxV-4+ zM7>6y?ardJ=S0ZP8&_I$ZH>kdOQ<^J7iu6K4ARoFvpm8S0(-0>zKn5YsJZ z>l$^y*ZZ6uT;{xiUZ3Nj{u}w5sC{7Vp-JC=-}eKRbF!|Skk@7*pL2-yje4Imp7;u% zGuy!Dbd}PuUeE-8V@t;`1ND^&U3`Z&v*z1TGU@t_9?|k;)L|_A&nNc}m0Jg9$ca9` z!`d|bPMjzwctQ6*zhe$>JP;|(+T!y&iq6Ou^g64-dQ2BVN9A+IGM}Tdi*c}vi5j0n z>p6WZ+B-?R#&VxSJZj9nD%t?kk8ONDpm#;vg%G=$+GU(^yL=1&=5Df=e!N|_b)*x0 z;cIHlG2pNDHD+FxqSkA-$qz|J-PrSLukb>@Cp7wVtqQ~_qetvc=$);N4HidW0YTs}# z*4FRA8v87J+q{`rbDv?qyy9+q$B7WR(md}IK0dgnZyWRjl{-pXZksZmqPYoTef=*f zYu$*$50NuVJ9hu*(<{5luanP~>fn3bsq$K^;{YF^hhq&a4e%Hr+@{dSI@U-nLJoe8U%QV569|*l$vVc2izDuq{kt z>+9LrztLz8k}Gm0dC5DHTpv>Me8uY%>A%$2E zhi$gn50;uvmWAGYxY`_6u^+M!U3=+|l|3i>F=lSxaJT*P^tUVm$wT*<#_v!;2zdVjUfD|u9-F&v zNstQ$`Qv>|^wfCeUFCuxf4sjEypRj{;+@@(zvn-gUQ7@wpgPY2)kfN_=W6WVuz{59s@7$@|q_y}}befv(x=h>8f z4_GfIy!VL@cx(l)eXH5tnk{`LXK@6NS&bSbFF>6h#Mq4R^<$4ZE~h(8!M9nqjFcCk zUXAciar`3e(V_BNw)d15pni?;KM?d0%L2a3g7vZmsAnVmN(GfnRj5)l(R8hkap>Y4$~Zr`i>{5<_Ko 
zx41UaY4a8MJ+yVI@5HXxZzB(IS^iranF2aG8*wN$g7rd@FNgR9u#vy8I&ZQ#P>1iD z40SM<%b6$Yyc+liOmeA#O-<+cRjkf!mMZ9YlQDj-ppQByIZ3~o@gB-RHoX=0-Hqew zbruF&#GKS6=A`O&)f?aR?W*0Z)tRza0DSQGSZEA|{cNm#QF{^V+mE82i$+?Vk9LAT z94%!KzxbBP?Ud4fvf=pU-y*fg|7CJJ&F_}HJV3@b8}02Xx=r#Foi_dYA^6rLXPms` zdC3zAxt81)Eic|AIT!aeIg1ZO+h02%$%}JL&c%yO&Waq9Y(H(1mmUbWFG-HEKa?D9 zUwWX2z34YF_WWn1_WYNm_Qm)1vKP&eJo&$o+KY$wwO68_i0E$h6eahy-`+*?gm;%b z#s7o7XOLkXcqc=iDW;)v(eox})utf1nDYJ!bp8(c4)nMGMUsvtBY$y4e|sMCCnA5* z*H#bp4-X@M1Z18vBttI#oyl2`cCYaMC-R>LpP_N~D&%MU<9z%tS(UPq9+vzlLoPaC zW_-V;d?x3Ti{LRR%3d#cCr5dCKf1~42}11ZQRE>VONSMGD?UCcBvyVD@)!MEa^|l? z`JZ5ZdOhTP1o*Vy#Dc!~QIm8uJJOnz8)+dQuS0u_BO1wpX_SM8bnjJY6vfKx(5AW) zjiG^Q>;{eMnNm7*VhdQ7m4oGVXjkpefX1M}G@b^H%Db;ZV?wID4sGfT(HIz*#zUY{ zHl3BF2z4A#mc2b?YFGaxI(_}=tb<+)K_~wX*f-k97H;RosQV<;eHQ9o*tz1DF+LLZ zPva!Teh@p}6Xb96G)~gn{8&CtQewx7t@nqm_o>*1{G#t8TUY2}PDpX(%|sh^PPR=p zsy+bKec?EjJn(;r7h>HU@}>cf zIfxT7WhA2AUrV0EA4s0w2J%F4`gyG#X(U&XBxOE;-)EUp2I=hk_}X^FZ79!wJC0}s z9ku;oe9p#RTd)oX9o7;bov)Q(r(0l4v|d^OpQoO0(Ol*yv*EACO6f(Xt}=}ySAn<8ulRTSd>rh2nuWbBq%)6e5GR+ z^~XeyXeCOUW)vYGwGE;ygc~%CH6s*;WOp$0F3L3FW8nK{!KEG|=iaQ;IQSFw%V9 zpXd3;wT5l^z8|O@lU3zVx8-cWi#FwqEsvDa_M#4yZws&=2eQBS-S^k~O+S#l{Z#Ur z&@R05+gZ#9^&~#J_CtENTt)9e-$U=;Ax8Q`_yLSvy?#Kg_gcWIExNl%X`gkGGH5PB zW46BdVGY;Tg0}fd*I_b^;qLD@Ox`;GWx34+&B>EJya0bu=t@Y{q;Y{>t-ZflB zHX3urRM?SUto2bn9)L^=ZS=(lh_aa4TYflMoBcwYSa7L2NJ^G5bH2Q@kF1g%ShZkGlCaY-HC-C01*rnEB$z zJ9gXHyNh(I#D)NFpTDjJ>gIKpw0HHINs5*doN!Dg&wg_lg2D`{;O}r z8~B!uK0Qb(=|M2^D|KiiQ(%9wLDEqLKaKhi!jG-6`{Bm`zjwtgz(-v;EG5{B4cvXe zqq6BT^78bl zcnIGDl|KUIp|MU8<^fl0YaOLHr#i0(+F?($$7salaYi-QcRqCrbWo3aobvm@a(QY0 zHk_r5np#|(6&4=3jn#2b@

rsb|R)_sP8xQredh(kkpxo`w0m%M4xREFCBpjv9^g zrc(6J`1cz9_c@*9iTGX`gg6T2$r>$JkC9|ke94T$%^kaY^L&=hI=XjtlFNFUPG)1? zbRX~vK%;j!li_N(cM4prBa^)EaJmUpha@k?Q-W?Ta33VPp$zxQE?ya23NF^*qVWB< zuydki(SJ|FIws{u->Uo0bfYRmB6wkL8cpMiVA4bXTjb5qT@Hge6j)-00=rp{2OZw4 z#{=wcjz_e5s-F9)o)h^q=(`l2C{wN;hH{{tkZg_|$Uw4eR>@9nuMKOA`%xdKQFmjn zfqNFX7}L<%SsTUclKvJXPx%|-VQ3@Nj<`$;ufgECSyd;*>kH)Hg!V=JsLlQcaZk6h z7WII#7FBJQX8j%aIg-3exkzs)7ws|AzRT&6YYd(fU{ zprwf7p{3qSnWvibzmvdb%i zFb*WkW~r5b#(ueuJnqTbYe zKL%e>W`*x0f4;vZzDX*+3I62`2H(mUrM!+H*oty?@PpxU9pMHjZ!=(TurjlC=Hp$x z?zwSZlsD~ym zt;cX?MP~!6ftR14qIJ-pRwZz(-E_2YUPn)B9`GtHDq5xfvI6nNGBRkRBI zX{`lLWr$9$Q~tW|2|O$8o7$h!26Fvr6#ysSth0%>Fh98@;Fa}J(Ms^AbtiBnlaAJQ zfBQN+19-72T4sM*=0J5&qveeo+^W-*gN7NN2vz?BCzLBj`s8`h(>< z*z-XHdEOB8vDZP7Cw7Qj2b(p*w+s4Wufwk1pdab=Yx?s3LeOXZZ`ZC5}Cg`*N*JY^#K5U`XK%W?2 z?Zz6bL;7NJJ5pQN?~~-4s6UG_dFZ|3r<2?1JuJi`Q(L9ESNYb-?U?s^5GQg?d3l)J zXbzXd{t+wJPYuEy0Qh}}b>ffkd)4H`{La-d+~jFE5h|Ze!2XHX2iV=Eo#dn^2g+v( z!{oDLOdj{}fp#~g=E#cr&k*;n#aZA4Q~Q||lczBbdHb0>XCh6Wv!Qr@v!A`8o2lLX zoz(9B2EUJ__WEIc>@6u+pXck)?%{pB>(I^bv$f=A&?H&HP==(Jd&};(k+&2w{uyh@ zuy2|_n=o!%i?Jg5=nDOKaVzvhrwV{sqM|bK>ws0(rYlr#3Y9s?=5&Q$G2zc`_61n;~Ly82K~8LWP2K* z!v^TE0dh7h$;NsB%6|^!gpL{l$bAuVzk_k^35;{!<720}kh_Y@eGy~a-a5H+NbY@# z+^(cMMt?tfcR=1nT;4*Ryz>If+mZqu@-=!RU5~tmu6sddqC@sM0>8*#Om5$bJpw6s z9)(|b*elkvT{tsiBbz;km_h^Q;f)7_<*3nv^>1B-@kK^ z-1t^+``O{q_J+)E_HSSBZf|(2v%P+<$$9PwY;ugr*|ZO7q{(@H8`-GjynL{$z42Ap zA4XyIrTDFYO}&M0bMgF^rof`sow?eb~dmiT@Mt!^D0r?02gJ zu2CP52-)iAW_xIiMs2MDzew1HwEY%eKZ^Qopz%8DxDm2+0-yXZRI(iOmt_`kD&O(R zLVB@G8;JhS*x!bbEVC6^l=CNESq=^K$pRZwWPzQLEVa1Lu<{=&SxWt7NhNuIuaO0F zCGOfOs1dWpCAWPzP4vOLUWse~-?pQvOh^p~Y8WRaXqmMx-=Y^WpP z8p-lDWFb4PpPua@Ur24G0d1%rZHV^s7`2%M=<{lACW5yaH|F0IHzl8)1p3}{M`$Bt zr^T?@hqw&?ZCfb>9aURlchZ2*k2rm5Bj2YDPyd1Bx>+liuMMy5uk>N*#0NSG)P~V^ zTw%rGzBXJm+GiuGHk<|fsQ#nMK63r-<7wbXvMS4pmORaxQU;t#Pelb8B z2Ck7Te}^ne8=m<6+VE)T^J;ClPcx%igfa8A+VEyB!++a`SA$NVHoTP6r@k|3K$zTs zex(t1c?LfHENtRi_|tQ!k0$uBdTJNY#bQ&$wpUG^x1BL{-IipE+NSt6>Sy5J&%m!< z%f~f<=hgIc@&eOOGGwCl6B_e0nj 
zFWX?0t#X5=Y!2ERUk6+xKm5OKyll@XWphl)PPy)~WnH6eae>O_8sJy9eJGoCx2kMr z|FWH>J7w2u%GP9Qi^km9xNI+pvX%0B`{~f@E}Q8ZWpn?V)f?>67bgr$@he*q=u549^YXH}%-WG|nZB)wEG{rRWy36)g#$@G-XrHkJJIzsBEJ|*&gO)`+e$lmu+X@vbn^({!w4qeC@3{*00_! z-2r>stSZ|HZJ$c@mWQ%g^EG9|T^1pjyM!B-&C|-;+jymHwkdxbcHL!r;2LGi4OF(x zxYt7$TRefXRX(9En}5A^N7?f4)07SM248Z*xZd6sW!u2p+f(71vR$nY7;=rWbw=6T znO2HFHDMos1AUFH3sTH&ZP<%tqkR%xX)b{C8m+&Jlegnc#$lY1cw=aW96K*V9vd1f zf7cEBe{YsDDsi@h)>LWEcoDj({w}z^x?4zlbpn2w_)W!cUP!wY=RydN`e1kI@L$)X z&d&@F@tmC);;|nLI@#D5;cVCt;%WRY-1*l7lH8aO;rt-SBsWftaQ+SZI2+eQIRB2l zl#P2LoQ)?UobL}Z%dIgXp0-6Ho{JkpJnlHGaYTlA8ba|N2ipDcJO=br@H{!h^KCAk z=Z1J%X5x7T@Rx*ms@H_HH_isnQphkY(_YQbij)t}^v1}_&9iaUyA)jx8HoqQrtGD? zE|;5oA9La^AL`5A#5dKPMLGXVb18}`&>UZLw*kfugLvJl$6>TT1GqM-6Ls7{?AM^W z(Z?O4Ku7G?06!bniP#=Z6}LWaGck`dgW&r*L;3j zl0U}UX#Ft2uqH`k4L9KKS8#qZ2IC*AW>Tnx)QD<%|0m>R+LSy?xEtKZv`rD=I;gbfc~ZzQ<2f586F` zH1?yjh%pro_i*7C%xm&Yw(M%2!sA;ay78*PP+dZwa$7S1m&Fu%!EzIfo&#>=#) z;2``ewW(#?UnW*}h&irHX>0r}3bm<3$V7Y4-sdqbb(`ifT}_)7`%N1^r@gMCP4}vJ z3;D&|wixJv513HOgzsw*LE4j%j^N<;Aht~a!_npoLvj#<9CCO>g*a~ zsI(8fXN3&-CG0b#zMA@M%1eDY_07~rQ(sMeH{FebK85_Qi~8TB0U0u#Y5Sx{x?J>; zltFVr@+%VU|cx@FqW^b1B@U=qy%9Z|?b{nr%8b~kv? 
zIt0avX&y|nHB1loRL~d!KJOfC<7{rc{O!Rwxv9hK!QXX|s|yhy{?zQLIWYi!ae(~Q ziN5mrC1K9<34P^@!)RO(+}_+T%;|15x4Uc2?al9pI^7@Q`7Jze!1F8S_RFuEJ?@=& zACC7G=Jrbun?2`d27Ath279W__`PZN)EzNne{t_)?wz2O8|G}t4aWE-*wdI2?D=*$ z_*@8eHpL)480tKG0rH$Pd%peB>^c1zXwM9F)*Li@8umfv_syQ#KLfte>}lM9Z?&DA zwM+1Ak=au{2IHr=;P$V8_cHFzanChly`+=#%xvJ!G<&eA%u_!z*V7I4c5Xk`XLfdS zx=Z`W=b>ZwWOMtu>7bX3-$b(~0{!*W?%1=7=SIM=?(E6K`BnGCT;&WY<%8}WSQD~A z|4nDH4>l@jsGNE~?s7u^W;rxKeie1lP#Y_|$C%q2mvnMAq(IK$W}JVOjt&RydOSA* zwivXNLA!o9qg{_YG_Is^;r8?)SYrZ>IMB#NS%!f|Km20M7*9z@BSFWF=SIMYP6Wvq z!(~K1vfJ$aliR0~EC*xc#(tfgHIe4_hEUKjt7O4*BVb-x%v=`a@yha5wA^5hk!xCm z+8Zx~IL~whc^WR@cP_{$3!WPR^U88Ahv^IX6nzxp&fHq$e?P?8@Fnto7Ua_ho*M!4 z%J3P>TaP?m8H!Wnh7-Ny>JLHVAY}LyG~UPW&ng-4+z6OghCg!|kS{)1>PC8PNC1tu zKw|^scoX@LsN}$NBVb-Rj&M1U2XZtC8rxe^5d%YgzXCclA{Bp`o3Fp|qRq4h4;#kf9rBnDOhto(9oY@r?UiyfU?dWoF-D*{pBr<&7|2$SbFHm-t_Lg;^xZh4ULSg^ zB3nIh(Kopyx{H9?4&}wTI&VMr3t=uw^Hw+Jjc&{vadt^L^IwnW2Ec@@?tZr_=l&^g zJ;ukkXXD;Qn(Ja7T93JAJ?5HjvCjg}H2)NL4%DaIs>~e~z8LG~)gUkC$mfS659X(C z%ul_1@!SBImv63$FUGC6KN%#`+`4HZ^325#b6785JU0O5iP;14 zQHT*gPWMLiRU3jm(a66Qb=HjMdcdg8wosjIVEOBjPpK=U?%FV= zuBulAx7R$3{5$d6fqbH_@Z12HSN0u~SX~kC$Z%FyrRZC?Aeb-(rX$GnA%5?x_~N+% zFfZTtReVdqm+I>6;P#e3BTp@UU#j@xxdAXQ-!CziqPjYPdx<{^Zg2Yx-(L-P)-OSx zMXI{Oa|2+4XY(Sa>t^I5UDwY=J9sP1dA2avL-TJpV3wj3Wz6Z`5$1gSESob^j6ix= znicj4?#BzA>`Z(DDcVIuwj?(owh&Q+6!C7tPzP$0XXz=#w3Hu@Rk1klhYCCBIYA8RJi<6*%#~ zRcN@pvfvzokmUvX?o`nz^{4SQ$$L~sV`Voljc4$^@=X;DvyKMzN3n*F3GdfZdNu0T zD+|^bge(u?yA`rhT_}2~rM<}7dLTOQ0T6{$FP(7u4*hw{l^a;k_X1BkfOdmE zJa4R&f!G$EiE$vlg}BHFCB8-H;|dQ=lsBL0t;D!4p&t%@nBoQ_@b?Uc%co%nmtpgs zZ5Yo_Mhv11e0St}lsF8&*XFA1q!jkHA;gnK^5On8!S5^BT|j<_ZCyg&?Oqp{--qBw z_ogZQC=O%5uRr)zzIYw{5Zk(h{{904en+T$Kj-`?4r9PiqVny&4t|JjUBVc_VZd)U zmG5VqAH`t|_|*Zga;J|U>JM?k0QHC1)+LN9b{X*7K;?U$^Fz$jh+h@(thVdmhuGF7 zj6v2L@S8{F+rjzuH0Jjjm2bO`AK43wYXvAHVq0#ETjmDfc?mX}16n29IL~gzJa>?6 zTd_|AF{Ps3-hGNT+NX&AM7alv#;9VwKjmsi z7>a4p{dT=7Iw8MUC#a!zK{0jQCoU0P#Hfo|jLSuRZWxbIQ{4LY?kE%R>)_+Xz7~qh z==X)kaQWyyKg|c`@HK;sOV$w 
zoSyy#j-SW&&D-h@s_>5``{92h=wlz8Qr=P(zN@Dn{%3+d_RUkh)fKAnkNL~!4CpedtWXYrU2Kj+w zD9eQZQkD@0vJ6$p;@*+#tbae-llXYpp2Qx~8!ur@tH_78#`i5>v#xs>%ju$6uUJ2% z7@;a|%i^yKoD;~a`&@3pG-5x^gWa(p@ms`!s< z_=E3APO>j{4#jaTe{hcD(>YK@o@@AnT0tLiex|PTcI@sCo2$Z|1^A})>B_echRVGF-?-hh%8hmK)6`Fhe2FpgUgYaa-}iR& z|2}7oybIqu;k$;Wlq^37c*6;tgZLDC{vd1P+t?5I7WM<84In;*z3sVJ&&N3LBf$5V zJ!hWD#ofWZ+o*li7n(iwZvuC`*>m;?@Dn;Y8!#Vp_rzTI^=!|Q@DE z*R*N%dQl6$>GuRgf{s`(B3nm2VvjZc-a*zt#Xp;g4_}uSeZ4Y&-_((gzh96{?FxR= zaT$5Bo@5hiUaeR!s=7r=qcx|ua2}axTQK*f@~%gDFXA0*P>xUVcMh<=d#`NR}|B=>!%U7gE4Uf4)&a%iSJm4I-3ibCSnaL zCD>Cl9Iy+a&U5{OJ*Q*v{-xP-J~Y@ds*u>P3*!!XE!@@dWHEgVTuG!Nv+3d-OEmY&Kx0<=R zEWU$y)M>0k)?yt{#CO^d8+MyRJg<>`;SM$>ZbNv$k*3AU&7jjV#_YKR->sRTHCaUq z{A#ggrl<9DvcY6NcP3iEk+ye}n?a`~&g>b7?`7$rHAY1X{A#gArl<8ZaHT{ky%l^r zf~4~o5qD}u+$qt_;!f1=6y2chy$IcSD`#su&qxXAs-{nTl+=l*EQlE+skzee=p}th1i@D8Th%TSokj;^O z34cdp*S}zFO=H12l*c<3M;q49C91i6sx^PFZGzt2>b(cA{g-9gt1SzwXN+Zk2Dm7L zdqpl{{%DhAm(&lQxYb#YezYF8)qsAq0reebij71cdaNGtv%JT{_qfLgdm8qbJ)eHn zag^$Z+GW949kV{K>p0qqy-77VYgdc2c6B-Y{*Q0b=9E3;{To}cpDPAu?fT)~FQfi2 z9QdsN4BQ_!eZ%~L`^)R^5AO@0%m1W5oD4bv`@^V-ir<<@eDwVx{7uzAJJRV+N75H! zzG=VJ^?7fhOzueBBaQnp?B+SjJus$G(k$GcA-8Xsj5{@M!`&LU+S}$$!d)A=xN|JW zUZ0S?tZ{hNQo0|7&qZuT`$zf&C_n9E(YFir*~CEOfl$y9`&pDeoBqxd`fL;6Kl5~? 
ze~5a$mMj;~u=-QC(f_0_>OeO!e&3P)ZF@%y#f<3=w(!OH`yYR} zvRiq_-z=#?-#&SD$L?eeY=oQx*bok*zDc>~3~~PnfX8t-m0f`&HlG1_A3Z)|?V|wi z>4Q7O-#tkNyo(-=nD}78Lw)pJ3O-`o0|7Vr;EXR~*0F$p_j^_z@;wSZV$eMRZ{cvS zoQNrR2K<~JA2H%ifYe&5-51;Yfk}Nj^{;3bn=%5e$ z7VwikI!y|F^k;6s-`C@#&p8eFAN6qbBcB8QHsG|Tq0|fgy%+2=`UG&N51+|{yJ1{_ zAK`HF<&1BJ{8zva>+x}~%6ovn;=^Zh;@*=#0KVUcZ&T>t4wGYm@6qGqK9b)6{!=|1 zcZ<9R_zoYO$%A`BehK(iADr>UogZa@mvXqbesO=ti-13_$H!e9y8wUKhtK51y&BH} zzDf_r9T{5yU*e;~?>fNm^T8d8esIQiCEzoC za8|B(i@X@{JA81)7iSz706v++y?k-z@W+5>>+x~^Zw}z&_4qi8Hv{l=JwDDQO#?hd zkB{>_w*j7{hvO`a74QTf9VS1{k&FX;fDg{(#~F+?z+-%HhoT3ZUl<8^cRd_?{}Ta^ z1Y8+&T}JyQd&YjaL4bz<{pU?|^LvQpE zF}Qar*4}RJB+tQpOJRtQU%;JQ&}CDzSxUB4;%|eRB{^s?<|X*MGrLb**$p30d-hFWrzdB9W$lU-`a75R|EPA+=HL| z4br&nrqm~6q*)|KE#Ai>Uk%a;NWVl%a(#ic0C$Kf{yRQh?)=S_-8&H@hzpid_xGZE z$RtPkFK=@e0tfR{M^0X%92PEh$2}5DYH)X9HE7#$zhL<zV)nd zE8lR3BmWkMZ{^9j0~9Nb3e%(hI@{oFGb$t6BIb^?4x^XGs?PF-dxhPx_0x ze$>*OsmPRqzm4U>-~H-1fxnMcioHzJ@%OPd^uT?4_&X~5R+J8C!#&iQi8O{R{gE<;w1iMOW}qDS+g7Dk4`?fLpQW>^!6nbXnoD=DO7das zTcZ0{Uww1DeVZvrwkMcQrlCE(`sxI`#biCa_1;OC=LS_AIANl{d6is^xJnKE9j(#{ zhYx>sW%o?f@lN1;^cH*;?gl*h9xK~vv>nS{3v|%4Vj=LJ1P-;EKjWF|Ve#OO-8-qw zx#p9tQzg&E*%6R666Nj;KW3KiMcJxwH{w>*Z41AzlkW4hoBOx@7XFKLOukL2+d0F% zb^AK*0(={HTGARE^bP*e$-P4M_5I~Puk0p!RqzK7^Wq-_zPqQ@8Pg@YhdZdp=(s-C zRib^-9Ww5IxJ%`HpYt`Z?CY!8dl_q=WJkM@wuX*0 zB+k5K-??eV0ki3($>dl~@;vvp$}Y3Qyt0&%k1{{drskvY9=5M9(OeIjWVZ>5eaDec z#$B+{_?wAw6}O>$@Q1%9y^uds2Mv52$C{DG3`iC0d#r~t$dL0nZd|yususQ*XP-Rn8?x;RFKd+C zJpJAB*2-orEbf=}QR zZSaB8jej>Za625;`eQmHRXPnlWz#88%oa`?YE{0lC}gS=eN zb2=dd82)z7zm4O6HIBi5lg`o@uAg~azKh)%{0|dYx{Slada(D!V_6!<<#>vh^N*a~ z(_Bsu=d*tl!;cux(t}){A9MXWlNh`#k)?aN{LA?FH+cSmJpU8CoI`sv{Ik6L5y9;J zr=0&fF6RM`Ulqc>OZ?w0ydGbOV(`~_zUjOk-{X9r=XUWCe=p#Pd==d_&W~2kC!Kz>*=q&{w{I4 z#r*pWZg16`Z&WHP{|>IN7r2~%-8S4uP=B#wLQW5lS%C?jk?0p{ya_OX&O(3Tvq-*jiZ4-Mp@r;XkJ$2^qZMO{-B$ZU)1^0>9z{N}3X)V3(+#ieYB3)Xm#rq@o_u{)q z7u0F-UgdTrzKe9J4lj%Q?@*3#Dlc$3`iI9W7?B*W>Gr^Qvw44q`M&b6d}?64PkDPW 
zbG(wf0^_~F+o{lJn*rWa+#b4cdSQ15rq`GI2T|UrnSt^8bct6eVV{TcFL732JRu)P z^CoL`dfv|E^CIwvl5e#2RvH?wM^WCJb$FM!Kh+}_xwZ6;4+~81a~+_ZatcdimBiXyto}*RLMWt;3th?NpEVK%F_$I#Op~Z?~~4G z>51~{k&o%{wi}dp#91x9=Y|C4Tc*Pk_NV6?{jHYXBm=!{(c%5VfbV4;UNmnXdcG^q zY5Ddt;M=6bi!i%`PQD-(u+5c?~D$wgZC$TdJF!orDx=K z-8wuYzgu)cOYd1e9?B5# z#TxjbDjnVn2L8pR!?PIpzf(HACk@J5t-~AKE3jVNI=sIO3yjyK!|QG|9@62Z8}OAp zT0I{!&`X#OFO84O_4O8|!|P;FA8sAq_L#usopg!GC-{`{cQJl3@=KFKhbUPDo{@iu z4%6Zp`G?nZct0~3e@qJ3()*Wze?6wdi{s-XQBIMLh|tp8XyDfm>F^{z&k=M)8XKvl zCvp1`dWxD8c>HmwZ@eI(No_iM_DF^&=*1h*GqRuH&RY4#8nnL$x@z$z^baiG@t#_| z16)2$A@K5^4(}-gJ%6CXd&$5KKGxx#GO&Zsb$H>O0+*vshj+I@dp@JXGa4s1>+p>9 zQWve&U#&qoKJKN(Ga9G7*IS2YFg`jOqs6;#U|_u*iq+yJ@p+ZLes}fJ;)(Hxm@mcO z7ufE5@cIbm1Ya<~6Z0iu_gVJ`ruX!az~#8BUh9XA{AR3f9N*c%|1Z$t8P)Gn9iEtn zNAU8N7|cgT8ML=)-)YNnAtP{kr(M?KB^cCOnGVmW9X0FljQW$!?OMJ@<6qYmEuK;T z@qwhZE2I8nuStvdzDy7n#VRoMF+1Hb%?!FYBr zr!VZgz+ikF-$N_E&0t(~NQWon6ZG4@r+gE1E*Hx&qyy#W3=>)^ir3m#S{LkFSn{=KMLGVKR0M^ z$1}9_o;4VM9?R6?8TDV4Sz0`!{_FTyEuK;Tb!?m#@3=ubKRRBEH_^avIwolGZZp7p z?Pe{W(KxXD7A>C9IIw)87VqUDf&D?bRf{**pdT*J*5UDZy~erVr94NA_ag&;Ri3NG zOE#DnmQT{+jWxo%Rf~6vQF(9E;{C=5Z?YE8Xq;c3r^T}x%;U;$*W$$`8PvxVE#6cf zhYIG3t||!J-UehcdIGO#eqg-kLK&VIe}okV#(R(3y}*k#z!UR^a4uh>0iMzPtbD4r zygRvkoSZi)pQgoIWH2r(ze9_6$Y9=Dey0}CP`>F}JVW{J(&8D)cefVLP`()qPbi># zruKdN0xca;|6=@s$FY z6z$5$PO4^U>(|ImKE6kbS7hLqtMAp~{mLNDb!N5}uQ{06i?BnHw$0JvwHx3C->=2{ zl|g%qnybaT!=Rt+|9}?nE$+WXz2!X^xIT==bBRCF(lZ**rTtioXEdI(&eP%{^QTBc)F4f{0)$e=Dw0K7KTeVz^ zXQY>^6L) zNH4C3wRlE)ajn81J^EuN8Hs<&wIjPz2sRf}h&7x%MTJR`lhw`uW=^y1#G#WT{2`zKmFBfYqv)8ZNF z#l1s|XQY=iHZ7i!UYcYro{?Ufp4Z|T>7{9>7SBj8O}n&sMtW)bsTR*jFHJww;u-0s zdAAnNNH1+KXz`5n()M#Lo{?VKUew|l>7{Lt7SBj8ZF{wNMtW)cg%;09FPC4^;u+~h zD%0W_=|$SF#WT{2bU=$|q!;O+7SBj8(jhILkzS;iwRlE)3I3%P&qyy}uP{6@E|sio zJQ~H*M4mcQ#W+>WPgojX$>BU*`{&8Zl;!i=EJ`|UGD{t|GQ51AN_-#g`u7;Tgr`+c zv-d+hE#dUm^R$e^%SF0{;aBneFYxy=o{p+w{9U&)c}sYjcar7%U=jOX&f%LmA4Pcj zZxPG4@-~KFIghP&*%B$dHyC2ujKe=IQ}vIef^JF zc`LcRMf`mMPgn9ZpQkono-&?F|6=m);_oH*F}-AQ`o}okySRR~a6Bvjp2y+qxt!bi 
z_d`5g$>CHdH~!twzzq%D(7+81+|a-c4cySc4GrATzzq#tT?0{Ucz<=I8((i|;D!cn zXy8B4fJv7}+#@1V{dXPujo1In8W8p4jT3TtMEzY$y!cmGoUQtoEG;Q#sgJW`m;JwdPl)yV;s3wFH(vK|lvT z`LqDASiqK`Jr^Mrc&qUI{M#KJJKz8W3?Q#BkgW7}?SBpYo|kK5{{Mp^{L}@0o}J1< zHXP5-p`GF9npX0CetNiQlj3`4zW?czyx@mY-v9=4mHg@j`U(H(qaO;D!cn zXyAqhZfM|!25xBJh6ZkE;D!cnXyAqhZfM|!25xBJ|1}MC;{tW?ho1agqllB-%kM=I z@OXaiTHLGgv?X4lAnwHw=R3vuO>sU`oWB(3E5-RqaXwONUmqFm~jqky-JHMQhit zTr(~urGQZwxoE|TMN1d1S@F>7!i6IXS1e0eyJE%C!lHuW#j}{LuwtMyD^#EJ#nym_K&xf~+xF>0`$hW;~b_pXilk+O&J` zV0_oCTb?9MD_*{2;p%bm)22-l(xl9PsCemu`773@tX;N}es|nGW5%inub-PVIX8D) zeB$Ig?@5YJ8##95`uNeKQqxDJjvf=AxN`Nv)eDy{gl3W?$*VEQKW5aJbU%&F52Ucn z|6E~NSy=_?Sqm3tEy#FqeriVMsD-JSqq7R;FC0@iCNpbP#^|)M3m%+b7+7JMio&Fk zYl_yaUA?wozBF?AinR+zib^Y9uyFZW4qLSRp^=IfZz^6OjaD}+~azzfQo??3(eOB_C_^M{$^7344;&>!r}syylQ zj|P-tr7)2a zu3F?5=L`~g{%i4n8hMrSb0%3_{w$u~mC8TxMDP{Rop?~^4_nR3AGVt5uTmI>L4LAt zb$+RY>#v06pEE#kVrjhgReA~UB$Ft=RL1hp`31|rInkocp;O2~d?o&)v{;hJ|22J7 a{S&Y1)XQ6icgsG;|1}ki9z{L@{{H}uTN!8o literal 0 HcmV?d00001 diff --git a/cruel/dtb.G970F b/cruel/dtb.G970F new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G970F @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G970N b/cruel/dtb.G970N new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G970N @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G973F b/cruel/dtb.G973F new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G973F @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G973N b/cruel/dtb.G973N new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G973N @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G975F b/cruel/dtb.G975F new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G975F @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G975N 
b/cruel/dtb.G975N new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G975N @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G977B b/cruel/dtb.G977B new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G977B @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G977N b/cruel/dtb.G977N new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G977N @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.N970F b/cruel/dtb.N970F new file mode 100644 index 000000000000..3ea505ba1eb1 --- /dev/null +++ b/cruel/dtb.N970F @@ -0,0 +1,2 @@ +exynos9825.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.N971N b/cruel/dtb.N971N new file mode 100644 index 000000000000..3ea505ba1eb1 --- /dev/null +++ b/cruel/dtb.N971N @@ -0,0 +1,2 @@ +exynos9825.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.N975F b/cruel/dtb.N975F new file mode 100644 index 000000000000..3ea505ba1eb1 --- /dev/null +++ b/cruel/dtb.N975F @@ -0,0 +1,2 @@ +exynos9825.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.N976B b/cruel/dtb.N976B new file mode 100644 index 000000000000..3ea505ba1eb1 --- /dev/null +++ b/cruel/dtb.N976B @@ -0,0 +1,2 @@ +exynos9825.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.N976N b/cruel/dtb.N976N new file mode 100644 index 000000000000..3ea505ba1eb1 --- /dev/null +++ b/cruel/dtb.N976N @@ -0,0 +1,2 @@ +exynos9825.dtb + custom1 = 0xff000000 diff --git a/cruel/dtbo.G970F b/cruel/dtbo.G970F new file mode 100644 index 000000000000..8e3447ff2f8d --- /dev/null +++ b/cruel/dtbo.G970F @@ -0,0 +1,27 @@ +exynos9820-beyond0lte_eur_open_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond0lte_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond0lte_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond0lte_eur_open_20.dtbo + custom0 = 0x14000000 + 
custom1 = 0x15000000 + +exynos9820-beyond0lte_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x17000000 + +exynos9820-beyond0lte_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0x18000000 + +exynos9820-beyond0lte_eur_open_25.dtbo + custom0 = 0x19000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G970N b/cruel/dtbo.G970N new file mode 100644 index 000000000000..6852531ca57c --- /dev/null +++ b/cruel/dtbo.G970N @@ -0,0 +1,19 @@ +exynos9820-beyond0lte_kor_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond0lte_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond0lte_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond0lte_kor_20.dtbo + custom0 = 0x14000000 + custom1 = 0x18000000 + +exynos9820-beyond0lte_kor_25.dtbo + custom0 = 0x19000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G973F b/cruel/dtbo.G973F new file mode 100644 index 000000000000..3c36e23cd44e --- /dev/null +++ b/cruel/dtbo.G973F @@ -0,0 +1,35 @@ +exynos9820-beyond1lte_eur_open_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond1lte_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond1lte_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond1lte_eur_open_20.dtbo + custom0 = 0x14000000 + custom1 = 0x14000000 + +exynos9820-beyond1lte_eur_open_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-beyond1lte_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-beyond1lte_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-beyond1lte_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0x19000000 + +exynos9820-beyond1lte_eur_open_26.dtbo + custom0 = 0x1a000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G973N b/cruel/dtbo.G973N new file mode 100644 index 000000000000..536816bdb5de --- /dev/null +++ b/cruel/dtbo.G973N @@ -0,0 +1,23 @@ +exynos9820-beyond1lte_kor_17.dtbo + 
custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond1lte_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond1lte_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond1lte_kor_20.dtbo + custom0 = 0x14000000 + custom1 = 0x14000000 + +exynos9820-beyond1lte_kor_21.dtbo + custom0 = 0x15000000 + custom1 = 0x19000000 + +exynos9820-beyond1lte_kor_26.dtbo + custom0 = 0x1a000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G975F b/cruel/dtbo.G975F new file mode 100644 index 000000000000..315073f30479 --- /dev/null +++ b/cruel/dtbo.G975F @@ -0,0 +1,39 @@ +exynos9820-beyond2lte_eur_open_04.dtbo + custom0 = 0x4000000 + custom1 = 0xf000000 + +exynos9820-beyond2lte_eur_open_16.dtbo + custom0 = 0x10000000 + custom1 = 0x10000000 + +exynos9820-beyond2lte_eur_open_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond2lte_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond2lte_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond2lte_eur_open_20.dtbo + custom0 = 0x14000000 + custom1 = 0x16000000 + +exynos9820-beyond2lte_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-beyond2lte_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0x18000000 + +exynos9820-beyond2lte_eur_open_25.dtbo + custom0 = 0x19000000 + custom1 = 0x19000000 + +exynos9820-beyond2lte_eur_open_26.dtbo + custom0 = 0x1a000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G975N b/cruel/dtbo.G975N new file mode 100644 index 000000000000..049423bd19b8 --- /dev/null +++ b/cruel/dtbo.G975N @@ -0,0 +1,27 @@ +exynos9820-beyond2lte_kor_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-beyond2lte_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-beyond2lte_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-beyond2lte_kor_20.dtbo + custom0 = 0x14000000 + custom1 = 0x17000000 + 
+exynos9820-beyond2lte_kor_24.dtbo + custom0 = 0x18000000 + custom1 = 0x18000000 + +exynos9820-beyond2lte_kor_25.dtbo + custom0 = 0x19000000 + custom1 = 0x19000000 + +exynos9820-beyond2lte_kor_26.dtbo + custom0 = 0x1a000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G977B b/cruel/dtbo.G977B new file mode 100644 index 000000000000..94076674c375 --- /dev/null +++ b/cruel/dtbo.G977B @@ -0,0 +1,33 @@ +exynos9820-beyondx_eur_open_00.dtbo + +exynos9820-beyondx_eur_open_01.dtbo + custom0 = 0x1000000 + custom1 = 0x1000000 + +exynos9820-beyondx_eur_open_02.dtbo + custom0 = 0x2000000 + custom1 = 0x2000000 + +exynos9820-beyondx_eur_open_03.dtbo + custom0 = 0x3000000 + custom1 = 0x3000000 + +exynos9820-beyondx_eur_open_04.dtbo + custom0 = 0x4000000 + custom1 = 0x4000000 + +exynos9820-beyondx_eur_open_05.dtbo + custom0 = 0x5000000 + custom1 = 0x5000000 + +exynos9820-beyondx_eur_open_06.dtbo + custom0 = 0x6000000 + custom1 = 0x6000000 + +exynos9820-beyondx_eur_open_07.dtbo + custom0 = 0x7000000 + custom1 = 0x7000000 + +exynos9820-beyondx_eur_open_08.dtbo + custom0 = 0x8000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.G977N b/cruel/dtbo.G977N new file mode 100644 index 000000000000..32e9ea85994b --- /dev/null +++ b/cruel/dtbo.G977N @@ -0,0 +1,33 @@ +exynos9820-beyondx_kor_00.dtbo + +exynos9820-beyondx_kor_01.dtbo + custom0 = 0x1000000 + custom1 = 0x1000000 + +exynos9820-beyondx_kor_02.dtbo + custom0 = 0x2000000 + custom1 = 0x2000000 + +exynos9820-beyondx_kor_03.dtbo + custom0 = 0x3000000 + custom1 = 0x3000000 + +exynos9820-beyondx_kor_04.dtbo + custom0 = 0x4000000 + custom1 = 0x4000000 + +exynos9820-beyondx_kor_05.dtbo + custom0 = 0x5000000 + custom1 = 0x5000000 + +exynos9820-beyondx_kor_06.dtbo + custom0 = 0x6000000 + custom1 = 0x6000000 + +exynos9820-beyondx_kor_07.dtbo + custom0 = 0x7000000 + custom1 = 0x7000000 + +exynos9820-beyondx_kor_08.dtbo + custom0 = 0x8000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N970F b/cruel/dtbo.N970F new file mode 100644 index 
000000000000..b4d61a7dd5e9 --- /dev/null +++ b/cruel/dtbo.N970F @@ -0,0 +1,19 @@ +exynos9820-d1_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d1_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x14000000 + +exynos9820-d1_eur_open_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d1_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d1_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N971N b/cruel/dtbo.N971N new file mode 100644 index 000000000000..4bc1c136137d --- /dev/null +++ b/cruel/dtbo.N971N @@ -0,0 +1,19 @@ +exynos9820-d1x_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d1x_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x14000000 + +exynos9820-d1x_kor_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d1x_kor_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d1x_kor_23.dtbo + custom0 = 0x17000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N975F b/cruel/dtbo.N975F new file mode 100644 index 000000000000..97b6e49f1f53 --- /dev/null +++ b/cruel/dtbo.N975F @@ -0,0 +1,39 @@ +exynos9820-d2_eur_open_02.dtbo + custom0 = 0x2000000 + custom1 = 0xf000000 + +exynos9820-d2_eur_open_16.dtbo + custom0 = 0x10000000 + custom1 = 0x10000000 + +exynos9820-d2_eur_open_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-d2_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d2_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-d2_eur_open_20.dtbo + custom0 = 0x14000000 + custom1 = 0x14000000 + +exynos9820-d2_eur_open_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d2_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d2_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-d2_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N976B 
b/cruel/dtbo.N976B new file mode 100644 index 000000000000..a720e48a2efd --- /dev/null +++ b/cruel/dtbo.N976B @@ -0,0 +1,39 @@ +exynos9820-d2x_eur_open_02.dtbo + custom0 = 0x2000000 + custom1 = 0xf000000 + +exynos9820-d2x_eur_open_16.dtbo + custom0 = 0x10000000 + custom1 = 0x10000000 + +exynos9820-d2x_eur_open_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-d2x_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d2x_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-d2x_eur_open_20.dtbo + custom0 = 0x14000000 + custom1 = 0x14000000 + +exynos9820-d2x_eur_open_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d2x_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d2x_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-d2x_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N976N b/cruel/dtbo.N976N new file mode 100644 index 000000000000..273ac9a7c960 --- /dev/null +++ b/cruel/dtbo.N976N @@ -0,0 +1,35 @@ +exynos9820-d2x_kor_02.dtbo + custom0 = 0x2000000 + custom1 = 0xf000000 + +exynos9820-d2x_kor_16.dtbo + custom0 = 0x10000000 + custom1 = 0x10000000 + +exynos9820-d2x_kor_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-d2x_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d2x_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x14000000 + +exynos9820-d2x_kor_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d2x_kor_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d2x_kor_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-d2x_kor_24.dtbo + custom0 = 0x18000000 + custom1 = 0xff000000 diff --git a/cruel/unxz b/cruel/unxz new file mode 100755 index 0000000000000000000000000000000000000000..b8ca34f889b33beb530bee5c54d0a3904a71842a GIT binary patch literal 627688 zcmb@v34D~*x%hwHnJn)lY*`5jFcTKd47i{PAu7!zU 
zBh0di18=Oi)@{)I>AJ+K=lrTs)?4W#iNBmHt@kt`NLRne_HFK?I={gC?IwAsH-8NX{(%|WGh zDCIrS>0WREd^PF&QjAv<)ZTI>*qHN)hRC7*(GP>M3;aQq@=Gs#bRUL7A1IJ>q(fzBh4jsRKHgL`Y z-ty4t+si^{lE2y~W0&^zHTONH@|E$r;5$j}I0;Ppn&4ydE=#GX_nv~jn}O{L?xXI^ za1p;<<5l`rV1UUAOWmqO-W7X;Gm3#v-fiMt5SVf`HN7=SrJV+r_uen+I{~ax?$oET zBIZPCeU2x&V}@r$$5SS6WQIR$ zY)#Ov#jTF2m1Cf1cKGc~)jUDqRl$zGJ+7LC#+`1}rmmV{{0Hx1uetXXz$=kis$c)7 zS^cMPPXSk{;A~jveYe{CU;ORl?*#mC_D&D@91b72pr00q($=kiQ^u(WG-Ir4tI7k` zoKH%P_RBP*fAxq^|2nsUgvy*!J0?_7q&j3gd%zcC>X0^_@?GxCKsRGQ=*|s)2!9-c zUoYLLn)L)|`mwgG!mX;;D>W)*b=^IFPfDXt^B%Bp6YMwwzea#fzc$#B(y8YA7w4t> zwUoeO`l#rS73zs<{x6p&8m%t+uPSI%iNWX`aMWcHr}-sbw`)s=5bw{k6(C;`W67X?i0trfs0j0C=hcPrn3~d+0~z&7wz` zH?!3aaIw#`v~EV}<7!#iY*j5Zo%q6ZS4Gn6Y4--QWCR)N^m}QT;y8 zro}s-earLE^T>zk5vYFW(;;|EFX6eS0=3uCrV!Q^vH|9W3!*4^MAb%et;p)u*6E z0qqNapA>kRd-ASZsp?ykm9ZFDfZK4M*TiDgi{bmo-`2diz^xytxK>qnRtBRVb*koy zPwv|<^Xw?sot0|GS#Z{|F1L^C=&z9#QRw}(WVH`@l-V$HOrvM*fU>EF0#za9IONOice#Of)lr@e^l+r1Rr_ulQ*1a<74dHV6=X9 zNncHWto;Mmm?O1wRJBgo2A;+D(|tBp-LdZ0zRjzz=sOKB3jYg#zoeO>%yW-wTkch!r6F(<5MA+3mQmTOj52NgMv?Ia z=!g>`)$ALoqUunrJrO>L2%Q;&XO}9;i-Z3Gfq&)~!@poC{8yeI{x|3&UM8e6SI>SL z8$E!(T2*p}duI-8h2HAobLYU?i-ydBrcncP;8|zReaShn02&VTy;ha{k-i<;&0_4n zqAwo1Z^+nvm!E&^M;QA{@nhYhN;W(*iEPNN#1RjuiU%>A)8AE>KU z^mZEtg$(&UFw|dQ!JrPQJ01Cr{OMQq^;fC-wO3hke5_G^bc`v-TZh2on{&G~iVgWzJQE*wi+gZMuX=o_$N|j#EW- z;PyxGvcTqp@7=1QO4W@vVcP<1^%<(@pxDc!>Z`7T|D_$76Hib_WQo~#r?i<9o-h53 zu;GEm^6VO()i}?5x#qJuJj3s@;J(3nRxWj&XZ6;znLKM5t%@4x?|!>K(cdeitmj%la2(ig?(3N>%(aW(%wB|qFucBhlJ=qVxK|_6et|9tjxA5ND+0g=Y#$&v* z<{SKRj|IQXxl$W`vG3F^Dk}D1LwwmkT5?*-7RHyo$trtaeA%Oe?fMRjUwX{y-|UB$*f3E$f`ev{bvZ}ZM9 zmyl`fbFSY`H|OMkIQMlK=KUV$zAMxCE!QiUGZ}WfU9`*G65pXo1*cO!BmD9}`7Cp; zbZ~FJe_^tD-|oE6wEEl5z4*LafLnaY{FRdo8KdI^7iL>ymbSk(+PW@g{IPYeeG%|@ z_Ra)wl?cv~z+Eyp)bT$(A$_}M{42EZXu*ypBZD2MDJ$*!vCkG@tKIsEGWut^47Dp! 
z?8=VUuwg5hFLf!Zs3Om@=Tr)QBlWxcuV8>i=lIp6j!Y*01IBk9IW_|uJI~uKe`^7G)5KP z#Tb!+(~+=Mo`P6lZzoC^L5We=k zqeW{jQ0@N*+wmHFNg3-Cv{`d9*51E*F7&B@PII8wSE1Y0@c8Tyv^4p9^ZhDhj(tA( zPgKi-398!bQOlHzU&gpAQB`*xD2vvlS+MLGX~ELK?@(B}4rtNfNY!3q*|2T;t|UYC z2cy}*-ZIRp?;-XeW7(Z@{`&PPl%sxY3h)5OI^dB0JCUJ+3xB#*KazI-`YpgYhWfSO zQs%%`nVZa$^3Jaq1rA*KnsEZ3LTxXLj)qr#SytPNGpx1?_#Ha77RJ_ew3wBflRncVZ-<81qXMTmJ zW!%9`3$~Cw?mB*l!X|k40oxy_r{IfwUIqq%_X_l#_+i*qUY`o7dbN%BGr5jR+ujP* z;r%*)JJl*JBN&x<2Yy`Ci(Xk2P|d_)0>0_0NOZllyU=deH{;8ddbO-0UC58!F0~_)9*lOwJFUbjx`>ZTOitvd*b@hM zwmT^pZQ!>vB}fdn^njb^Qtnw`b7&KrphDPHQQtbX%}0E z+AwAGSW89|qipYDKC8~*#=Z+wbFYk>cR!+SVw5}TfEQfs)$?`3KW(%T`I#1yXL|1# zBQjn^o&Fh1bm%#HbVuR1)wq)@~7U{vTpjE}K1 zzifJJ{!|N{m~7F^hutjna`>b*4?7zDC~~;IL{*E<-JM-lBKl2qMLxJ~{XX$6;!mOP zt7t3Z>}gQV!h>1ZK;4v+SZ;oco7l6Lc&azN{Ku;K9b$memdM};zj|%3L}1t5s<{GORm^yv%Lqj6*GIUjKuB!g}eZlBc6I6RQ zI5BOYZ%;H{MkkJC-nXI)^!9)uco2K1mcKgefW_IWW5p(`A8dnZ>Yr3c4f49>;;$1< z&bmy>uH{eU)ndv?nNH#!C;ms3e4BP(07kL-RyA2RUyJCRcLT;|`Yoo8;7stvJg;6w z`JR!gqkwqm9_#^s?PNpnTacwUMzHNg|Mn0^iU9u%^rWthZY&~Z7eOWqKFa9pq)Q!% zU_0&nL9p*#cw3%dX+J0SS1omSyNGA@E|}qcSuG>3Q5_i>jB1*CL&_fip=!p5-JyuL z4&bSC0&_VhJOY?MfIkK2+2Hb*xzc`icp~kepx>Txz(@P7F3&*wL7o)=r`3LT_$09Y zfj)$fgw91{S?d^!Px!;KkVzBzI-Ws$y!(~R1$4_B|2ilGe2Jee1ANGUcv&bi*^z~g zEZB5~EemNs7>N}+bMF{@F8USQW4(Lt?*mpKHHiGaM}+W!Z1$wXPZ2z-eAz zs*DV-rJZ_wjI;3bL3nWl&tzn2Oo7mr;LLiDq<*J zQ|?7Z2cxonqiLHr%DRWFO$ndhD)cAjcyfg5-IENh(}@Q!T|6VzJ-ShNN#=#XTsvIt zQ_YhMcsY8)of=*My?fxXBg}o7m%d@DFqW=*L)edzY_%_kIJNM%_Z4J`(?%L?e0{Rn z#z}Vy>sG1ZYvbF2?}E}!uIil#t%R>5!q;0Yz8+{Ni*~+!ZaY^FX{QC7O#1f4_x<2x z>_AUg`o4tnheapgFNh2g-W1;bI(U>hvq;*5|MxKO1dal2aDH_I+rT{f$s}VYye+)H z2HA4X{2$Kz2iMF+Gj_ctz8-SwF}t4A_fhB|eF=_54mk5S0a#={*PV^+2%e1&ZE)cK6trn zz1Dn?@(P-Zo*Ya40(e7sSZt9xu7!t1PE~L%@R_o#o0uo-D6+QG1+BB`OZpXAou#rH zrN4dj7yPQzU$)ubZ|JX^wv;=&@gO>(lY8Br)rcQdat}C+fJ=RC(3GLzpjznRMb@@p zLkqu%?Dgu%f8yLV=<)m+YpYzB)>j=OUM;d3S~S%l<2z^YK0DZP_f{8kB|CgEvYfbk z;VI_a2lzqYy$PB(Ir4fJeTjZ!eWAA%{Z_`kg+EibH95LX=8fpKe`SqGbla)!n||=t8z+B#Y~J2K 
z99z05!urs>O{x7=kEPs)uIjI7EXYsUR58ob_icEn?qW-)-;$w9HlzRa{?-i@v-G~X z$*N=`u!?Vuo+y-le)oi_-F(ixqk;3@zI;wCn#eT>#OCjlttg>USMzLS^L4^ z#)Qj}#rP)?d~{nbg|hd~K0MNt6@WDE@W}w#%0O zSoPKLP2Iv^$r1WkicVjQK3^g*;ai`At{;KNKKF*)Jp7iPL}HETdeNP0MUGHM&vzM~ z{9q_doT!r6Q7OK1a7@|vuhcfbU%?ud4)1%kyd$gEHEjIo^7R|X)1O)I9elCgvYkR} z8K3xMTP|en37<@8UUxKBR0nTJU)>k0>bhSO7Z{$I?#&M_E4#$*`>wK*NMKa4;8525 zL}uQ!Q8m}yA+l6I;zPgRiGG(cn0@Zkj03=V09<;AL8QzdW~43~W_*n`wZ;6=cZbl& zlKGi|ZNn`Zc~7X4HNYDZ}@_8>I!W?*k1=e%-{{#cLwSc-iE z-&YF`CTKb~h`^E^o&kUO`jRE)*A(I2pFe{9g)Hdr4HO7|D36^KUGu1R&u@^0jxE|r zTZ4R-9ln5ex=th)Tl_J?v=g)A>PtHUpYIH7io|;SZgrX1i(+I){k03Oh?wbO?{0`AZy;{H8#PM;NN^;N)cLmP8lLHMtv2eQsn~fTe$D4s2ctrlQcXXS zw|4zTzn1q#54QLJrQT2+A}?j#zHO;L!3dp+eJ&2)wfoJp+ppyQBXou{zeP^0$G`Bb zTHLpE00!0jaN@%o^;O^QTL7+lMsLntgB&a>Q9 z6=|C{Mu7J++86vtJgCFE-dm1MCu{xYb8NL=IM>)`ZCs0;_W-i}AhN6jIy~>x!{%yr z%F>?Lg-#pfP+8Z|V7PNldnsH?dw1CF4cLXWHDDJ4PdwfH(5;Am9lB{<*z@4UgDpJw zgP3XO>HL0?op%(OBC^iWZ@xXT>YI3WH#&3mLzW(`u;Ci)KRf&#`mdqPA9h;)jI4i2 zotcAW+tiy=Or5&WF1yz*E4nv8dF0%V8hpEMxBiCkav^_7Ppjrvc_-JM{EA#H=TBr^ z>?un|)sSys^51{M1M|ywioBL`;*a@PWu*tStjk1qloOv7J{CNYIA`j8pV*A_pY`9W zMC7E5UF3fOW$NxwB@Q0CI%CzcjwbJ%XF;Bgu%9_~op&{_#Hy2dcd?DLGuRmt>o;*m ztpB}K$>&iLD{sU%J?G=@6|Gv)>3x2^joA1@F{VDw>*01F?8Sd!^|bG<~b6%aC5FAe`jdFF}HhP z-vXU@vg9D85cf|F=_**#|C-CFz*cW%ZsE%}skxmqPi5n?|9*rK`RfSeri%8A7)eZq z_=#3OQ(}^l>qZ(qKN@K$?jye+NtybYJtIb0_t%ZG?tf(8|K7eIk!HV7v)|kIzqjv4 zq}%V)?f3Tm@9p~$8TR`O^S!_mA@@ye0>-~g19$N;4Uxg0@NUIcxAEeGsyQ2+OU|6& z*~*pjhGot13GBX_4d`8bBUz^tdqDUth(FJoMN{)aWjsbd4HJj~`;4j_EhB)xx?K0I zt{y@)RF5Yye+n} z7yZi|sB$MZpa6AZ^JSD|SuCqx2^* zdZx|pC=lB%Ex2s0Ek88i0WWQ8#)XE!RchmL*x=l4Q@@#PTxR3Ufu$3h;aQ&;qR6_4C_wv3Ao^aYf zpO29PY?=00&Gossv2JCoE9tL`d8MiPbdjb1M_~)XTa*n54?Aso?6_P7JnW{;(RP~- z{)9Gyzei~EN!nbIOw1N|m1lOxW9T(}&khgcThBWgpJ@;Kv4`OY6PG(KFk=pDM_PR5 zjLCN_R(%P$z+MULU~ZhtrZQ7UC6qR4SM(!-Z*))i}I3(n-DtnmWChPu(9U?e2BWb zo#f*NHbAH2ZdX9)S(&&wcRKf7@KKq`!&!lTw`Pzpys9Ua91%D1(LA;4C-4|_m3+-2 zktHG@1ovW3#Mj+Ox!w2z5tk~GGA-b%lNc&7f>pcm6|yM5Zk;7JyrZy#i3fV8qAT#n 
zHoK5P*dvjQhK(zh)b&*i(yL8e5aCv1XW1fH>%-hzrlPJA6@wRQ+!nM z9qaKYWX{Pmp-s%KN)j}1LtGVJ9^m=ib}V`izPj-IR~d5-Jim+h$O&6!I6S`>ob80K zaU`bS#(1PHXYLGbr;v6;_DDNJ=gz;;j>sN?S!@r%e+{yf-(^02Ys-TjF6L<-V`;&c zgO*42edI}Uy%HO0oK2U*%t3peG^tS+ne*!T5y*bpUHH{ths^a9+E6)Gy^(gkKgQR4 z%BuIz)T_IKxjVcfMf2pQ7kE^9zDHfIdB$BX?R}HFLA(F{2Q0pLJ-!{^#b*DFw6l1^ zoRHwYXQB$#1u8<3DOyO{`1A*sO=^zkzpdCz7rImOK4e^fvD@7d-|nQTX1njvu7mF_ z@M=81cMn%P#7Bv5S9spKSB484P~`qv!8P%rhsdQCoGbh&flIFI1rG2ZE*x6trCV977vDR2l+2`me3 zSn}cFf3f>oNnhYk^mLP&bg8jX`oRv?C8t`}w<((j&NkWQ>*C9IUTpk`@&ov=VNm$& z@og*uKW@fp!nF*4QBzf|EJGS+GU=Y2ti}4NB zI|=Z)=$)bY?#y#@zAvS&GuMXBkrT3Z0FIubz6qnvFKf{g8_+Y4D^={^ex8l{&USk| z_Lysq$EshWtnjs#uhNCqzo5^qo>QpIfFVw*r3a0mlO}pmQ30 znGqfV{e|EASI=T><;dBx(5d9veU^-M?KAO-eIYqG>;(wRIEC&qXC)^`@@>k5A89)X z>=Ab`x}G?J*!a7c9})bB^}_4imvJw!?uG|_$cK|_E!pr|JY1%qml2K(SIy4)gp*71 zfW%_S^*4E4S@8eQdCJ9J)ApUcQzI9}g>H2-rUb^u{*~29_(+q-!aS z5n_~UTv%%Zu-3LiNs4c`iuWT=eBR@TOz`j#rG$RLIb zE}a+B= zUqTzD(@xiQl-R*Xpb4z^dlU(=8d{o&-b}@Eo-abDPq23_vPj~r z9@Y1#&~cbKE^mOmNuz<7y957;}9ciHDEK#l2_^)9t@edoqf5fn|}ws8(2H~CF`fZ;_qE}em{RA zBMyA!BIDF`egm1;oAaS&tau>PD4;E=zlFc8qId7P+{kaf+$b+BG*-8$=0f4q)#ROi zTD^bsb-`mriR(AEG9M_rS3Q?*lwajHbC)NAJMVMp=H9YC>gmr_m3aSMEvrc0n9E!> z*9s20p4cdLB-i;v@c36L$6Ow4FLOwHnI&Q;9x~gjHrs1WdVHY0yId|q+LZRXXzxR4 z^&VxN@u;sCnqv|ANq=*ACwR-eiTo9CBzQRQ_*)9i@n1>#|IPS!n(%({_B&lx{samJs_J9GTEe8KpGMdtX=Z2rf`FL`?RnDAQTFGzlRfL=rCe}|O& z663$kZ12mB{}#%gGk(V6jQ@9xr&ZQR*$*Xi7dv!WH*xR}h+~|+^Vi7J!9AklhdI7i z%$>2XK(Wqk>DIs=cqpH7N*e`hRrLRnXNY!cs(YU(*^*MF#kh_EVf6SsQ-6WB=3UuyVE@D`VH%_>{I#PeU^P1 zBzI;d*P`2WbgJjx>`lK(uQ=^zy+c=9s>D7hWo@K#t&3Ri`i-$zLTEw13RMvIj4jei zeu*CIHuoxIhi|*zgtzyfXjmX~?;`s%Pv7wZG zMfl^Bkf~;!vsRsFsZ(#4sr#Dh80)+rHOg%B3EpFC?5H62T%bCSNnEkPtD@^^OZ*|T ztpclTBW0yOr>s6j)&|Wod-K0L*=+9t%G6k8s`MLG#{-hXGn@J!N?uHMc>B@VlUo?8 z`Fy4I`~{v_?=u4jk)__=*pm&cnXfNpZ5^LP)-Bhl>y3skkN>qtF;W#BzFBJ+&>4fH|#axHP8z+BaFfa}+<3wB%q zK5|(PSVFsR^NiSDrnH&Qd|Jzx^8c7?2ye|xQO)LkL!Nmr`w&`*t$0{ZmwE+h!AE7j zwr*F|fp6xeFWIh&wlQxXrj2b|W*YU^cFuf(wN3bA?-bgr>`pemo&Tnp*S>%?*I;7( 
z%xg5&A@5u1d)^pV2WxtJm+qNqETx{vnI+V_CXrlt@D!L(Uo{+^wQbslT(JYM;hi^2 zEgP=aSB-#QM-nHyhBjW;9?6ycw?~jOA8Q+OALhCAQMpVvuAQd*^VrAXclAwwgJ(-? zbwk>DgfcxM+rR_)b{*fLOlnSj6+BTbeL(kWPmsM^z%Y;Zb9ry#c^Yvz@G*t{1GF=B zS)!S1caf23T)aKWnCEWX*vLHiIe!7_^|*=I-WX_1Wgf|x=Iv4Bc+m&8ir9<0fl1$z zBzpv+3$)xL!GBTJe)O%*oUX-AOJ>}qlFnQ(O@g}dlkgj$Jn;7FkYE6&^HXNivLQUnq2>t9IG{x z$<_F0RsEk2mGvzpHvNh=@`&K=g=f@s@Mt`e;c48qCm8*<2b@lB*tmI|>ivjys8?8* zD#O1nO;f!Uo1h1{K(EQT-6l0O!ZZaQyrHEELp7{w`d}{QTZ=< zje2lM9%JApXd^yiJic2V`||jHbO^pXjlakDRO8Fzd-snmd^hkb_^#kL9^bcnEqt#W zg6~fFvxaw_k9;=1yB_{~e1C63(1<0e?TUAT6FH}%2-#-Sz?74%BUDj5etaXoJbu0f zZqMOI;sJlhkEvfCKNZX^lOOpt`H?+^@%VZ6B5*)1P6hp z5$LbpUwUou*eB@5uO)8SsIzup;<%gmi>#AvIoA(5PTq=lOL~dKU)ui}`LWfqh4tj8 zavO;2$$V&~9<~AdUHwMRx=h2RJ#PCkIru8s;eVl?DTj9ZY<|xPKdp-$LrmY5cVzu! zTW)T*{Ke21Rjf~Dt|9k>V=Osb7pvYcc^uu)?HA0iy6M4=Ol0*U?vai2>!z!YoBh@t z>s%%0ClC))d1l_`O5Qamsrj2(FYjhO7c-c{Uo`%c_V^1K|6Pn@w>|!bq~_e+ zjDH7X7acBmXvQBsa8NTIa6P`UbF1SU=7evTdc+g&0cLcn_?vB=ThH|mKiHaCayfl` z!|cQI59*R0Gy9OXtu`}le={dsMLEgSk#^TyA3Qb#U-s|El6{U3YKhxpsDSou{w+eohJ}rNA40Oy17g0y(xCmc0|GW97P7#0Pkb6vcGV~N$`bS%|M8~`( z`@lTdG%1`XVd^P(cmc7DF7(6#c#>SgYSE$2`+Rgl2KuLtF=bygY`NqO-wlp^%q`)s zcfi|W@_S^S<$RcAw7WE;{(!BUmRNBV(Mw|2d<6YB+dAnm&z7Q-D%>hlu0O`+TYyfg zuT#}OM<-RHgEng;Or3P(c1s34j81wOorGOt>ZCR3q$B90&2c)Z;aiprsNk3J^y>VQ zOA?Msp6Yl@K2D*}Q|KdKvZaq?9_R3`<+j0j>|1xPKI&Ygsz;Oe>(mi_bjp^GT?145!sE9Ik9SS5d3L-_np@cGhY%ih*K zKkVqq3q}uL7>r&qTyLD~CZ9%22}76mUn8GxCC+ymzs6ZV?YV?~v2w5J^Gv&o*jT}| zV8!9&@$t)SPETf$jK zK4`XtybT|9>tyc^?QNmlRh%*P%rC}^|F-v+qf}7`^*$h9Xcv72f!WkM4GG3~R^yiu z!+(cz?c}^j4C=pF3n`;rxqgQ0Zv0WXewyoi`}q@GZ?T_0#+J+?(>JD^ z_|2E|UR{GsrEVAPO5KY;qmI0nIv4Q1rqZe-c#}H$pHWBNOPw*i@2s%uwAlS*enuU6 zFLj3T-Z#gpqripqll&QVpO>R@^&{ch#)toR~u1>Y}m|V#RG6iCYvbF!unq2a%6I|2gflm({mGMNN2h zRdhyZLz~6S1ErWhtlxAy;~_sr#&U+dMb-w%f5-}#axG($YcJPbjQIms&eB@=b2WRc zB$k0)P*_aACzo6~qk?`{bB@v)&SsJ|!`d@z_KVCeL!bQ&cxpJ4#et`lG3N;msOzn} za)!b;6ghzyc3-A4WydZbOi$&w}7MW$YF(+z7qd@Vi@vZAdWIk^Q{FSafG z8@PZqfnU5eH}uN`6`@S}Z2%r^*PQ+u*P>@1;7{c&9It$}JBUp*828Tz 
zAC+>_$Mx{nK%3|4I`4CtrcV8n*`}rIoHns#EPc8#f%yO98|x=Hc4J2PJ<8W3A7=p% zvV&NaH}IeM4Gpp`1ML^t&*eNRdB^@X^PQ|URe*c+Oq=hKVE=k(kPi)98u)*=_I`&3 z!*f(M^S1u9uYkj(&z1xunLQXS)KNR98h?i7 z5@VAXn&`(vtXcet@)2Z%mpTJ9B=+HkU)DoYX@_{3q5ZS$r3#izOv7KMy&hn!yp~v+ z=PPC`Li(6MJVJfrB4ZeB1(B&&^ZQ%il=cJKUD)OE>pU^+9kILbr;Uhh+lxIM!LC9^nzUH~kG~3S?s~wYjm!^OpIPR>uu}47 z$R$7y)cl|#wD3oBL$?9PH@}8$01w9F;k9(rPi$)j*7XaS&y;g~vYYV7960NTz`5Cm zvzk5~A57qE08WR8S{ZMMzvkQ|L!SvQ@yDK6GQyaEytix|**l&U7JQkw`=h{}9o_-l zk3pLiE+x5|CDMk_#9w=fQF+rP1N4xy%hY!-F{pA*URgCDS0XPJ-_Lr?g!IDPd{BXyZ<};*FW8{yumjt&vRdB zQ$VXvpWMG_e=Fsr&%@YV{`csQc~<%I6~t9HY!o;uKek}NZ)*A~Fq9`;YG}mvWIc0~ zGnPv)HRah%_~W;$to&5r-KpD<&F+jq6FRb+d0~#5Ty6G0G$Wte4*VUzXPa_9i?ux| z>v}HJPz@K^ydKZz+2L=2hYV!0&io(1*+mx{m7a?YaCEnpP5f zlz8?L)=1W9`jW1U^|=e|wIcLn^XgTWzT1gTd7Ic_z39}jKH~!LV_tt_oa`@bUS^m3 zij7C>-DJ+4l-y{$Y*&%Nokuuo0a)->{45{yU^d&PKucp_^YM?X0B z{4CBx)Lcd-I0#MrO6d71Gecd;YTK4|_iyWN)I(dy;XJ!eML&GZ6Z+Gpl+a(${|85E z+YXI(ZTqv=O}Uwb$}Ma2xBZOgTPQ1X^KV=uBTEF9{L?WL7KtrOF4&eR ze&#&`xnSlQJMsC}Yk@^_!6Y9m=5`I><_x$weY=}Hj)ai-9Vc&21W)AjtWG8$OEaFE zZrctg$bXq`#+2K1t*k`!6S^wk*T@0Kw?Q97{lnSoJDNQM`2MN3U68_lmx?Ur8L`_S zWz0Rz(Aqo)2cK5w9SzBH;&lCv;oqj^BS;hK=%g!eNKNq_JP<4qEGU%)0%I!YyqEMGrs(pD{XrqGyDr|!v^7@b}QFO z`o(A7oB#Gz*npYgH|ci=&#XM6(ePk)_$m$miFQ?s$CT4=!iQ@PUflaC`J3>-(dK`< z!jLwF4+h$cSnchky`Jc^Z!KQ((y`FW{m1G)owa{`&LQ&*RlR@qe(*TIRLfcF(N#6L zJGz3l>dxC%M);?+<;;btz+mErc`EavHs7L)%!fMY((O~#mw(QDc$sxmGdB`GZBACx z9|s5R|8}Ku9rq33RPCA5zYv>OSHF8}7r#O97)fK#PfpEvjq{?#4!(%>3H|V2-tthU zhj-?4y>@)Q{eCzx?$3Ka^<=EN0zSQ+KK*G%l_zcc@>F!|lH~gIMg8C0?=73L?3?J< ziNxB%_dHQmS@`1e#rK9b>Gy><36D1>vlc`-y?^Qc6)ub3lO9RUD<5{1QH#8gT&fRQ zC!Y?@1*Z)sVnwny&fzI@%wK}1M3)IqwV#)#vcltOKb~LOkafZz51>czxsD!yrvC_D zhvU0_9^M`yR{nYKzl|R$ctz%#`{~7x5_~y0Di03g=a4%w@K<=r#Lq|gx!_0E$TF!@ zKpX4D{{D|S{n!X?GPnFaxrWfbd838z4}Z5gcM>tA@{b1R*!#5C0KQxQ5-TdBy`k-h zK5^PP@R{vohj-J?mzg^~;F*~D(Np00c!HYl%%g%Y;HyR0^+WkebW}WFojNaHjW+qJ z1^9NHO)gHMAJc}n%MRjK&ZABYJVCjYXOfG%kXxeH{t5mNIoUuQBa=0hUEo>lsvC(z z>*n|kreDYT9ONWg`OC<|a1VL+1UbxHWXVUfa~jRE 
z*YO-5t?dxy%AcBPbWIdpFK2vYhhO9!vZrYU{+|nz*jsu+>lNAi z9COdB%6k{d9Msg2iTo<`imrCQCGsy{iJrOrS&;{#TQu%fPVIPIts0L$UgpliRxMD= z)}x!&>}Gul9;+Lr(!%%<9?rqufDaKsepU7_++XQQNu8^C=D9UbD)X~R#<1ezM58=s z=79W4Hpf#I5*&V#T;3H&>O4QbiT|RM zyZ~}lr~MRUtA{dv`ksi)T#SsZ^Lg@|eiL|S_I>s$<3jj(%?-uN8bsHBq|!t->%15F ztkZ{6$4xtCo#0GE_D<<@jjA6&*JBn%N¾Q9V68Y`#|JME~ zwChH87kE6WsnYf;?~NXOE|KXQknK)CleVPfnf**1#LF&pVgKrL@uFY4p{hW&FwPnC z*KmIIR^SiOcimMp(&p0lxv--%1lQjkZ1bncD$n=_t3u>qOMU!PQ$8%FzQDfprpK#< zcJpp(Up~>ZJGWxb()|Lj!0GS5ZGQ?eO=(l;C@`0QW3cVFrR^KmR)v0IwOs&g9?hLf z4#9k9oYJNV(}U!?P+s^_cyj1CCAZk*W#U;*uCC<6*y8tlDD>+QWr6YsY6j$+$4iM)!p>$c$^$@-1i z-*Ea%E8B_BwzrYL%KS;j(y~cLDfYCEO*vO<+;};*uk^DPc*K?$Ie3N~U$eePGZJdc zce-jT`rKQ}clv#1{CCwPa*ajK<&XUeGA<+hP09<*(oQ}+Ch&^A(;)Ugv^sUyfc-xJ zulR5hn_dgNroA&HciL>f4>?5p*T%K)BhKmM8asK`u~{zm0JxEt3CPVvYrG;!v#acW|D zC;;9H=LVyTIa_59XRFM`PG5|To^jvYO*!l#xa-rZ$HMqE51kMU&g^u|VV=9<x|YZ{=*Nz=x?KpWUQ4GI*sc^Nq(u z-Z}OcG3Gd%zA4_Om-!*`c7dUC;c&=G-eb=LGuwJiB_9E#KnpYHYKn zc>CvoWz)2sYgbDwP7l-on}c9*4YtiAIS!WHlM&7$ zj)HI5w3@SaSN%di$MzY)yFuG0E1bf;%vsa+xkluIWBW)9*Rg%F!Y3D~&@JqfSu=&T zNBT75@yH|5=XK;uMvyJSi+<=LGQyLnl|0XWV|nJ-N~TPN?@bv4-wu$H$GX+;$M2>0ezOzcVFmuBEUKpdzZU;9|+v(8;XDRx%i*)Irx8-F+2D#`vUww{2BQF4bL6?50AtDTJRl@e~tUB z&%ys@8~^cm=sv|h#4kY)Cq624iRX8r&6aidZu>`Q@dE2z@h}K2L>B!2L5p`*eD+*e z5J!vuIsN%_!T%YwsEwmVm}_S)oR1bS0?TK~tE)bT7Iyra^IyMW#;@lCYiyQ#-|5?R zWSR#)O@Uuiv3-Vx8lZ>B*M7HK)?1doHqzpAom^oLe<}Zd_0B10$=$mqcx+{27y+})|6ghO=&vxdIer=wb#C64FZ3w z13i0m1AnHDWeuW|wIq=hvKCQ}O=GPmS?gf`kNVht*ezM?ExU#>&0}npjBzeVa zli2Sw;EuhB0nTs;U&)z6x2{VtPW>RkX#UC!*rLsKDt&vx1sjM*L#S1CpgpcIo3j-BlqDP7!D;G_wioNCPlUoCyg8v8&_nF_aW807Tt9Z zZGS?{DrF*PA4~~$G%^PEJM3M6Zdqz17!Ut+g3*Zov;h8vr}z4YtNE421fvBS-NN(I ztJ#YUK6HFK>NP#QHpx8Kzj5(m^IU&3##v4-nC3lTURO;p&mm#0qjxRw*E;%-huO{E zgYscX#`VCx9k`_*&R1stCugwIj~kv6*bB(Z8=KXTyXG3T9G|9o%_Oxvh<>B3y|VUR ze(y!*xna`wqI)xMG@rF4m@ymcS&H>+cY^VIWY|yWN1i`O9}a%YyAt5Bv4N?;BJ>wJ zKh~Q(-Thd(KhYcPNWPG@LUPlj-awtPiN>Faf02L5IfE)Xj=F0Y2V-w*e>B0^YhCYc 
zUpT?|59fOM1mkt*`hf|?@3=l+jO^^4<(#Qv*>J7E>slL>wS%_&XOhfZJNd4O$W3`C zxhAsiC-W#Dys#G7w)XuWRz1LiVcN~HTTP9FgA&6Nq-u!d_p|XZ09PSn1S_e$d9PIKSfP9pFij^F0|WM zFQgcgoa^H$M!s|X(QsqDbA8PSBhR^hFwMvrxXv)Loalt2Sm~&m@ zGd#}q+Y^i==X&a7!|hz(d!eD6>xzKU58NiqZBvcY&UMSB#wq9eyM@Ma=lalequ04s zGmO7l*LxNGbHuq$7-mGA>uW|De{!yyM;Zs6>qBYA2hMd@y75P@#oj!?FY;k8@a$c2 zL5`vL9*V@rP58jwTUMPm1-|~%xn|67I@gSOk8{nKf6Mhid+FKSjWl-gzr-M3vC8dL zQ*(`%oa@Xyqr<)yeWZ@(8Tl`aHSUUg#vItIMzO!KhVKVHIL^3(cjx1oAGpY2BnBZe z*_B|eRe#ah1~tRT_vO4a_$#b)UYfvpoPF!gIR|r>4SuUL>8H@m{4S83ACVKrcMC^X zXw9dS@e}a7ynF|67xJQyZv^I&Q|3G){!v}PdBwP!x&$vei ze8gOKXOC_in;49~#JXZVvaltYbAFK@?@}g68wKddQ~0`(zuvpQera7F=K-7V_wqhM z+tP;Q%SrAud3c<~XSc_<0$m>VK*`=C^ocxovpy{FN&9jZ&_Tu^F#@UEkTJT^1sqmh zL>{sHG2s?h>484Jhazjq5?6_ua_RHW9_T{f^{@WyGEe@*&OKybRX4Ew;gp@!;PG>-d>@P6*|p|9i>{+0E_ZeW&qAm0ailXnBWq%2-Ke*M33rU&O8j2Qej&)T1u zc@sM%{vt6D#Xi3L`JFROej3$M1y3I(Mj~fLMRnRy52|hXzVSwR%>|}^FZo%Vm-1xY z&&vIr-J2@==lG^c_2R9w{n~K8EvacsKSahlanj@0zuGGN{RuP@xI2fy{j4*_-Ckpp zbA6l7c#3P0-=#jaOk{Tj>$?Ikwn@OhC&}0l_q`m^r*+H;eHZ7l`#H;zoWR8i!IC}9 zt zH=URNMy}vPCDhM^{&M!d==Y;3KGji*{9i)3huPOSkG+f=s4ICgQ`t)>`x#%D_Hgd7 z7ZUs_Q>9*CasTRFCO%wa#m}^_Gvlsv;;swguIYE6JpB$_Pl~&y59j$jfgRcmWB=rE z)~-fi2c%N(dF|m`_Q5S<|3ctRtueQe=L7JaiA}Gd-UgmYU7hkNz#uk~;6Y@C&b~>V zJ~^LcpsxbP>IquUJ#o*wkV_itYeL5XUMjy)p2Yg}56HE}R$}jCPIyUHaK@V{*^LiJ*X+Mm^E8I@z*q!u=RmOI4qHbXaFRb?oU1An#;<)n)vSspoq`_PYjrC|T1^ zVN7C^I?s0#2OlS8RqH@m;3zb6N$$xuad8{CsAqpFvZgKnZT7P<-UU4Cf^O!smt@~S zPWV=y-NK&ip!Ixj{vR(etoYvOa2~M}Q}$I~XgqQ<_T(6HdUW>P>Yn#D3LaY-%k(wb zn?7)59+}1U2d<1GI=>Q|jz4p*Z$ViontXMi$SAYDuNNR|$-%@A5m|RPZJW=>%2|au z;pN;*K9`(L_bYfV!v3$F{5m;Zaz-EL4VmZkop8GYIg8l8!5Iq9xe2?$+Zb&ge1ac++zahi%heB{0-+FI1FGH@ZKAy9rkBj zXvPpGddO3F3je}xLvsJTzw?-9+pc6Eir}@I_2hxR+v)p}!M=qy&w(ddtB=o}bG{Gm zQZZHG_4^?K;iwQN?d3

Kn`HHKP3yPcdON-z3%qY%G z4Hj1qt0-T^Y_s?ST@ZrTYdV%^Orq3ST@Nnn;E_2{AGRq0oeH_wTaKSPZgiP>4hyvJ@nf7wxifwBF@ zfwyM9=-kiWxqRE_U8_~f4lSiWm^WzKIg#u@%PSOcJE->ryO`cT2n0F z5cu3Phh~l1=lHp@W-0SqY{PDM0{S-5^n+(`KN?(G(HFCb{g)G?E@RF1x2E3|89Chp<@;)`wdYSrfbcFlMO{pQhv9UJ)mjrhFsEm`t@%pK(~12o@8sLE4d6@m$E`t^wAjz}TJ{Y$ zrAFNpW!&w)|HasIqoT2S*hJSi%y0yr;WxPSH@ugt-pn**Ou0hNWOf@55YrQU$+-Ye9N}EaE6|Z|aDEN* zRrcUV_`X(YzS_>7;_BCz)a{hDfF-e5;Spp-PjN6BnAce4*Mh4g-=g*_if_x!Dhb1{ zm9+`RN1PecLq5KLT=3Dks}}D(JbG4%|DROVlyNG3iI!5i;g%&kkL1lNnVUbiGQhJ+ zE!BGFJ7x1Izic2%GI)kTt|M#_XjUm z(a|UF-ybO_F3n!f4~uQSyG3$uRPPwhiFu#>ntazdI+t%7r%uL?A5s5kN`JC3<-XKS z*YwZbKVsN}Rok@GHy*xk#3p!n-z)frOSNH*Q{_y~{A*6@PX|v&O4q*C>TNmJ`ue_O z`swatHVA5(A6b_g$Pj(zE&o>{ia|(pY!l z*GC*JA0BtUAKbNOR1~|aJr#bE@9_0(#unpQS5V@V_2>U~EPcg)gRh%D4$g8J5@$Po ziHgR)2fr;H6@6+f`6iSNmZ+iv`mEr*v5GsjlGB&)O|W#nFEv7?=eS3|A>ZySwZBy~ z0b5P@%HgSlw~vY{_^PG=zPkMezAF|qlyW1Zyn!4orx6^yac0q|=xp{q$-Z?j`Jycs zLN_^Et^it5N7u%L^$S&dg#Di$`twgy`*d==W^hL8#K7P4@ges7bWKvl*v5N_ff%m> zdw%CsKI+3 zqFJlh6aNb97q7j3RrhOqt~zyjFe-9D=Dp{iiM{Mh_ps_ILq+cQ5HJvy13 zwnXf_`6^0)a7PN?15HU!X{_XV<#1(Gp1A)}k$)*#%Ja8jn}4+J>d?pF_+i!etB3pN z?B9lreKQtoe}P}&(WT>*v6MQ@!^Q>hd*znR?@Ml+oU7rq^C#wf3OrP)WtwID+nYD` z(EoZmH;yr;rJgg^Dd0rT1=h9HM&RB46J&SbAM8t~FRzyN z{ABP|!FRt-!~b_XIIzc%$hu@2IEaU7!$-q`yWcj)LV386nf ziw`)J^#RVHkhKSa(U%;kj*9xZcF~S!EZ5}cE2SB8N2}iqw?362d+@UKblXpbF(w86EDYBxMb|ZJD zMSH%N7VTY;7A-)YM%ScSvY;l_h2mF!~-rVwKy8 zGlL^LX9neZWc|#*V)ln6s%b6s>Bq*DIPu*4J-P5uU@moJT|ur3=BbWnf#vi(<{`2` zbp0t;W~1m<(Wfu`HdcKC`NsN3_*5x--;jM-7s{FA*mTImh7o0b1$@sq#F@OU=SxM$g$~k2 z0d2Gnw2@_g1J!CHD?FJtV(7dp(2qe`x1t4IUZ|AUW~TO95wmN{7!6E_#VpifFD1)K)w^J^XtP#=myq& z=G4|j+P;Br#ojsJ{8p^aw_@YzExcy(5;&6iEb$_d+uF3v@9%bKeKgh{1_t4WTlm$W zwa{~q^cP1@f;gEbZ}IIj=|jdOJS;RauhEghXAVyb|I6I@s@>1yAII98XkT!%o%5+2 zo>;*CnRuQMd*u8)ksHSox#1St5S}RExqdy=G4TdorsIn; z*NV*hZZjXRY3NydC*WZ@YfttzyicD_PM(|<(n?OA#2D}=&GldBd$)X>$Y{ndzl z6>obrI<^&dr`XK0CQypqIf1^D(CMSG=}g{2N7UgXNKB@Sn2gK4XyD8niOD<-{Ysv< zVlu?I+DiUl#$@8oTNxAnKj6=tEA`XK!xEpzS=W-ikU8c#WPfqzg@4;Xev0rgF^=aN 
z@h_0!dTpv<`bxwDJoyRSCm0I3F23K%zhO(^>-*$8eEi+P9!T$4wLCvr^7)zHz%2d* zaigYo;zrG`VmZ$t^Dp!tK^OEqIX}vJQd{M;0z>EAmPRuMATg?(uxjsfYXG@hT{gi-&$<%Q|snO`_{~lMjPixPycX!^vq-PqbGknKf0QH zz)tX2YvND#E!K?}INs#DY+`5O@V2#E@hme}PS#W;t~CtYNx2imvSb`GhJDD(2r&l- zFM^LUnctirbde29H|w&<_O=3WVB%488^rb6Jlg#)ieDq=;zTY?3Yqw39sW6V!7y+W z1P7br#wYFO*lm(4Q|*+WNPJ3k>_J=4mgCF*f7HEsd{p(h_TPz%-t7ZSiFVGtNg)i3mxfVE}@QD}*h-rf?RO8`Zw zRa@`vC4kF>pb|yMSa5#t=gb@!klN*S|M~qfuQTVI&-pCR=lML(^Le(nVXld1?*q#Zm|vMkH+5R$ znc5$%Ik>a77F6%I^l|F7o~u@w`7$huA2UAD0Bu`TjZbCt~M$Vk&SIxk~uS^8V;k$5`uPN179V z-V9xrcOnNo%6qvNU2|6hxeh4nhAvb=4=SMtdywO64udL^8=#1E_ zWZY|@4=!k7@FJysE4m(Qoy*#gwT=yZlbq>h<#ROsta&E9{PVlm%b-Wi1BXnxr+h%5 zm>l588`vv>$D)ssJ`}3C4Y`Y+EVz~S6YG_}B-VR?{MRP0ir3v1)9sr%xP)(wpf1t7 z7ouw~00vrI&tA-Pjq@TkbnU_eC1>NA99#t73gQho@wj_ts!8=P9nI>-kO?BU~a z@A)A=&t$K0z@*)|p~txY?DTzJXbJZpa4)#X`iUstx&qozz`7ek-v_&eCdPPBj{AMa zbAs4{i)c5DjPd!BGvl#fFa3}?l{{O|O1XHPfi<2ma*g1j%&V18GzKeauORfaRrh0O z>h4~D_S{Cvox9wu`|P=ux}{!Zl$BPVPmxokUOBTWOr6E-N4l3>e@bQD-h)0}a-qd? zq{Vp7Txjb{d@p&Tps|}`G&a}dHS?%L=&SWzc+T~a&;8d6ja?}`5`VRhHold$B=q)c z&{vsn>N(oI{%kyolC$*N<`~Y#BWn!Oe{47_S7BrN5xIF|Fp_wJg3xt*7n{%QP!{v| zH2KPyyZ9XEhtlJ7zRtvp+>n!d!HIGc2Zn?OaDPn7o9}Z%HtxH1{Kx3$dfHSr2@d4B zE1I%J*XxeWe}OswAK0_qpXmy^5GAtyQD6!TL^deAQkhpro&*h@_Az2Uy6!KWc@{k) z50C7r9P~lj(H6B7C>d7CqtL~-^nI~kfl|q-Syl`S;EFY)T)H7#|;y**3USxnC0nxonALk%DT?P(F&R4;;Tc6R*TyZxZN==xfywLmf z^QJ>ud~N22g6Mj_amabrJpGhs=Gu(ULo4^;yy?LMz@Qu2Cu>qECjMAx2=gI48b5uv z@aNB-QGyk5m^{GMqR#?@W8~TT%JHLjwBT*(0if##8NVf;jKJTyyF!^_;qnRL3*fQX zWUX~Bepg~6eV(>0`>3_Xi{o@#a_Cv#hz)j~l!b1e*kILnW?jeX%PxJ|tnch~eRh2o z&CUxw9&JSJ8Je#m(UpRDKc0h0-mZxNBsE~KaIq|iO z{~Y(Sc4O;3CzQv%&}&&kF$k)g!af^JMppwvoFumNh1A>%}EXOA?t6%LC!IzPlx~WdSrj}4A4=pn!l$^XelxsHkCnQ=Yv*lGLXd# ze3pasUv$|r=Xt7gkIZk&-&mW!m3dE)sT{~uF4m?sr)9!}6Xx{HTFno=6vvs^y8rTg zzF^jU{`r)-92S?yWZgeQ*`5aW>a2Y$_vOvxz6`$^75&6Yjdh@`JA3W+o4S$Q(p7wW zK3S_39%IsD&J6Eie__@GT;7InSZzpdBgwI;#q=O%JHm_4)x#v_{J)R(y3Wn{Pag&U zeSC{teDv?-+x#ANnCmm1Z}U&Y$jd%RzGW_tCg$0ca#o(ry;6>S(*I?iO=;80v1zug 
zKDFA)uPLxNH@{|y)%SRQ%^-cM00$il!+#itPb&4TJe^yX{1W1ODtZp#n+d)rE91T= zn-YCbim2O%UXZ=~N{K6tttE5~=qL_+7JaG%IR;<4jsxZz8yxx{p2_-}&H5Bs`ZIoS z^9e-VlqwvbW?N~L!g_o z{~hY?AHg}_$T`Q6i;%4%-vbWj+C#_U#-m_P=G575_-F!EOMy z4d{U8mU@C8mU+y6)YGTA4)kN}OP66Q+)dv&Cn_lRF|G1%+sH!HWsqM})>oD4>A3pa zoYjE6Y_00(^{Pt8ZOpr@J8aPrAO1Gf-_Zm9&cdexoRN6Yjyj$T?z{2P@v54$7=N2% zz^-Oew0$%FF6KJ<@nwMl*#1TT{UT>|S#~Blx9cQlOPn{iKaT$a!!jfsFZJEl%7VuWP`@)%8rjmJhsy!gOltMMH;cymy5F-><4|qb(s+w*) z@*4GsJ!#1xVhM9=sX{`l5UEo`4!`zaw;KfIuF8ujM z^dZ~S^xp6g^dV}h4$k%ZC{qsXgvVB3qcQkzep?Xx)+*;fK9=@sfp-};=S4DZ?t58F zO+0HMM{*(fvmIMBNnScOa9(dWya7Ls%^vh;Zeo5VW=H(4#jj{AYrshia4#@lrmBID z(eXr8r+M}icJa{dob!=GU*!K$dunL^NTqKWU8r=l z-X?M0F&oWiTz#_E;^XP2jdpyTS{ciKKZz|OVJz4a%spP4;4=8)qOBa(k_J42&puKu zG$5GVI$z>!9X+;=E_8r74Zf0Q)>jL;xvh1jsX2Sd+vXP=ak8wh6Dd<}BxnA%L7as_ zo!G7B6)|^*eu&MLIv30S2fbp&uarO$`jo&DGxu=zp%LXBO~i5t|1#gV#oo8@KAk@O zkaxn5jsxSn*6V>)?O}Xsn`;?o9Mr-~eXPAo-mxCGRFxO`8)lc6?jiO^FM`hFOQyrG z@Dpp-UFc59Q=^adMEr7{Abf!9B;L!l)YPrE>sRs~-?Co%q%AV-UK6o5aF%&R@KYUo z;?7tcm;)ZzLb-PsA9OlW`Pb+c>Cd_4e2;*?;M)1>@WQVomeEVxz+CWS*@Tq9hhIc9 zmZPWK=t%d=`f4T4us8`DE_M=N5-V4k5jo;%kUxFj-A0F|2c1THvh#o zU6#sufc%#|gurecK0aPxGM8}_LK7vQviMDUsYm2=A6MZo&0M{eO2@V(h5ZsI;X@zm zQ9Y4=2FA<-fb7Fu?THTDJ*MlyEn zS)n@7g@N}4^w|MiCEl=&@1Vh^pUiHFFI1iN!Y2wD54pUvck#}_|8wWead&xXkoXPH zdFC@1|EBP6we;NYT)Z>s#(CZ)o~tc5t$?rDLt|$lpc~sFHlNwplH2gV2{V=!Yyi#B zDsYT(#r#M95#N%v-%Q9-zLn{YtVEd zrC*w-sk(8Y5^O9`IvT-)TARAjHyoXmp>!OJ*V~p@Z%(XU=xTd5bu@E^L>F<|(yqiP zOPib(l&w(i-zDD|XPC%bm~BDZ3m;U1g(s7Ph1a7~y&gH1_)?`w-KhDHbEu>6q#DGZ zC{m9-zkx9s*h}l_8~C=RDzo#}hD@c}MLe#^f=VX+rO!>^QKi~>tBd#0o=nYQ(x7?5 z$8`P+O{ibQnJL&=>upO{S0cB%p;MzB$kfz<%(2DG|Kc=}s|DV%T$Wbsa;;4XN;~I1 z7n*;u3C}`e(9Zrw=1As8V6A{#A~#ArYb$NBmRDAEiajlrmKQ&%IxO@=0AZ;9EJxl$c5ep&zYr)~f=O4$P%NV4bg=0cf zrOm$%Ea(><=HEL1`HfNr?| znlhtb{ImoXj$u=S8uiL|x6w}xpGOb%`-TIfxG$v*UrOY!Eycov`9}PmWNphD>ts#0 z@&7$|UNover5;My3Wvu77Z?8TP-w)@1%W7u$nUw*&le+Oz!| z(qnd1M~1`i{%%rmS7>@r?2s}K26MJ6lpL&cR|Mxy#s^h=F7OZb^X_@x)wyl(IeT!e 
z$Wa4__DjxWiP5w6ZIc+26_Y2tb@DAra58H?8ya>3JUg(wy#F}YW8m0qU@q(9Bi2f- zt@Fz5{JPx03*I1pp`Fn2I_MPqRPS=f=$GJO7j(Xk^(H$kN%kUXSPFVyFitlwZ{+$zmi*qIU zGT6(2tLB-6vM*s@)$VNeM9^2Y7fTuBI*tD$`Jc-3$LZHb+CFSg3vHx7qF0c7=SzV@ z2t2-q_Y$KZaT&F;$Kai`32wKG|K8QW=Njr)k{{l3HGbKTBL^+xJE^-C`Hh(IZ1LR|nQH`lKk4V;6#7p4A^I1leaE}2>p_0(#JmDSMh9vIW9faz;{ANr0>1dQ_pw6pMFTc^`#ti?`M$UMQ%jyH@wY$1IWAgT z2J;Uur2lE2B+g6%X6#AjOfHehidic{w}fU@^53yiiAW!$E~!JlvFecePJa*1I7Uic z&fZr1VG8-q;UZ@VFm+@ok%419k%7=5k=q^6s5I3RNrP^^{`icog`BN=)V_Q*?MGf` z-ZP4o$X@oMs)^%57&4pJB`NXDnndo-xq>F?DAg zU--nw@R^UH(F1J*{R4+l7iITCU&s0G=uf{0-*QEnEOMfZaUgjz29_iRmqk;iT#YVn z8MGzUt3-(ZLbgivYgZ_dUhv=!Vr{&c>3)qdYSf<^=VMpXW;*iTb@WAKjgKf}sLNNU zUX6^w7<<80p<~YLmB=03?^T_9(rc7R1K%y5pagcolf>s9y~v)q)Vqv&)A?4vL=QTO zwV*W@>EOeD_y#9b_d0?k{B&O`pwx0-m4c-@ue(2)W3w%dpU#9yH> z*)z|^``EtnOdQ?`;tznm8Po3|FR;r|>2{^@%j8#7Nxp=pCtT%~?zTT9_j~2+O8j57 zc~=Bn6V~1ud?I-(`oG|@Ml~316#kVHYGJ-)eGa272e4{mUe*D(FtAUY7acflTdxEM z{hj%_H(`E&%~^iVJtI89Aa!(6_dz>(8ff2|cge{nYv#9tTXxCAef~Ux|AX8jIRw|* zQ+g|q%MPwkg6|GgB5hd`dzpf7=>T-^$suIL4$VtU1~`LWK9pOl23}@9x;WGR8>7j) zZ5z5LOdbG{m4={C{ZIPTY8&d`PQ1VPlT4efuJYOQ{SVrQ`U@v<#`tI@@?+-z8PAsv4nUc>G$8uCx@7X2H3l&D*MoLgvu5m2SD>>4S3Oi zX@5xxEJf#`V>dJ$y5GYXHO44B9C~y*x1#7;yZJ_FtCx8YKSv)|C)Z}K8rL?iq7&)j zD)LyEtH{~KT6b}n9YMd#wh{FYon-l53;;d#MMTPpoZ+xYhX!Y9y$??|ol)m|$lv0U}v|=JF!MR~Kc3#=@ZQA{O+kE70qw5{DqjP@Km9bp&v794` zbxZ7t)V~0mNYrlMcl>5Y@GZp?IeF&*;>lC6{b<4cwv^su*E!*bd5loII4OAb27f!UKH_nb?b$C+d}>moH@TX5B(yxk1%$?7S@j&UAD{N^bceGd~?>ff_1iB=-d#K&gDbr@I~Ju zK1t$(p{Tk3FtkurhxoNx6+guxeuq7mn8mza?;$190#1zJ98PJYmg_8@{g~W7-$EDo zBy%qLkL<*aZ--|QXEB3QjhrWrg`1UH|_i~jp%07{dL@^a)GKvu$4=&@|B~Cx zQ8YE!f}H4z?fo-%AtzR{mvSIuiS9x6?S~r|{=5(wwG9|}zlndV#Dboxkp2D*VvneN zTkYtgKN3T}6x`(fAc-rlBqmq(FviVSmHrCYxXy_wulnXyrRT=I zX*02{Lp{+N@;znCdzlXhcEK{S3$izJU>DqlPlVVY<(&&06MNEVc&P*0VF2S<#3>&f zvGyH#UJK2b1)PPiRI!E{@I|Swt1l7#N)PrziLVtLJ-DQKwTF3~DzK14U-%h&X z9K5u@7vt~3M}!s$9}&I%sRyc%dky$TCF|bH`oD^N5;Mpj8_S1m;-t!64I3!>vM=*# zbOK-E)4A|z=w%ygG$wofV|=<`njf3Hx>+%K<0fZtDJ}hnTGafq%AsTVnn2 
zg2ejoh9BHb{jKNW0loN*n|zBgS$s=iEo;m71hj(rI)FSSKHMS;hRJhfJ@+ubckf6F zti#5KF0)t@h+=-t9xFB-um*uP&yhM)usy!o{`_po) z$XSW_`w~3v2k1wd*>e?VljCdvx;JRG_E7rFwk*!COgGPK-m;oGx|JAMZ*=|};X&kW zh_0>nl7mD$b^RMH&Dda*p)csFU8=UHOfBeLLLPGS{US9m__KwdpzEwr7^6~~3aoQN z9W#iBz>lB_{?a@gJ|(X89&hV;k`=7ZhQY^(YIYqdCA|IM()pa(P?0BJ3?Kr zsE?buKhfW~KO_(6GI<{V_ORMNxr*CiUH0>X5 z#4ba-F4~ZNH)}4?!=Cjp@{Y_w0rA7+WBS7bj0GNY^d)dNAD@fC$n*~!8WZ^OU0OhB z^jr7`bU`1)FT;s`Z&r)a7p9$NWF&t>eTl{%Ly7M(WKSz{y|uTH@p|d^KfAYZ+&*|s zaT@Zj86OtgOBE^wy#-CHS2vMgLGZLd<=N!SCn_O zfXnc@28s3Rno-^nh8~GMB!5g(_5HNh^%Lo1zCVfB6Zx<4-?1oppR!H~5O>}&0>1;R zOc}9jQis@2@<*fuB;TDKy{I>qhdGpA8msT~h52hVwZN||N)CL4@4tNSsJnlw3wx{e zPUW3aG$1f0UcdRCJj>zX`D^66ixS$BcBZn&mUZche*X=DPY?Rrm*J_kjG>+D8|d+T zz@eFIt+h9zKSE1oALKzF)JD0OT%Q$sF^Qc0Ut|UM{|ucC^*Si~_Q9lpoD*Ki+8B&!Idhrc zprE7PzL>My8}rwgIxf}(vd(Ee+4e_~%NHw=w}&SMe7y6*JG0ET)ldd)%X@hzWq!eT zQpT6?y{6~;wZ4bmAg{!{DE$#Aw)|R*DCk6hz zREhklK<$t|?SGQB08U_Um%YH|U6-mI%wNr)7IU5kJ`7oL*i4xgD-pxd& z5cr5}AvhrOZl}*~B^gtM&7A>w*iQrTJxqV1T z>~;Is4+!oDcAYkDW2ez{rO=^C?^`j3xi9R%9Pr`3UA%DFZ^1$Wj)eaf*>N<{Wt-Bqk3=;-8W$`r|SBDonpyH4;kJ9HB<6ZcOp?}*KD zcIZjUowyDgV+yvsE0CpFixN9*oteXVxsm*Yo+Q=%nNH_nzV9 zoGp@%|G;2mhob1#?BAVhdYJd- z<(~crpA@}8PG}N)*1xj1cc?@6=+tN6TPSv{%O@;dE%{x-3-vX{oIN0Lm-s1_b7+RC zIsRw9sjg~=*gfnJ6GU0WmYaeTWx*fl`8!8cs( zkg**nPg5i1Bn~fPjwvVfOW}o-H4>f)tYzKE_?64p>tBUjLBD+==z(o;ujsvd9z#b} z$e9V)Jx@_SOzs~au&4uG^r^vy|E$z|RD9=u27bxfLKe|BQV+aV-i@Wr`>FGCWNr^< zpuHz$xktxaoyM53K}g<Je9VJq50V8vhPKn@gXN6V>BGV zx5RqimalBdabP2WF1T46qnR&k>Akm3@UIs6O-qi=lfoRa&qsfj>n~xhq`biGX6EW9 z^k`9=qc<1(*=%4pk@1QDT^JaOj1fjJAaL!$#+j`;oYF7p=ejsN9O$l&Av0AXKUDIq zz#tE}2z>r4VO)O(cN>AR$gWb~!B3(!;v3q6AGr0+=7jREQr?==#eLD56_geF_XFfK z`qd}A{J(_+2_ObHr+x!<9d>#K~ZG4CO?mYG(+~36i za2|WW6lJ2>R=u`H$_U>m>`*&q^1fE`b~2V8*68QF4_~aUal~o8)PWq+k#)N*Son}V zSU55%n5U)$lts3U$^}hVl1ndg+bm_3w7q_(y3a%3{`ehb6*B)O(QQjR6-On8wII|7 zkHXgAUKcMXdaqrSMb{25%MO)DzXy6E-vFmg_yK?6k52=83vSxss44gP!14QK4YEFb zGt`a|%(pc@7kzLrHp#~%xHkjb5E>$FM2R60+A9A&Jij$ZnI!nN2wL+wIQ+|tm5J!~ 
z=Cz{V^|21S$mwn3Hv340XTqE0YSG&6z4#`h-;>;xDs;kkX?e%7Q9{c-kv!JD>?7)a z*-~AZy52lPx^_o-hnyjO_`#+@#YN5`PH%JdVa{#V9&!#5xCBQ`34Wbiyi%o&~>3- z$Q@_V_Bv?0l+{MQyhek*7sCU-0B2{xqov>7=>4SM?eV&VK7I%s55(d3S)=R=pf}LL zHsmj%XhoMzbGQ9IWs1Np%sFJz&ibh~6OWJ4 z7Wwe!9fziv?VXIaOIubOcJ@z_8%_E%nb?BSjKShzGES?_w%?Tvvm1zB+V^v0rwZzvY}qG7c1g5P3^nteMC0}e;n!lLD90a0+Wd;P1i$?bY!SuS zDDFq@^L+=|f&GC-?pw=dp&<{UFJ|4v>=kt%!HbdU>%eXKM)nC}x4?#aRBRS5$tQ~4 zqT!`j4%{)^6YpbG-O{ji?k}g7){z5uG|%uoi9P%Em!DZ>5&9 zyc71?7La1?qyW+~sq-w^{6vz2mfa>jU| zk82})UQ-7Qt-6~%Yob1UDBoMU?qTd{{}COq$RN^o8s8??U&Q+Tj6Gi4@~is0ME5ZR zdjWXZh72d=ML#TK5&ro{p2=85zO(uo+b^l16YQz9I8SV3&y%QYy&D|Oq3(pe@g~b& z+cCluCa)ridmfj2c)n(GIeE+3!eO8N2<_|qeXVN|m z8QR&Jv`_3$QiqrOI^N5@)Kv#flKK+$YB8S@IfG;>dcX?wfaT}`f7AAzhsnE{;D0K8 z)uzEm>A&e;MZb%QQIh>uF8z9nHPdYNE!VHpH~G&V<}_WM0$p_jORLNb`qE5a&|d`J zg060h(^cUCW}PLm`b1Vy>h9@3f}HZD^5{ht(avSakhg&&2boLRS0|1|d__Hq_@JK| zpWv80{~mKAXJd!)@i}?tKy;rOj6D+=#Ll^veQZusav*!yfI#jrTOhB~78puS*CCxs z;C{wSzF1<_;^%6AY7Y)E@jWLrP-y=kY=^osuOFFPS(hC69l8%6`u%+N6Lq|Ers2Q8 z`NrU|a%_gsWA{b{xrw=Fu8KD+k;XcQwuce-;jMH4JwK>#t>hWm7hG6Km0Jv+=X@OWGK%RwAwnGq!WmZ0w8O z>`i4q{Jtlq2a)idbnh573mq}2R7ZA#ts|V~^L4?`Wnc(3t7z z3omIdxMa;pF^3iCTqTU zj-O!7ee_ex-xu$fhRt8d~J1`Lz&{mPe5ZH((CRlX+E*=3E9Wz6Z5(Yew}Db zC)yhXU~&vkMpK8Eb;3R)@X!vd+8h12%TslQ(vcYPvE!UWq(-6MMm_@;o=xi%*OX-PnEQ^h10Q@I|Nt=jyLz z-sm5(VSb%_>y6m-7sDeTq^#A?y$SuS1E1@VVH|OsKJwt1wX4N(`s4`c1ACfU_AN56 zzn1T1e(=@o{)^y7Y%TwVZ+nrU`}5*(0?x0~KWTf|$>^kB`Y7wdt+ZBu$XwOYPoeh? 
zV53pC4xIo1*kbO#h%xu^zHF+e?+D+OT()$zBSqQgKIMHwr#!KYkx*T6iHuw7J%CS) z`0mKqMb|5~t$NA?<7MP|D{W=ptgia-#fqTREA^YU9m=-xPTKFmW*lJ;UCCNTr(ni$ zOFyO0(hth-DJ=K&4Mj)QOn;p8XY}d*xJr8B{gJ#OF8MCrkG49O*;lI{Vfw4W3yrW5+)~E5Sls*;O9ldhqvcSda*Ai^W?0HIk%r8E$pD^b&f>Y8rxtIS|Tia->oOLLE zDLkJzoApyhu6>1X4$^KJageS2F9WY--x~b|XAgp>KJ?Jq_R#_GdtjnLMTeHc9GEzp2OS+8`X%G)IaCppXL7IEiQ@pqTX-)2 z$@A~m?8pr9bq=iY{o$N0*4Qlqdj~YU8k(D?M4%~of;Wz$nE#8E>w+&Z$AgxB5uGGB zFVALS$Iy9BKI|?)dxJIENTr0)D2)f~_YbY_-CTm4}V8s8W2mcp2 zw_5Og2KXff{6>nehK=@QKsLiiQwi<*0vk3sgl?ccI>>Zh;Tz<4aj4jcR2m=Co0K#6}S z=TU}apCsezVo%qG+_#Q>O`Az4WWN}*qd9=>TRiJxy*G31L}rqGq_tmcK)0wd29XoC z;hR$kzgLzh8x?!gm9kHK8QU3fZNG{A1T++S8mq(fe;yjM@pobqZiAMh1It!;_HF8H z!e*<*>~t%&B8~Wmyg-}f``kA;(1r|ZwcE-*jJzj7rHOV^(bWz!eP0J5xA%d2Zl|)c z+Hr|mT9>3Gu+|mR0HpgQiDB9WKSB4Z#ZsG z!WZ3+eaePyFXkKkz>AK+q}fW~$0LRZB(_AsU$+N%m8rSCi*xabL?5fs#$mhD?Z*dZ zE^Dp9k?OC-9_S>;%&bNBd0xKRF!t%{J&QNauXkSVj&s@fN zvQ;aTj(S(>kkP*nREtk(!_epAWoN%JH%-?$>!XTq^tJTCcrnA(mJ^GYb-}j{;w5?n z)@oMoA!P04%$G5m{S572eV3=>8vG8frCfc{tE=~CDt+W7avR8u*CH<#u^!=X6Pj2L zRhN~QR=h+jEBblr`rvtc{_R0eo4i|?gv>BH&viCMct+P`di zP};|i6sV+qgKJ&UuGO=Ols8R+o2P z{(H$&RLK7>z6~QkIX+&&K9}YajI$9O9*LuR6XUHFX1chS3S z!7%G<8?mF?SXW-;CfWB4qYYVCUgV&1WR_z7$8uIL$25%f~WL2x-{2O1ozOkur4517|U zUfM1n&-mcQ%3zVB%HCSam}Ksn!DWH{2z-UbPvF!82J5F19`9vMUB%j(fm|>>$a*tr zo|o}@s1JFw<30K(u{N>1gOaCQa#P0QYWmbRrt@WE$~gS|j?X#-!k21>2>`AJl;C*XMw@ zrL?yIJxJqld+E!ZG3~?e>iF@6Z@e=|$;1b)VG(5>a3af=p>GBs?`1Bqi!skvG0!uY z=jqJzH0F6K^IU;1Tx_02eidGWp4`3Ifj+eUo~?xn_FQsEwGHr;7N%V|U)Ee6^~oGJ z54vQE^#6ML!=5|RHlVY#EvMgn=1Z4*PpRyQh9k>NL(h$F;E6tPf4fuJXG5NOc-W-C zNHurQ0O*7xE7N}!w7MG``v<>r1PX1ry@e#>E>o4raB@wVamaitxd`^yGWI-g%kV#5 z_dv-DgFR2|E!w@h3m%-NmP~FOq4d?_2l_w?IvsULFLX7q9r=DC>kR$DJg04lpEVf~ zJwhXT1n6EsbP0{n_9={O2YLt5E3~95lN_fOzEPR3vFR<_0o9roljlM*R_N*RZFYw<%U)ItWsdE^y(#t7Byhosc{y-P^eGbpLmYADc z>DPYx)k}ZY(jUqHg^u4T-!(XuDTX=(d%2ykW&AqRlzp>^^%D5QpCzu)^ug@)ozDl; zc3ReZls3G~!$0eT*_@4BgC5Vt7+v)Lz*IFj+!6C{em_qM_S-W}n%0V)rjLE}`+1&V zCSyD>r#$Gj5B8VY2J|+eUy^VzpgIJtN|{_R1jT2AS7c 
zYD{0XmN*RQi}bJgP_zcUsM9zUoh0kp#(g0?P2O)HKWOtoaz$`|39&F9;wIRCu8i$J zA1D!@P~lhB{o=pG5CN6U#aQVTB zn9cjY*}Iy$DAu&fygTp@`6~OwYl-EF$96oVer5TciRIZBIv%opW%-vA$~*7_;GEsO zATg!*g)@G+3O>bf(+OS`gHM(GOgRm@=gjex2yQIpJW3gtif%F+ek?f91s%6=AO?>@ z_si5Ty!!towEK_1;qk=s{{S2oC6@mO;Ba?Bd4WSK{X94Jvsq*ta6~J0o;UUr{5k3> zniLq8qxpp|xz33L6}m3Zg-)DX)_0y~&F6X6cAjTl=Xn-B&$Ht5JUe$EB{8F6E0*-o z6l8cia)T8YpWn-Q-e>Kjx|s9r$U1Gzbxa0}#mmS(>P?=t#rILI=j@{f18954u?`Dq{h3*KCKL(vx%k!J)UpLP@v@82Fkzai5 z%c95@(Lt2w8~oBD(F&zQWaC!mC~ zl8`whuR#j-Ks9h1=R=5}uQzGx){~rndYpND;2>wn&rt%u;w;n|;PJ0G54BMl()$zU z;RJiM+Vx7{U3=1=-)(gUuA(3LoUJ;P^HSe~Uw+2#i|Bz*zR&Z`{N$X*9^yL7E?_?} zLTI|f+^4;AU;Ta7J}rvAwgH(|;wT)-;?=!6Fo33ei`ZkKADzqo<#AQtbM?qqS3f^Y zX`f}!>peJ3sW}Q?SgsWKSJ(>tN9-A)qjlde$t>_ZF=6TIq72^ zeMA=7BD~HwG-ktc)3++svBx=b&+6SnVtupmPI6%`qi>JXx5M`I&cQQ^pd%AllFFaS|yhNBUI~Fx{ zh1b|q{iCs=ZltfT0Lnb7|of(4G-n7sfXS)es{p;;el_NXAIZP zMsLlTHR2~5M!u~B4u$H4z0L_gSzU;1;x1C4Pb&)&`#R$NsHr2=Skn&jhlwsv%Te~p z9@vXK+ib40w2%dp#BZk}k6`0=+Zm5sWj)?j!5Kuh!Txgl;5`ik#PvF5E zIBfsSyCc+9Wlu+@NJpk;pG?`c)U}7YO#EgJ+NtYSzBSLmq>T9b-OY6`*BiM?-i^d@ zlK*B^7&ysTWqm|#1-T?MY!5o6TL+T6g}+`xL&;Rj{leMO+k#QS3K zUe53>R?tr-5lh4M5dAAw1pn>5yO4Q=hFN)gto=JVf8mp{z5KZ{i1@dh&S&xIU{jYX z^hNlv_`zGgBbF{VEVKofwb5=&2KmS0Ec@|G)#5Tp6d7cHLOzSN3zd12)1r?4OAOaH zfvv=@-9&DS^TqtwHpOHK={xkf{aVT^JbRxu!6?hEAb|ulYd8I zP9#sG_)+!4w?uCyIWQ{mTj?Qh#Q|*pk{h@90i|X%yeW(<(GYP4;Kv#D#IIO6ES_S# zTYrr_0nQZ5Iq71ThMwmVt0H!VF#5vTwmfq7h`k}-f75m9sxJ0hZ}UvzRGwna+koFQ z_hQqhY&r9?gSnEk)3@>bE%H%#9wOelV1}G|u<3l~rw1>b@ul-e}04RsBu+PaANp~AuVo7{| zrQJ{e8l``??>B#snD;wdS6MN@G{2QQwI+5oy_x2VmZoaOLRMH8;Z|{IkM(t-uZ#08#rO}2w)6| zES2^sE3zi`l0IJ@x_{PYnWFVJeV4hjdpeqb$XroQV!jt)yAWAP- zn@wNP(aTyczYRGUna)d_E&R%W3piJBR{MveH79{z%Tt^aF}^aG8qZ}XXUjLBS6VvW zVa`Vz`B}vODxRO!Jeyu((?1`fMEW07f^v5K5oFM1uSoxn_CttZ0mneJ%K3&2hAtPuRa?OJ4M@U0Si zi^yy6yh!kV;u6?)RdIgR>h>80{r(y&<_}_p}o^f+ipuL?IypAGM*K-Q+1+KJ-Yyr&0UMBYk`0h*3sN+k| zs4wx^HemJ=F|d=EE9t{h+B{v~shD2%5$l=M7k}1dJ(K$4&mQ2J)OXV~>~}IZ@U%AM 
z$5v$M9^!puPDF2V2zy)X+?oPxZ$sqV8s_3mo~80D;i1)>QN=p_CAfwjd7j9+GH1cb z=&OL6BdPx-O>h$YD8@&s5Z$!A4>LDz_Vgj*zCxEGAMp&@XXXfU0<#Ub#(bp0aUZE# z_A25d#lEDb7Zzi#FtR~A}k#>exs|cwkxmfe_4l4I-!%j>?s z^GaX+VtMd5cy;qLN?<5-2^wYQB(B<^o~BE`Vtn4KEV`7NK$nL0J`G*!j?<+K)|Hfd zi*@zm1iIwC%A!kop-*fvy5tpHo7DeuCAe0{)p)KvC^YmWV>%4|6dKfj8S$fW8Y($& zW#4Q)uNOK(Ux^P1xPZk{a#_oo*Dez~v%_CFTiN{6OO={W!HHw|P?~ERyb*aY_+qrC z26;y6_r>u^p7*>Et+|%xBHzA;A8t2iG>Wd%%Q_V~J5jbiOXmIxIlEQ%0hb^{GB)J@ z4xuN~R?CKH4YGDNx|u#%gJJ9(;8R~4*ArQCwr^qR8gd&ctl?DlpKsYR0^M>333R3t zoSMPA&TiH$d(mS2P`b%OB5PLOhwV+PyZ1+-1G#?d8_{ERakd6Hzggj3@aMq)CHewy zB;K~>E#M$Dqklhmf*!q@`LX!M?BcWe#(`01^NrbZ&%8ec4mrr}asd0g@UFsBN}m&2 z)>B_x67#>#3(aT#W$i#C-HGpq;bUn&`-}L)I73C;OPvz8D7hkUx*K{|0_b#}9BV^>aQ1|DWgQzbwV2bCt4Q z`sW{|xum~VKX#q&XKu)Qo_@;v^Y)WAgy)+5ga@1bycheL+0XCL&snZz_VSL$Q;6w^ zx8FP}*Hy{?dVbA1c~nN{xmpI6cP!<813#;r4PWM1>}GCg>G*ucH`uju%5b@%cifX9m1nU#_L8W)=asUHTVdz7V6=hE_5r7k-G|d zk)=exB63|3zB?l4!lMJ|fBRg>NL|oJX-DGvq^=_Hh`n-lb9~*%yMJcylybGYY8U|V{rv6?PIBX!Uas#B>aeDT|cMeKZ+Zsg;GqhfN+rV>-{ z;TAfubz8K(?-b|8OFS+5x@J2;tH#^S?_(R@#bLr#IrDR@>lu{ zZI1a2CB~(R-p4$%17Cdc5t2AH@f8)hs|=rf1;1JG$ydRD(LKj}c(OxJC@~$o!Pq9E z-^*jKCF}8z_&!K{tLSHiclFpL$69vplZ#tc*P8N8Zs<>jx=PBl6VthhoTptpmwlJ- zQgmz3MS;;TsbA*QHypVJxo>SQbQHS9emyjYK5ZKii#G^y{+136BffYaGS_bRg8F`W z=PmN?7T&j%9PlC!tQz2XVgs^g2pigl8J?{T#H`MRuHAm6Hgn81WBhk~J7eb9YsMl2 zUL6^C%{X)o-UzminS0e~kae(Afn*^rM^efLiGh%)a9edx3LAnlja zzVz2OTyhGY@j125U-)$3h5cox;jZHMt^|Y@3Eat%WWs$m^uh<+eb5^@2j${Yya^taxXRPAf$w6) z<-6E8EgRI2Q+Z#N@ZRR~!guTV`7XA*&gwIB$vA*T!Z_eZJ@(=5vWwV@F~3t8ccB)W zTW7YXWHfVBlw{7W;e`LlJK;Yvx9+14nR8pDA%lPuPQ~fhFWFvPM_!`we0K?RJAru} zSUKLGQF)2q#e8N~PVj%MeybzH_KgllrOQ9TR@0$7#`qs6X2zvn-?5w=feq@_9cd15 z;nYoUq&rMp=-~VSW7!1@p8y{^K2|f0)VjG_duYc6Uv#Ny{L+m=#n}rTH4YIMqk(3Sr!uWPWc(v_@U;ry9> zsPo~k{f}|C)_LnrCD}b1c>jdoWM}8CfxO=sQ&qeESJrceQsHup`jauro@X59JjrDB zLVZF(AZw1I>)NM}R9|M#b2`TFGTv44o%57T-FrjZ{D+k+-Q{S>Dz<5Qi{j8T)dHQp zmL9ZEa4xZDI_ngNd&K4Yj1jpzjU%>vBV}Zl5v1Il{3TiLCz`8|+47yK_?WrqL(16S 
z8;37`Vg52TTmL|NEQ`H{zD!NgN2!DLAF2+$#XidEwxu|e)qI0Jo%?Xf?~R|?b9K9# z@0@E(b{@{&ZG5<3ZS^whnsf0R#xB}11~1J@+xpUcscX)-oyHt(3H7E?uU#Lj=5clC zF56h=AtlZEUGhpxf1fFR)p*#Rqu1Ls=Ob#e^O>BT#tT1KQyo&A`XeJ>FchN`vurkj1teWo32iAP=&ah|dPMcGIy>zef zdd}-cM#0LgJFA|qept;i3^hZ)WaKX6HTQG#m#A6Ji_|3NC1YPVmMC^T!?`lcPzUPY zN3DAu@csOIHkZ`XrjB>Mt`2k#W^OfdNer{6=$fMG-KDP?-8r4csk>B{lHZh7^!3Hn z|CTq^H36K{RJ08vcN;}t-!LDYsPhIj)9D%8XP^ zJfy15|DX-pcW<<(=^Lf5CEJZlvSVXdT(h#;1CGn{4CeUYRX?9URdqNORdHr81|>d* zjjB!0U<~WkR9ykjQ*CMbhw5E>>c$GO@4(vVI zoyO*O{M9n1G| z9=EiAhkcsMKJ!P_hm`-Ezn8V}5x&TKfv>bP4tyP#yVuw|czM=F=5!o$GLE*Sjg8E= zw6h6(lCp+!iF2GS!#RMy?^d1ePDRzBExK|!W6td~#shEZ>p0;2BJ*U>*Vd7*8Xr;b z_*#E;D{#N_dbI1neJ}51j@JQ44|Dt)Z4}#vIni}EKLY+dccO=O?`3}KQ>MFS((ihk zO&8s?v!v5-g8zc=#DD0AN4{np9?QLBX%_v`_rAQcdfcNwoexja(}BIgH=1pnGgV1< zUPRxUsW-rymUX-j+}xM5&ycl$1bU?b(;L7S&R2CFRMVU{jNNA}V;(0s16cvy&E2}9 zdfB=Vb*Jj7zW zWqdto;LxUvers$R18#x$f|r38{MGwzeqnwl?G=MFdA3a8U+k3m6`a|mIE|ys!)C?l ztfXGfchM&R&#{!vEbTHfk&v$^If-^5dFJ8=f&G>lm3g&R2zT)CP8Y{+e z?^uz=_xjO4HdU{D;1}~>XI$y5*}rmis^h?ibmw7kW`!-!xYIt+xzd*B{8-ix{ZF-} z>PwloSD1@ea$Yf(4PM6jPu1s+`fp?IcAac>%jb9S3$Ox#Pm(U>s) z&#ZS{=Zr}3Ay?PN|H+teVIWKDnUH>qp{0GpD6!@1o5%mfd_E!l+s46fA~&cx`Xt+6 zy>HC##)yMjsH|~x2^O?p3+W@^l z8KD0=eONl?H{?lQnzd0$)00^{zA-xuBW;>7dhD)PzoByt7x)?9RO9ef16&Phm4*Ty zXWAWl4>{-?_-2RmEu(k5(lL5^OO|`gc0*v(;F@YwOk14wzxFI$>X-3V*>%05_$KNv zXPh?~@7Qhn4q#!ZN&3V5QlTID>le6YD0%vcuX#E)!Y>_m!J~_v2N}l+b%67PE#18l z-t>0K?~J#z-!@JZJe75qeW-DVV-NH-Prv!19mX>@yM8x(eWsG-+&g`F*3DzyHtw?J z=zCpN#tG+BSvTtM7(W3&@3Q6TKb`TTtS#`=+gS^5=>KaxTzso>*Nh)#wJS;bUGDD~ zNl)JIDzRnho1JeO6J`XmCb(}jir}jn_;j-})c9DP=v)A8*vFAl<5>?kC_|k)`PRk! 
z|5VL$3f*}4@;@Tiuoj?g59j{Icya7&#>4p!XUQ6HF>b!or>H~pkAbU0|C8YYjxN_X zj4R=#F8$Bu{RhCw<+{cA6knXrc)m-S;#|af0x4Q|QW ztlFIK!Y9pl&^+nOyH(_~V{Sz*xnt}e1KXs&S^tZ%_k!hF>PHW_J{U5?CF9(iRz>Qd z0)2w}+s0n-^hv(8U@%l2s{c+w{s9&u&$QUCaK6iWS!Y-EM{LRZ93`LknIiMJCo-4Y z!4G5yeW&(2qiEWqtWP=5;ZtOzKImr=_%LVM64r0N{w_2H8s@yqrs|83$>tQ#09TXr zY2bwuzPTuGJFb$8vYgOv7kDT18(%(sIP?~~oZe#_j$CDP&Y|6BmBIQm@YU(?#}CGP zI;Lm;)=;6f&*ndpmC1UqbG~7e!#CzA$@+BY_XPU8Nc)Wm>*>fWAMm~O)#aLL$}dxj zrx^zr!vw~6J8NYEI5tAt&RV`em;O5#=Q7sloZ@n03H=@8*kvr6zAWoy+YsOfW~hM#H9Ym_v18qXB?go4bJ z{D+53ndvEHcabeRNA5KiGiPsu>z(jxS*OCUzYCl^5V{aF=2>N+z5`w_^?a-nT8IoY zP#Ng%ga@t%N2Odv>2Bj=%00&#smR%3ROBztdJdWQW9C-m-Bt8ep`U&5p;gdjz6a+g zI6btDOy^#&rW z;|A~3kt03;pG1x*Mve&3AK~>Pdv?OJIt7k9W^EPOr$!kFJymosICz6H&{?BqJH2DO zi~xLAc+Pj}pYS0K*gdPH>gaI6vtnd5%_(#8yqfBkdC49yZR-)CNt-OC6{Fwl*d;orA^X)6}^>oGsK7m6{=L+@{B8v$RZ-Q?N zAAg>5!pobeZ<*j9^>tF$a^`6W?^5C2t&BHSNp-_h&ytsfmoH=f=E0|%pk2b(=Z)XT z94n^0By+n0d^wD*0y1yV-)fLn^OI7=(?ugbLCC=^esHv_a z<11Vu8ywX=9WC&sdb?Bi+jQ0l&+rR9!Z#v&AH{AZ^3zf58Bk4^eyPJX@N%asZMe?< z`m1B>qDxfHzOkcYOTs@oIhdnh??z6Om{iNR-SjD&ss?iYmVZ~?VlzLt=+rv#jhKp_ zN8#BW_r}g)>ppXK(I$;t_w368V($^ZaM78)$(|pZO@uQSIR9AP*qYmPrJTLcy72{Fy$=c+t^` zE>m=Lmolcr_cP^t%CNsW?Z+j$y;jE44J^cGOJaN^pQf8{<#{bSJgK9N@pkd8LnU8P zri?)evVZiK^%a&Bt|N~JHil;ECGL^jD4d(gH?5Qx`rcOuM^)rj`sgapTJM=Iius;wm$i+ zxCCjVZL$*CMgM}#U6<5dq3n}+iurSuy(RiT@q;_{z?t)J#RdlsnsJ_ArRzA9hwopB z_=Be4lQsa~G>Mx=A4$BmoV$cy@s?Zcz=pHj)?>?P!w>G*4qM=};ou5(vM}-2;q^At z|1ErwD>Ch|kFmWjt`I%+BnV~pG2m=|j~z#zGlq#zGM@0G6{V7o8~y3Trzsakk1x+P z=B-f8?cI*v@Vyb!f+^tMyY?YwJZd4aOX17Wn*nEuTNQiJ!4(z3|Bt$NkB_=K6aPP- znIwGXk{b{p;gSSIXC~Yts7Zw~1kg!_i@8{(-7dFQXEIz&)hlC| z`YR&T(=9PmSl3s{mqUD$>iO!{O!m$Z`=LLwlf9kn&6YjJ#=q2$UZiJ3p94FU!iVrX z5q!y+AsTr3NPL5MZjrM2m%X4pMeHT8@ofy{Nc`;I)8E^PxwL~Ay+)hAWhvZAn`7T3 z{{j5+1^*4d8<;VRywZn#X8W@#Q`#>vs-*oD^tr^hk@iYkgO+$)j52T`c$a<=9Q2zw zxY0Ul5Dp|p(kO5+TrU6zoy5oXfrD;v5HxYn4Zcp%uGkoj;Xw8>t8qAx=NJye7iT;; zV9%z9cwNx`mHF}488q|HZz?)-^KdJK>pcP)96J1O6}aE*67u 
zE9J`nMP~VzTi1~Pe?b|)V85!^p=0OL$k~KP?q~l->Zp&pC#b7G>6}T~^D{6v>8Lhi zP2q>|jgOFR@57H}JwyCipE{V?;d0_bHJW%m6Ih2+ZN?sEErf51L%HGG$*fB;PQ*V- z_F4#k52j$N-pShAy-j6KXfV6l-4WtluqdIq(!#k^*~H_k&g;W3tvSSa+^BjB*E}$# z`4P%kz#65qIRL5LUywfU)v0mcoge4@@!G@`{m6Rnqw;-m^4tsMr}EEQV0dwT&5ze4 zI`xINqF1L>Pwnfa{iNR?6V=FSsS*$8Ur7ba7 zrJexw82=>B`Ee!Xc&c4}8ubLINBBVC#wmrv@0jbB{*CSdAA_G!R{M>~tcg9T-lUbW z=Bl)Ot?|jGvdTvt&7q$=ngbS1Z?_Kbvll;BwrfIun_~&(SZsP8|2eDiU#cJ07oaaD z@tpj9`0KswWwAK)$vmfodA>uQo%$4>AL*_uOBdhe+43$~pUS(m*Xzn$zrOXn@qW6z zPtm9G-u=s4&+ozJ61hptMsAk(s{Rkw5q&TGdRFrXJ3rq%p1j{X z)>JlbUv1g%GILz+?=#U;MjDRy1f ziGz(weJ5O$R<2;)l_+QE9g z{M(SH%t+$@hs=NN_^&dS`*&DN`(op@mY8B%f>Ih>hVMv-?pqRAY!l|X87Jw|9~v-X^YGhwUOGRa%EK8;zKIQ4;@^D$dBM;m{)v|y2%*7Xv+X}_#1s>udEbgQ@U zMdAra8rX0OTP1EH}?%c{X1s2k5#bWT;BhV&!dSu;8>iUHZ+*?M6R^UhYI?wMhKN7zM z`G(%(37skm`@#Jx;GQioQuik64pF!Cne=H5F*mLF^y%MF%U7yOnZQ1tGB_L3a}W8Y z-7g#YQ;q!buuJ~`F6=uCl#1<^gwSh`ZO?jm!^2t9hN0BY`IMf}nTz2UUz2$F>+mfR z_~!xhgqtYGu2j`etIxW_%zqF0FM=Q1ya;~i?!OEFN&Nc+{zbrWu&(|>8`ivAgv~jn zYq5ztIghFl+?RlR<|U#1=ymP%brb)rqVL5=H>Bb_NgqoLg*BwT&_^BgD*h(oI}_`7 z`~-}Cx8Ub4bqA&Wadq#7Caf~%8OH&}vBp^Z=Om>nAzevd7Ue~(Q_Go6quKM9;1M}3 z{NZ(QFJpfFOHvY38S{ES=!33gUOx=#0Lzau06${|-2#I2%Lx){F3l9Pqpj zo}#!LW8iKY_;MvJ2>0U4p7OQYvZmf9c)O)aWT@4nRJbe)I7>*G5%Vput7^I*eV}B$ zTD|~1ViodsQ}FBSdaT5v${aHk--{}VDIPjM@712lPyy@Z4)LL^v}<~CaD0cG@8ze|b1eI2 zzU-0@ABX6q@(;d&-`yc<6V6FVq-SP90xfjjpZEJH>-hvb2e<4)y_T`IoDEX%)fpP zvF?=V%)LrC<#uJQxn&ZunVkL?6YX7#vNz)^l7h}(TlUD3HS3;(H@#y`^QUEv8OpkC zNiTe;w{mU4bMUK|sv)wx)TO8@vg9TD9oNiE#4IYs*%_t7J~G2|369 zCDuR{^b>rL4ZdYL8+~WtT@mYw@;w0=Iya0R!O^HDg>$hF%uaHKT_p>`*et8+k#Vi{ z<$dq$^@%5{_2$9l%<^slmIPw!b0TDKdX z%GyN9WZHFv`_tBB|JzGet%IIQrm(+NzMrwC_;<5zb0qVjM|acS64L3))di<2i6h88 z^mkZz@h0xyu3S}sFDW*ES<_?KY1iyI%Gu?}yV)hqaBNIuK>IO$dl|9oWBT?? 
zQ{UchwS{D!GFab!6B#pD=iY8*E&;t>S?3;u&OPQLog2TZ3W;+?EIea;h)=25UWB%# zp3CXe27S*-sKVY?A!BJL*BEWz2R*vbvtKv$Xba<19o?OjvQ z8`^+b#_4j}8R3~dScNiPPVl}_v3L2&BfP3uvFXrO^F6^pK|U$ll5XfkLNn~|3yDrN z1bxU3{8CsCO@8TkMkYFwEh(fw*C%z#w$x zS2RN(cU1-pR-%`&C;ft6He0b7Ft~wXwmKx#Hg{x)oUbAHlrbj!axd?y&_#7My(?hU zsaflde?mFJZDYv$o`qR)caI|(%c#d72HM2R+Ef+1t0hr8}){$cUS$$ zj@G%BquP=OI;8JvR4wFV9Awo!o+b8u*~hY%e^#QqeN(k|wJOqXEtHFIx3v4&1c4zn zw8MxOVeOg+487FPdYJw&^*0387R)DQtBFSaV)vM>q#E`2QvVL>e)izV4%eLzbPP~# zz^wZQp7&akLiOmrqJJL8UM#v)O!tiGol(6hs&j50)K5}&d|!#~7vER=p~1*QYI(8L z$^1p||Eah)%ea7h%J7q4@F;B%+I}GaQry*nJC}(&ky`@CR1X?wv*gmT2r+sakwcNy0#qoi|C(4FuNR{XIW(YvS+8yN~9*nxqc#M=F;W+?w;Vtf#K|Y1UJ$ zw{9P$RQ*65b-j$Y*81Tcho&hN!*e9wPl`v`S6lYQ2eZrn&-hy$KI?G%&IQW0PAluK z^O&a;-cfyE%#iYvR*S!A)R>_@MJv(Ztr29_PuFFaA5gMNj(NRRZ?TTLm9>8H8~z4- zN7^O2iRdLVUy*stZPGq)C~|cFzYbJPVGohai$#XsPZBwLIXuPWZHy+RT$u|=*`nji zo&xe=YjK2Y&?AH=%J)`m2W^}|I2+m#8KlAYo}W9cV=HmwZ?4;zRcFz1ty|7UcrdBc=+(JA8F^xj2t zyWto5{`tA(9fBtrCvqR-;V~Ra+XK)*5c&`snGPMWUSbc9u<4a{_PEV=>Dv>OeC8l} z$quFYiTV*8Pu6F4h_AKi3+!1l^8Me?{cYInUfJ$s?t=a%b~PzeY;0m*ld^(IHeG+B ziTyRvvc#6>T50TiGs+ljlbe^pnQ0~0=&P_PhbYVaMp1ZkwWmYi3?(`ArXMtw$vk>B z_3ffQ`DY>Z0dJMqiYxg?Z)?JK#nqOq*KzJa4e*LhdX>y6`FEq_~|BkgTqPk<9!cizHr+k&)f677-tDzTBTt)9}Hf~|XToShh(amO0I zi;X-~^|4(%8TM4CkfN2e|Os<|{l#_~0JZArPV*idW3tPGIPfEu83-)jMo*HNW zp2|P*_U~lu-;1h0wtr8Kvwu5H`*#ZV@A!HIW@(eywwm!1L$`N9V*7T-lo6u*8F91z%1Q$Y=lGlsw`xWfKC+-z_ zDr?7@=kO5Lk0?iM)~wdzh@op-^yI$80BoWWR-0F!@O7DtGut^ zEo|^Iw`i@e=ny%1S5ktWJIed>H;@S;%T6GRepr5&dt4dqPrtq}vDq2U`o1&V0FUWJ zeu<3zJiOe3T=9`lWJ@x>WGeVm?&d5pdCE%mI$R5`<^WL^8a&T!iO z&Tx9QGi+V&471Ov;zRbk36BuoFbWr;mGw1`N}kuOY3-%K z%x#iaQ(DVh?00b$jqO;eDM#~Hs7LGBw@jS79))`Fsqb3Ezw#gbS}HcC&iP|ITG6TR z0k47&J8MS`@XOa|M+)~V8G8Y6q?!0BWnZKQjudqNZze0-^64kR*Xxy41zKhlzdx{+ zcRYcg<4XD{ve?^yG!WjFU`y2BK+lyuZRohA`9D&Q?xn9EPE(E+16zaQ4PR&RhVLTJ zF=a^TBz->n4~qL*+B3T-yFk`^e^_7E(Ex2uK<1_a+d=w5;OQjA!;_!o{do)~>F@kt zcw0O?0*k=m1_uI%*b(F5xE46R3LJZtBm<6#z~CuTK8nFXV8SZFOR-tE)e^1W4IZRk 
zH+T?SbTfaD`d6Yiy;ixhK={A#t%v6;+tTqF6x;-X&1y?9;6WCZy7wqYYk+5^iI)I) znFwFI2A{C)N}>VJHH>NZ9N?J~#mkfROFD!H^Z-|V99(wbN}-?P;gUIR47L)pzhr*9 z8`$*8DcgkB?eIo{tsB^IP&Hs<4q(7G0Bj-ld`h1MfQz|Zcp-UYoJpTCzYe?NU~7XG zWNsojwtu%VYbo>3R^(rI<%$9gIv0H^gv~m{TzIMK2sJ>9;_GRlf1pw0+iL7Je(b_a zk)dlSYi<7Kti9*esx$CF4LEwOsh+jhrs`?f9D0%QvKHAJya#?EbHt*m4e+j`= z(Rim+y<)8`E49>?Uz6V-1${e4n<3}|Vzj(dZmZqn% zp1G_f$1MLF9ch7kI>e_vt%0%^rfn--7~YmvlBd`1+gzqpF6)@aI%YbwG!;8ZFFtPr zm|_nA?s%$QdC8jU(%{sNM|j?iPA|_3c~1UqU76TVmIfymRHmfp3&Bw( z^RK1Y%5s@k2<|v%A+(=3|84aZeZn8WbAHPn_$4w?bW=s$H>-s+VOKt>Ag^Y4zWbZH zGMV?YRvPXcsAJD;ZP^Xf(ORF@kpga7>u>9j7zgqn_JVZkD3x!}LsHp9- z8?t6w9HHx|OWHGqs|B4+))s>)HhnU*AU1OB3OeKW=qc-DBPKL<5`B3@f`z2* z;7I0*z$_(>!qTlCHP6_2m871TwoMTIzQEAE0{dt+&R!yGc-n2e*TM2vVYOuw^^DCAj z`|Z$oh*XlVXNg~)9oi}h-rupC`KhcE+M&)$Jx|qoWw@( z;iC<1+Mr=8_i3D^Or0mO4ai#Moz&TAwpDN@Z53Q~v$kzx4i?L+g9{t^UlgQ3mHR@WLL(lHi(-cI(Iv4{Z)ouOB=+!I@s1r-%5q zYl){rco1uGAvbihl=6Jga5v*u@T^UAYztuH^spYFqT^}slNRXGg3asn&Fa=?>&yCt zri6}#rsSMPfh9m*H)#gC(Foq1h8FKaz6rit!8Nvxp`tep2j4O#?c zhR^U{Ds7mCJUO8%{E8Tzni)e1Lp_ zUS%E{U_KDSp1d6Wdpvsh97WZ03+||f#{3%gSdoM5zq?=)P>{)@3yuUQl3&fKEjtPC zFG81e<@g=*a5SGF`aHTvz_c%{ME8HqYW1(gz7UABFUY)LC4SR2==)M`t}?-}FU-Y8 z8d#9I%|ZO~AhPWUeES&vZvkc-^d;YfrbVyLVy#E$T%HyBOySuEy~=Yo=eFfQSKZh^ zM3zb014&8x2J})bxME!zI?6lXOKeAp;IP%yO`lZLkQ4S$129F7Dd8Q^Te4ZN$bw`w zS#Jd{@gY0}f7@by6Pc$W^IT$Qpq_i^&kg8vMqNoxeFM5}TIGrhy6wqh-f$Yao4|nG z1$o;FA7-x^@>=Y&(nr!Z`Y0T0^FMJG6?^nJg z@@WaSLVHN;v&d{uHa;e)*EeSMb`cL4AJjMSDGVeiH+hWlmKM^!)oA!{h>o}W*=26A z%htftL-|cvz5iI$7J&|HC{K75I*Xjic$BePx(i;r8k=pZv7Vl4*e~7ieHp`IJKoKh z?Cs**Lh4~Gmx}H0Ab32+-ti4zCT)`c?D?>D`TJ`g)ku?X$lSJb z&e)Ef-%RY#_-}{sWaLl|{|9NuslWL+kJ}CJBYwg7f3tM#=f5{8KOSz*L484deBndMw}vwv)_s)lyL-&QluM3%*Pd{%H`hc%ej zA^L^z{On*}ft*9%3hlHr*1DnP8b#uwXbxrgBhZXJR8of?#q~7nGZq;`&>FF~JQDBi zgf*pI)&p9RF}tC+Y{r$wxDvjb42`$em-ShY#S>YBSjwD3^eEaud`Np zt9FBL{7|*|Bg_|nz&zKAk9p*W_>?18y7P$RG4g9h%)%aE(-=1m3$<;_u`2>|sp!}% zRLe#UJU!_P790w$EqID`hbajb{SoBPk>J{Oy~Hh&dH8bnIz57a;PBwuj!2ESzn6LW 
znGf(o&FRdlyXHt%EWz_YFIp~!87+>)B)|`2cYuU3tenAQ{A?bq6dBZ2zoF7Jx zY*1_^+p#&zUY@6vsLeS5es@{R`r24~#7`^z4fgslMX?8N$<_653y%%0V?E0l55`=J zu^@B69%x_m(*9E8mg7hL5F> z@=r59V&7$4xfxIKV*TJ{c8`p*EO1QR%Ec~i*9gW^g-OY|<0v7&=9XK$`a;~XOB zIe*>Cj%Tr@oJsZe?>cxJd6slw+u4%A9P*f&8j2vdi~e&~v&?yl;V-hL7Qipb#(X9O zJq*B?Wv;3nbu?qw3`f9uwyK1z-y5iSmOke!7{@O3VObyB3GWqJm%jI?sa^1bvjJd` z{uO(V%$-}A)5;hXUMKHkV={hx3Qv@B*^AmG{O~mW_YC~-nK*tZ?GT>0R!!B9Q%8h8 zkK|@<>t)OWb59EVkXYl)1+i_Tvph?G3y)5Nu7it+^+EfxS>u$p$^MLd#zDk^EkKFd z7iAtOd_r`SR`AfuoW+MclRXxZyV)1;L{Xa+niD-h`l1!wO4-@SlAI>&gVd4094;BW z3I9@=QxB)#k^k_21wE|gYj;uC46eJdSMFtPDVYDQtnDAE#yWHa9u~M3d5_IRV{Rk( z+pa|C_(#as04y@6+pb{SVvfHEo&3lS;;b^qe}p-HWJkl}o3AMWX9Z_;*qGPH>xgaS4Ip1d&ezgkiIfkoO-E)K z{9WuPE%>nEQ;Mx4(INl2;O%n$my8>kuXhu#TE4UARu|r{p*OV86}}$5uT+xo!%fml zSdS0kFB6mTKV>|~8nbH9!$i+;#`AV`xw*vT6?rB)#xCaZC*TW{sk{4bp}&MKWQZpt z2RjUWye<1{ZEdP;<4Iqrz!6+t5J|Kdyjv6A9avs)0{$&}!duW7aYpSY;N21UcCXm$ zbN6TYuIbD|r*()v-a3AoDSbho)g&Z2hib# zU%z0uNGb` z^2&v+a`PhUfM@se+zEZBK%){fM*b0>pV{!M4bYz9vkjd$@V|%gnjG|WY=q|JKAU?x zJX-GapcO~Zvu>mLoF(6Aql^prZsS>ah45eD*}{W`_X_V8{wusYo{yaP?LfsBem^ks znCs&Cax(NQddIW$e{cjoM@h*tFG%lIwT-eK5C{@;Juy|6^-XLI##~6+d7O3%9f-^p zdHvc1&LM{e=D=&k=LFeg$nAHms>f=vb=5!vI~l9;pUg$&J@Gk*pM(y427k77eFa(& z9-Rn1jJIWOn-4wAhaO^jYmJi7Bl$(QijZIWP~?=zXYq$;FTH+J`rRzgO&Rh{`aY)9 z#^>|VuZQU$Y4d8@E^ANNFxKs+y_=a^2rca9xf@<4{U4Lt0orZoDO_!4zn>O5px;a2 z$EoyvG4eXv_sR5qYG^0;h{JTciDn+r?7i_CVzkInvF7n8}mJ{lM)w#As;QKhZ2 ze?erwZ*kZt>vM~oVFxy!Sb1VU5*wA+Oa1Uuv5z(ZR|s8j5zh_KU<5z3L-5v9tf#NS zPF6BKylr8+x3C-E0Pzt%MGEDIx2@v**q6=^%vj4mXBa!l$Ur~3=xwtyw>42t_E4qJ zF+vHy%yVysw-DJrV>SP-y&=5qCFJcux>C41k=XBZ)IP~8FbO>W1;3t9xgdPang!tr zWzO)VK4P$aZcU-45%c~a`#``&;B!+7#Xh?O{$Z^7VSkbJw^ghgHn0vjhqYs44Q_-( z7hjCf{9Il3O$AgQI zFi6}%*=Hnc%^Ue9dt2mR`Nls+Ing!6=pL8B-#*JTcE_#gS2v2iLwu3MMo>e$*=N@L zN#|U4Pnfg%)DYiC64$Gny=R`LIq+}xWvKR^@%a2Fr(rLaSk>9sH)Depv$!YYXs#K>qkz2G^OH(K_rDp>+NF#rO zazl#Z?SGjwfP2}K{4!+@^joVM8I!4;OCmax*ynuwBj=JV!+$FIHg91{nzvA5Ax#dr zu$@H9_9=>SmWll~rBKGGmVh0Uxtx?Q@XHw~FQ32j)l|Mu1t&+oJgR&*DP^Y8CUB-` 
zntsIX4Ijfs+JN8VJ^ce|YlCYFu(IjTU?Y2Wy`x;QWdAaSwjQCa>69z&?WMhGBX0BO zTT=DiYAWG4*`G#vuh8xT&}0htr_p8ia-U2dc?NbJ9@8)H!@QSuY&mOXHhXiW4;=hw zhmP$0D>z>Te%=N@tH9586F*MyBYPKQFXILLpyxWkj}!dhyY9%l3j9c2f}2&~#tCjF zftzHrze3>0F^%)G)#M)OyMwGRSF+~mZ8|VZ;1jsB$s=$oO6{!pHpqC3ogZVPya2jn zHnvxrX45_Ruyis8#n;2hH=8%1*^a+xatnKKsweeHyPQsZJ*p>l*!iA!+jxBy`?W%u zN^>f{rg`SNxSfBVWxZeiSwwv)Hp_H<{uCYm*z)8=b-MiLrz{8mHO`-`-v_RQCo-Q1 zOP+vBDV?ODg8`Sr8QqJvypi~#{BPHIj)T(?2WJX!PBQmUO4*0mGb!aw2F_iX3C+$y zFurKQD7Kg9(i56ffzxrzRQ+w>kdxs8qx6H|Cg4*_9n>j)6tdUyX%jzd zz>nZ>@+hr2H8ZJPa4L;|D-2G%b`y{v7BUUv>b31508{1WeC2L!P5fjGRg!` zF`O6kPVkN$bEsW&=stWWH2$|UHigd=(}x=0UssfyT)y$x{2I}-g0hxS#oV>S3xAPu z7Txb_@f-%<@89tgVoeas@VuN}99HSgv!&Yba_ zL~jt;tKzqLzFy9gvZALd?IX|~<_-)z7WhVTc`JL*UFcQXuVyvditjBDei`^iW;w9m z5U~#PxaHgV1+TjK&U#gWZMnixvFzo)@a@izzwKAjH>J?u^x+m{NxNMsO@3ff2YaKd zLck~AWbNr?_GE4xXV-OP*JAWo`mb*)eJbN>Qwrz&kmnKp-OPUrg=Xl}MewA<;B^xH zT50+kJ;J}v2c~vNxzY!d>DQO}$3wqLnX&Thly~7D`c=xAO21ad{Udc=_=kQq%Ax;0 zu^jsQCCX`FPc1asrvv+Jc(V>nb6LNh3{Ngmv#)0?lrGABF>Ce>N>wHPUY+R0O}9+Z z#h<=yzPn3&e=4#22$mWP4n|~`EMaOkNx|S`R_gW+UxVD z=_T}8Ph44t&F{(f?CBe6Qjt`DWb!pC%AWf zzWGITd3gMcDag=NbYJnWh8DMRPNb(QS4q$19ISlyK#qSPyWE;)*HCt%AKJDk{xCIwfq z*HsJ2J^~AJv1XRoBk-3Dt~TLucm$p!$Vq|eEc)r?VJf<6m|kK1CI-_Afr-5c0+Yb8 zfx35tlgZSdV&KE!!49|WY3i3a;cNC=)6tcN9)T`jVXP4sQcttbtHy5S?`19S81zZZ z9b+HJRP40q8peLKMJ7I)n8QomFC`{~>!?%uE1svhhkL`qqht&$J?4m#_PN(SsVrVdy}K7ay=^ysf-S1DyG127k7^-V zNeQGxQW9whNh8@vc9MhSBqftlNU5YWQaWiUX&7lZDT6eEG?Fxmlu61WjV6sDjU|mE zWs~Y=wIq^kq-0V$DT9 zUnK=et4UR)`$^TL^>woray}w*Eq(O|12e>)=3#8GUVXMTYBOz@`LXox;IVwUcmDg= zKe~x|kL-mMJhPvty;iNARa;#`Iq5dP(zxzV{&Yoq|KUsBltzm8T5%M4hnqNZJ?V|i$)D0g#i;d~yy2kNs(T`J-cI@^ z>4nSGz3q>vn><%lx0^bbkQzzh%hbL7>gy&JdsiiVZ4-G!=VO1P=z`UL_!`e=OH96I zgQp6gJXN1`y=}5mZTm8*iS!etT0u4`$R-8Zq#&CVWRrqyQjkpwvPnTUDaa-T*`y$w z6l9Zqe?nM6CMw881(~QI6BT5lf=pD9i3&1NK_+qrCMlDYMH)>SLmEpOw|m(Ge>ZE{ zH~wx`Bf7&1{N-h?bfdyqzxWI9AAcx|SQA?t%x|{h$9Qk!ePx1JZ< zm8Lv5v=tq+r6}PeV(eX_i^gJ45d+Yeb7{WRr-i3=rqSqyf8^_=Km*kI&5 
z3pvMBU?&#e3nz#><>EbZ{P`Ttc(VNV`(?S{aRs`O?BnB~eq`VaExa3CzQVuD_;1F_ zXk5SJ#I_2cuZgdX7rB{@&bA!CJZ;*P!T|Ab0>sJ+4#m$>KXJpnJ*ku9U)|WI{xkzljb|a0rW-Qm-?_{xJXh@ zg!5x%kEU(LltRvyFUu~NQrJj4Y|MdkzPxW%koqLfR&7%4ti~klrj*e*lnaxvk*+0g;c4B6;hE0%uAL$B!3vhs)v?7sq zj5r-FL;7Z5u9Y)uoozPJm#tPqUlt!mJNmLlOi0m(JMpWL_4C=>2e{Y9P#*U_<{8>G z#Ogv%o{gR?^9@^$G2bekO?^Ijrrq86TE*JT{3$HHNhhtG#XY2UR`(EW7sEM+1wHx% zdQoH~&)E7Rqq$z!pDcz&iY&qwqMQ*6gba6xS$RD zTRZJBVi;cTUH22WyHELPmuk~({ogP1vGy9IujP4ge!+*dmoaJJA_CnmL$4HkM4%}@ z_^9E&fqTw13ClfwUlFo7#8<~rl5CIq94OdlSv!z1?n+ZS1m{wYfqTvijz1$fb_TGV zA>43;wL|8fjW2F0J3lv}`45a0p~bfP!Zz?cqmr@GQojV>s_b?L=jXl1+1(3SD@~z) z_o}vCDsX2)*Qril_%_-H4`@!uhRoVHYp}M^`(GY4o&6$5M>w_cJ=kW`)3oq$;FrAM zwBmdCv(@mQ${h54)-RsI(}(r*zK5vyHGINGB>TeRYb5Pz=i6UVhrEA=eF=E7 z3%|P17yfCfFMM!`FFZ+2?#i}0d)~l4kucr=q1~Fy-sNP@IUMe}BfatQ>`b-l(Ak`3 zp`p{?R@CaKKe|gS2vi&{l5d0`ccmg$}3>(&VYl5MRcZMD+kp1ls zE*(|w0+-@zaf-O0MnA(RpoQ>c4ZD{44quJ%G{NEAJ39n^{ABIfLzF!-CihxK_Ox)G zWso{-*vX`h<15vQL(~zXe5vQqUyqtD>l;1R(FQzEgF~^|tj12iF$3EzdnQXVz0C`- z(f{*$=ja>N(ILxt#wTkR#AiIq`2hZg449p^C=3enQX_>E92u^|~yqzYz z-M||GfBrk~UFQMTFRgcs9sz8pkW+?_EAX+_=MQESJMP6cQoPQy?tO4+l+D?bf{UEF zmzFK-b2qD_{bJt^K9D^dv|0 z&sTR#{-3YzoGJgkx))p+W6sg|lSjuGyxeAtF{i(gF(%`S@lt*YzP1?tBx9%{V}UV_ zWbDXTl5e;2?LodZ@(sH#HoF(Q@{jPd zO4=D;ws*p(lui4bFKsGYDCL*3r)>Uo;?6jA;^AH3!Q+W}5v|MV-$-4;q@#F=T~3RK&V>3OK<&?BA5J$RYLo9=R)ZtQv&#meJ9EkIAN3zl&^= zeizx)O`kVrpm!jboHI9-O#(0YP*ewv{!cdge+6}ld=eT6LIZgu&TjL!BB$2kgZ@7C zH-IaVkG1J4ek+N{J{R$ZwVqbyQQh^M9~a#XJ*j}$2+gzEcbfLS?>?>tZYVfswHdPe z8hkFl1P;ZvS_y3nJ^zk!gJxg5-y4{5fmW`fKM#m*Y|zf-`|}V!0*3q^dTD>=(-#j~ z?Z%k8gpO&S$oo&CW5KE8#we}ns>A5Nn4FjKwF13d;A&xvGJiH`O=w1Z>BKj32k&J( zw`VN$ALrdh+Ank@`pp8qUkOKz{TJgX0Ih4#x&zwB2UmB}uYUUVL1-yY#uxK~cpTZO z3*48_Wj*Ov#I2LMq#p+3K(Sv14hlDwouR$N@zR^%C((M7je5t6u5R>AGVLsJY^UgH zm6++!ZY6Xp`qNt0ONCd7ekbE*2l%aIjLZ1*n0lSps#bK-&oVxxuk#Gu(P8Y_5jwoU zuWY^x{A#D!x0moMn^qkwU*H4JQQA|Y<1?n8dW<^k9?|3Ug~XI&WRHzcgcPNPXB63^=`KqV>e&mdqU{#*Nj>@on26o+L6p#+QV~9nf|F zzO-$qzzmtgNV^*^ zwA*I1J4n0Tw0i_|3@lfL 
zlVMkZlVRrgef3hDeD_muVq;8Qpi5{cHg+A*(B*UrY|a<)ybC%7HrRhP$A#ek61mRz z|9cfY*!J^_;n}qMQ+O~j01?1rD*H#AxM8a1>nyt+-5DQ4U@T$9Fyjj>y{|!*Qi?{8{jkv-eUsJ_|nnr{M7Z z^Pdh53w|%3Mjw%sD4j0*$4lX;`m{0JeesyNNCq>02Fu`+*gB(Q=3*Hv{(mvKC-N>P z`wY3q`28LBz$W%cUx}}|PJc`zSEAL=L?h2$ij&VmBUgfB#Z|~0UrZkEx)hE{pE70w zXFtZrm1y;|z_ILypB)bEDs`s=S3#?-oWEq~wNFIh2riA@hvQ24_$>6% zvOUV%1120|qZhmMmFP6eQ=>L~!zP1X@-})jYexD)^k|1MkB-h~WxR-eBxV!wRZm(c zI;)JA(dZj8R~22cIQkwv(c!U%t)=37Uc)}dO1>F;^N_EORh!CUc6;AgjhG=0U3C3; z9jf;Gm*Q3Q$1B;0(^vnYa>Lr{F3I zM>2T9&IwM$&MCOM8ChhpQ5fU61MN_2hE&A*P0ApgbY0)y8RBIr$5!h;1_ zd?h@@AYZ(<}9N{5Tc6x*b}R`Rv^CznKwnp zW^`T6w~o0Y^hW%c(rng(C3biT^ya|7H5*@T^wsH|yc0X#UHA!<$@$=7nP<< zal9V6DZE1TVuM#i;e=P%nde7gJ;44Sk+H%LvWr|j+6bj02Yc2VyyGmn;df;yr2?D$ zBX+Hroj~ldVt;DKC*;8zmAN#&6VzqL$K16(dl=)=&YoXvyEX^whz?m-Hva#^fWgOG zmS5gBa~huoZS|OPsXyMH7SF4eeLdRNc$-1|e6O){lVP90Z$X~~Uya$gMqo?29FJ$O z#k#0{E4udRGxg@$doQgA-<=CK9hqN=ed1~MOuxZAU2MxP({7-}>(Bo~@GM(%<38z>~@F0Urh+if9)(eSuZD$=p&W%#>@yWJi^gMU{mdBr951-VP!hTE1cktY}W5#N9H_NPapATKD3T&XZ>tz zFaA^Khzlcj<#(*UHqM}Je{19-_nX#Hp4Y4+Jtt>#4(xa4w|%(Q8}7^V_MfF)H&`?D z3-J6NxrC2FrzZQ)@G-&9;Pd8~7a1maYRys7MVA&_N$fuHztHfx;CavSxzVvMenHZ% z*Ex4A2|Kv9-uv^Bz~9ZjgNSuxm+bTD6PT3@owF1}b*#U<`ADMs?A|1Iv4@zVv^R^h z?M?#!VVAn~c3>2k`#Dp-mi?@9&Y0J#4oUG&zwb)owaEB>E6$!5vp?-C|I34^cJW^x zOkV$A9!$!ue|a$NEBecWsrK`Kc`$iD7lkPpHy4bbvz#({!76<67T&pPoy-~Dh8I4# zKH2b*5MJP5jzvs&|E>(bL*@+rb(7YWn7kmSBg*_i<_$i+$=rec;je^F%xYe{WNk-u zy+GE=HIK1YJ%%_bhM!$p&oE?&tVy4bYu}gs&(ZZr(W6A)YB)Sl@g#no!XF>J zPTeZ}{p7)0+s^L2#Vx%5%}3_DMJD`#Gu1=}h^!Fb+n5Zf?;4mn8g&b7$K7##}@2cBmRL97j#Ye@geccG(B&h3!?6WW(KhcUnSG4Y`I zS9A{1(G1xh)3>jS!Y6wD!>o}@JO{C>NdDu%*@LcA$=R!->j*tmqVN2g_wrv%fBAQw z<-0GbwoKY)+!L2c^d#}=6dg&_Yv;@ z(Y)7~FFkMGySU%Oz0?<=Ua3cXEu>!Y+j-jjuYqqrH1Auvf6~0~F@1hO=$Gi`4 zzsJtX1Z{i~FE?@8kY1^S*)m zmF9gb_xQ#}Vd4JE=6xskUyS`jU3ZxGF6yc@8o_t%*Bt=#9D_nq92Gw&nZOH5xW#|Mqcz6H4taGzn`H*g>8+g9!+#*_Th$$hME zBizUOR)dbjKS=&@aUUBmE@)fm*G1n($LlcOAAE-SSC_;um9=)?!kh5J<~(rreMEpo5kGot4cXWfTCn$tjY4c^@|;Uv 
z+0WtPoszLVOMKOjzoATRqa8|O!g(98oqw>D{oS{*zk6|8-_k{GAKtyN?W45|+6L+q zj6DNVUd*@dC-5g}zYm-XZiJux1fCK9?iSv440?AL?|wY!-M4u6%%FEa;9VQ4x z_!ReU_Ep&7iTY7||M8nF$vu>Hiao^kKwihBr;FUO|0HE!E%D|weXZ(r$bO#ZuqCBg zp4p~Y*w$!<9XYFQR&KC9sJJXZHPQfE*4$8g~=gYG8pXW&dua1o5 zuo8HBruZbvKJ?T*)0>4(o&&epfhir1QC{NxIQ5iT;yxxP3%#9BJLe4dGXInEdpYNw zxTK*q!Kv#ulZvqoNICL<3jP==`K0BfW`(`%7w5T${ko5kQi_!7GjrIt7M~Y?hLjsf zUnPC_3VAn>w{B2g8+i&zLDKiHkoN$2_YcaOOrBY!)ug|_LSBnneMVI;hB=))v?Ha8 zwBrhSi^w}?P~Hsk%qQJXdg=;!H;}h(P~J@P6qBk+ZCA*9fV}$$nf>2*?s z^fu`o(tD&n(nloa`?Hh;l16fp(n!Neqex>&IizbyF482@G|~)G0cj@5OPWXWkrt3{ zBUO;@B>fd>8R;ve0BIHJ9@2fJb)<(#wWNnh4Wvz^Eu^jApQTQle%{fo!9Sh4LrK=1 zio~2s)u%A7Uxr7%GVxGW@9BqLWzSQ(H~8$#Wu#hC3n^`2V1{kSXk*VLGGLbN#OP+@ znmZzrGOv5g>sj-f zKiT|lUiX;Sv*tB_iuv8V?lG@t&1?Qt^SgQ7V_wgi*ZgVbck{Z(yq-0$`T6E|^SZ~p zo;9!e*PGwX>mKuZ*1YCVH@`=(GUv4ACWp0uZz>bHA##>Ir05;Bvt+-Xy5OdMxuPR& zS$W&|=3&1b7#Y8})n~>Wku@71dmoG0Ln(V)#df+3-C{QHTd_GyoLA!??gQ)%miqwr zwHBG5Sle?Flo#qO(fzPKuCgCi_QG2J6S&w5Tgx6&}v!SOAhT(GibYpueHQFlsOC!c!;OHJxANpHtk7-2Ka@kX9;EQ_= zz0cPB2yM6Nf`5fN|-hG|5suS4TCjV{Op$)sgW?SF3%}uO2V#xU2iTIl* z)(&;APb_!k+DChS)?8LO=7(R?#wtVIV~91qzMX%Uzv@Z0IpqAASlk)clNR?O+Wx8n zoZz{EtH3GnMRjqt{eyDOVZ2N~w+{7oNFU5*pYLp9(zz5R4BcGP7Y4niO>TA#y*Tcd z;AAcymuSy3q^q{Tz#%6iQ$rl{48;7?-@GW#v7jI$p!OQDjZ zG|T$KYwWfD@;d{M+8%hcWmEtQPj_TH0j) z>xXw+nLi{jk4S72f0XwYClN<&NZa{?YV%3XqggQ;nys}oN7xUm0XuZw&mQP@*H=qA zMtd#9O-lgB*7o|+&P5QKL~9LP*7b9nH7D9an}VUE;p8Pl~? 
z&FqmqFJH?4{^QHkF|03TcAX^e>hbEEOgqY|OlU z8I+M!RM@I0Gmb;6CxGvX(*utYafjG$+3mtVvul(#)~ds@7OV07&|Ti}uPW!d?FlXH ziN)_r{G@5$8{l5%vi+6CZDLR8U*zo%kiU&KYP3n#>qfI)_j~#?LffT{#9h!M%r|MX zo=(d`w# z9e7OIc@oPVIi_)(W>nT7xBsORfICNwyGno)`)A{Ls@Tcurz;2Sxst| z{tRbx_}SZCdEWPGd!kZ!WRBlaVy*enGr*V3`%TQRpW&O%^Jcy`*_6T{XCrmf*TQo( zcwCPsx~ILBHm_nGZ9DIM+`m1=8ee9VG{)2OZ0KI-x!dgLmhNcmGa1956z7#O zYs~9AE8z#6NrFwSq7j>7BXmNn`Tp6#nVgfrxk1E$kn@h;Tl_DKW3^2q{(!^@J3m)x z-hte3^DI2kX7a%k*z$AW&l`lM;DP5KoC7Zz*8bk&1Ff6N#FnD9Z!A-`#=rlT`Tk+v zOTHEGK;fzKkHmnGyj~^ymr_?yc$h-$2gX?dz8{M#su63!lOep#9$HRcXfAI!FH@d5 z-&^G5B%YC*^1Pg9!HvktrSup44LSO8S&~nlYxve+^s(LOV;A>sQ}#Bn_uq(tF^Tj4 za+nW4o#3{?V`K1~e{gTxd8^XAws+t$4gT(uSPG&uB`AA1UG3NBAKY)m(UZ2=7z5df z-m1tzU75s_*K9tImP|?~<&x(3qHFooFZ@U&UV&}>!dDyN)$o&7V(}uiF!HYQ@SJZNppKvsxMeGi!`y#D+_rTKQN@`CXEFF;qF z@OF*szL;JzGOCwIjFf%IqX~?0g?E1_Rkq5zQbBC%p*ikJ%`NfCT_^y z5lVjxvFKVh)Xo%M%)b9{n?)^bfZiIQsm55$6V60x=UrQpT1e337w~;EV4?3nhQ-u1 zIdi%HIIxJEA13hd{@1`G@0Xz$p+|1%9vFD%6e(VhY`s=(YX>H&yAfSd`YZ^X4ZsjU zhdlq_T<}zc44u>V!H{`ihmK@i^125;b>=d;&as{-lK*fBX=4<*ehEXJqbK2{m#N$oM3HJbl%@wfM<*W&q~gQ5ZnB3fc-=MkvQh-_`kt2x@)6#G-Gj0kFRbk`+!Fi z`*f5cTg1=jSI~O^e4OA+%#*CE62nQqCP&@sA&<*CN^i7gcD2)20qFP;c&KCyii|IY zPYQhs9oaPH1);${d|HkHSB%D@c;+l-@VxT$K*d<#lkxUzY;y*E^bXu;pOsc#l77#l z#PO~=OrMJU7hE5t{8sLdF)llk&<{=x%#byM!!8jh7gIo~(LdLA>Gmb%>1Cuv5$WOEG!u_=?)_6ZK72(p?f~in)!*U;3>-oB7=`=yh5BlmjbhH@(YK7)S=r~U zRazc0uzCBh1=p98r{H9Zh8;+C*C#hA7Y5upP*L zSS>NCgUqH5#$AQ9sr6c9W>I3{2aKDR4a8I3Ks?nA#8cgnNFtu<2I8r1AfD<5+wOXB z7y-B6Tfv!5oUP&=f*ptQnS=N50Jjn9jN*2fG5+$vp~$9t!Bc?mB3qZm(@*AUw++6Vtm|96UqaF=e$4(8tH=+ z)uZvcko1kr*IS2q!{@9?&}Sn3rnI}k+cL`k2htnxt~V*4`7*wfvwzvny9Mfqo?7P9 z_=L6xu0>}_o?ozi4rh`OV?bgohtv@|dndYdVj3kg|5$U2+Pr(L_fa_?^d({sPvYK< z?inmn3ttXSDtH?|pAyy@S7j&#_2{?%2Ht~=WvNS7Y+Y|FcFuWp_Jr~`J-%y%@_aA# zuA<)4)GP6gWgq$(>is+N^;mCgmopk?t5ZS(%M+XzFqQe^JIKCl%aqUro{ycko@M`V z#WZ!8o(61kHhO^PUf_Bs?pr_1q~`nVFOSXbt$&Dj>Er;~A) zKhFDTj^*QbjKhlOz=vvabbZy5rY{3l!QFC;st>4%J=1t!#hKb~8F)~7?&6>Kx#xUr 
z14qXcrRTVbBf-rvC9y|fdP^B$Kf&{LoD-U6QS9$nm7At<*zR*&kASOv;EGtxs1OGjjva%}gs&47w0} zr70=?w9gYu5&XRazOdI;aDImW73SKA9 z6S`uZvUD}{p0A|))xh`y&J{U(6nd|teCFsfUWj|gc@yxH2Kex7m3U*!2}IV5?2aGT zjALUC9dOR+vn7a*Wa~MB+%kNJxhCWwGZ;T7!9y+MUvvfG-BONmHTiuj=K{p@dFro- z|JP;C=RSewKX-x8mvXL)@Ok+E|DXAM+;h zYICfHCvAd1tU)$D&3OS+P1z{&@I`p9$V8EoPY0(Ovhi|!V6fgSaVBGHT)F6)q8CTd z6E5h**k?r7q2GyZ9$PP%!(1Y&Pe*l@xM%)9u#nie@;^2*>~rBD`m*p7(Zh-ALOjz8qV*W zLmT7Ql$MK5jlSUG8q?`EpubByWxZ(dx=1WWYqYJ=GbUp1YRUI4+AH~%(cV_<6#`QP z*aZGMqpcU`xspD&in1hMye@9cTT;jMq1SI&xokvp0zCRD>b;h99Vs>kl5x8vgEd0x zmU+hSh7=ig8ENYwi}GB~8@HajN5>MUi~S!Znb|{^?yws1 zd&L%b%9>#8;dvb&(gtEL#Q!7vbL}qIm~(O z@RJR+t%!MWUXsJUpM3HEc*%Dwd#-ENV*_W7@Rek>{NaiJ`uLNH%8d!hN^>G{sP7GY zr637-?hAai;Qqi}1#gYf%I_YRHvQY>2lWmt|g(4`>Mv4 zcd5hu?^#K?zsNdwO;^?n=sA1;V675a`j#@IMDoPHNB^(59lwO%EgMz--a~2S=N{77 zdos)qU-aCIOusL14fRhfxI1udftF#!bnOw`6n`*KaUbmzoDE_9K-#{KIzp;)y~u9) z$Bm6A#6KZ5?E+34&kaF}AV+d`QKKXj! z?t**AY31LcUw%x#+-n)(d0w^q@3joS&@Wx|%l-6AkDB2>XU*_ujQ{tnuV34h^-KEY znAtD)T5c$jJn`?PU%o)U+_z#>`TgHWEC2d8v~uZJ>6ciae4ReIH*no0{gPt#%P{&y z+J3oyNi_N;{i1$xxD{4jp3A4pJclo-TvHe@B|yNWFXdug*fSK{#{^mM~hM29C{vc8&qbx&uE8@fGL ztvZ1&y2j=$d@)fOx?SRP^SqdUo*Ao6KaG4k%X#ugzC3Pd8glEUIr!1#a`wE9v)U}l zj%U6)YWj2Q)5`a-mbOVv_N+OZT_C#hS=O>vv6gk5xrLlN4ov2hwm(%yjmEDv2c9>glx2AG~RVX6YAGr;st z98A)0gK;Z(l|K9ZFlD&$?VY^lM8;bZNqDn-e~)h;;Ons6tDKF^sbhUG(bz{<`)K!& z+DA|IZhthOw7lC*I;FO}+oQDn{B1|ok4`(j_9Lz1*t@oSj=gI?{$KApl>d6ysl5Dd za_!IF-JIDnw72>PLsz6LH=d;~Ia_r37|wO9J@9j@@^3#+R1W{#=u_q>`5Wj{OG3|C za2Lr@Dz0S?^0O^V3y!CGA04K6e^m3=%Gndl%YV_bwBX;LTUzkTeM<{|XC2c1AFnJe z_|CX>{~vSj0v}a%?)|Sl6Ox%s0tvYi&?HsilwZtGdkdg`gOzx~u}-3tnLogH}U@Yz*Q9l^HVa`v@P zJ?KbYx6R?`lRxvplV9Go{GqwK8Xc~6Z>5-#TOCOwGaad(A2`06Ws z)16!R{DkeB&KYJdvf&mS?*b=2zt`jB_e%)f42-*6{-P~{Gs$1{DsbKmoQLsCJO!M$ zIFf+VX~TKT$*=5c+%#|3a)*1}Tj{6aydF3k1t;zOZe+UWNCeJXW@m&Nf%C2Jx#umv zErV!Dz2-7Sze|FCfwS&N!bNP`Y={SrgmiyVdbYnPV~oG( zFus$+W4`@VW}d(30pNQI_}&7KzXQG_#9{K~H7&`Dzqot%OZ z8i|GRL1;wy7mYk{9djc}BQ;~sNF&Xn5%Btt(8!KYppjVmi2n!ZqYnBwy{;EN|6wR! 
zi}Ji*6OUxS+g|)JS@R6#cdg4&IQ;!Z8TSj`~68QHG z$d{$aq7lfJ^{mr8epdFh_119T0pc0%S>c#>?~0N0)~`sMw^Y0v9)1J7oHfp@UUVVJ z74jz7az(Nu+I|)K<(Xtm06L8=C%G33Lu~s=pV7YL$6kB|i(~K=#U*EpMdU}Ejms-y zd@xm|#*?L((p5~wL`NIh%h${pydz&{b4 z>xMvW;XBUre7jT6x=Au}{nX@nO9Rz~=TnF9c|+jF!u6Ib@ujmercFjSmz;iSSdu6C zY}P?iP3k>gO`moz`tc0`U*WnHN#NT(uO{FvT$_5<1=WFxh2#$oEe)Js_&>q>P`Co{ zjsH%#*0aBy@t+leZO-R~?XSRgN~XVi8t>6DiI0wn-}5p09!&i>eUnS)Bf6Amw-!2G zX<5rG`g{&!^d;#?(C`z`z34ZUeMvgocj;Lv)4I^-`#67MDKz{CD{O*^PyX^r;UKGF9Xjv1|}6g27UhweQD^t2}Ja@XxrF(^H-j*mhE7iXX#vs6OLuo z+)uLnzxW$C2tQ+Ok#qPaK`(#eS*-pg`nT(%j%Yv5lTJlHeojA9KGKH~K2J;^cG3sV z8a~yBcj?3V^x@mkO>7^|H21L2?YS3i+wKRr+$7Ub1r+7eq0&p z$Eop+8N=xK-pd$%aQ&}X6LqwH2b;Qofnqk^Dx`=zE||93GQS4V{oK7v=E#VY@c2~GOK}a29B|xxRv_?&aSlU zfVZh!9oPNud~a8Mjj#Wg_@K-0^Y;hvBiQAv?7q#^lgZkngWutZ-=ZbPZy}$B+Ay?% zuX*cO+7w?XUg$i;|MPuLH|K5o2fPbqVsdVsL9W{g_{2 z8l!a?+9)AzPU}QhWLu4|KognZXd^x#t(U9MDyy}vqv*eG&VtV3xym3D(w~NZeU~~^ zCOzNmT!jw;9PGq)qAe?Q0c|zK=k{NN?Xs@MU5bUB>v7 zFG}fD<_ykRp9&&cbD|nLVsGzSqQ#g;*6VYVoQ<_es9%0gF@DU%QF=BAlLt<;$XE*;?VyIi}SS@i!)^y^CcH;aB&(*KIq56j45 zy(WBSj^ZfwO$&2kC2I!I(fsia+GdTe*+Ja-3DzW811RVE0^>oBltb0yid#Fud4L$x z?ZCdBHPSm4o1H3;|8A?+0<#%M`}+?g-*b(AUlIAfT;F5o>-+2IPxSjg@VyH^#6EDH z2(7rzbv^t(`feiha+KIht#|Jx4@);~chb&3y4L-p^3(qDp=;f}#El02YFaa?e-ChI zU0dZ3%5 zwrMjD+7w;-B^#kh;(Ff|9cnIX-(VtiSPLD#10C*W9}tQQ(PLfbR28>+x3b7AWt`q}762{y-<^s7(Xp=G_R68*XG zyE4CzvBQ9|38RHB_hkerB z#28GY?kecu0QfLG-#61VZpXjv0tfVkJs?wpUaKhB@)P1;_5Y491Y5=`#v&Yh&4Y-<+SU%+TU>$ zn@)E1`@|$1fey;G$C&uOZsfG!Ux*$&z@#rWG~C)XxN#+Us-#bFmejHy z*0<{Urf&6b?f6eKtnSjab4`caeCtPoyN+=P0E6anE^!sB>BqL3fuE)(gBP@xhDOPq67i z_K4GNuOp}H1>*MPht4KO?;+}JXI?cEqql?p>>{1M92TY+WX)dYv%{oD}L62%y%OD(M#_b#{A~2zcS(}VtCItG#B8l(sOwJ zq>()J^gr_|xaDlKkoUyQU5%Xa_yW3=%Y&SS?p4kYP1Ps%KjVtL@8rGsn7)ga5iLnB zmb>kEtbt;_)7;{m%s=+S4~VgRg7YLpnaHRtayn%1ofIq@F+8{p{k9pJ&PmN^s9@gs zPTaZ6mteN~lFiNqw;@Mp`!V8pC)L&z*1D4#DrvLI%6;7h4SIp07#iPY0?yjzecbF#^66Yg%TR>YbwdG|W+=y_TU-gN>r=cw7m23c_^E{*glg`sn7Dx4Aw?xlHPZ6SIEH*(1AwFF^e0yPv@y 
z#S@z&>!9L!jODTl@ELCV)8|n=F~ln8&gb06o)OFHk+p98_!Z=JT#MeVcj{{w`aml( zdmVB@a#hd7gBBp);IZ>r3*S%+kEp>G^IEP3uSE{H!*~9o82zLZUMM?2Wdq!=h2HeN zhx-it_bqoi=E1LuR>qqmo0_kw3)s-CF;&-y>tavXY;HWRH;7u3{v z3&jIPv)df*zI^EOIq)Uk7Hvl|YYgoNC|eWnFDjsXg_UzbHSI;q)X`q9mBD#{oKwo0 zTE)@9XY9VymXq&&c3WZoD!Nd(Hohfj%GH=E$`B)5Dm`*Bjuy>U1;biX(F_ z#GLcgR2M1-sqpKM%((*a#om)XD=j?d#`Ax*=$z|Xu9|bLTs7xj;Ho*dk88oD!ym3l zNhL=h`UdkZKQix*1M7#*6lCcLY~<9?2k2>%rKWFi27AIfdYF$kE|44VI!A{NQT~{7 z400hS^a6blvj)lXM9Mt+_rdvIaHp|| z@+{H)ON{0EVIzVsyfK*4Ale6ydEoFkebt;6-T4@I>Fk;dtM(4g*E@~Bc!%yQz?))5 zbzjZB^1SGNE%(avqWf0v(e3Q}7q}Px^?V=qnp?W}a_99*F{Mfbu|~zYl*yE&bIP=*d_;L}?6`4_uXuYJS2Vr3 zgO^42Y)9gOfeBsxte}^@`t|7Ql1J{UIC~#ZTD@IQoA$(}#i5Tk^DDPHVr>ZR-HO@~ z(x2^pHqdBzZ%sHRx7eA`7rXB!x}WCRbnHUL#uZ=EeuraN@EvU7-PY(n_|t~>(V>r` zLmwyp<^aEg>`(fDeHWTr#q3q+W$)-da(%6Xcl6;yjOH)4^=x9m-0X9zuBk?i&*&RW zFau8FM3GPFO*I!6UMHFpo$~uDxPtF@s9YKHWu@@N_~?HZ^s8@6N1FN4L(5b8pQ(XI zEXc>sWDTm~(28BqUzdBr3!BQN^D@WRpdW}&%YU&h(pH#P=k$p;t3CNWYiOs0_Pd!g z{mvxcF|M1Q7~GgU#@uA6Lof&r_&t3G&_&((Wv#-G@bE5tsg`e}dIRxRw%*_-Cn52e zvs%$dsA1TTLI<;zOj&&T8O{;zY1T%kDigIR6sLu8s6T<6`5A z@>RrT)Klk5>Q)baYJr?F$B^QVO0iedfF)B@W|;TOL{ z4fy4ZvM%3*7dKUdJMqxP;=htJz|jzaO+0j)Bd3r4oWieoSrq?K*!4{n^)e2rQP@RK z!j1Ob;Ir_FSIRCBt%<+&g7aMZt~r!#HRXH&A6Zmc(osjw*%J2ryxcjsv8vJ{=IgWV zg=?upz6ka82-gG7F+PpGZ0OiJJE*f0JgHvcO!kQS*G%67kFcMU_7jOU5j}Y)yqJ@V z%#MzeJw}X+V$rqMp}zcuD?Bf>1D-P$y?-@V#nWluso!W{g)8)4nPLJyy5I04evQ&e zHqVRtHSGN$&wZL-!LiFI49{C@Sd$#K~IMNe?*_jF&*`lJmK1TA@MDi z?T<9->yM0_XizfT7e7TF{A?{_DEn^paGO5_3*Z$YbW-t@wT#D$8Xeizj@5ubB( ze+W5c`yK^0&b;i>{)RH{t9||fcwj>few&&L`~wqNJJI+0Hu_yH-|eLjPJBHpEoa{v z?gOjKiYoE*)ZpjQSjg8?wI}T7Ir%(t2LFzDRzAL{>amx%`rODhXsHT%SYWyOHnZPT zZK=(bk>@LEr-yjJ0DtxoH_dL;EX)_c_u&5hm0GvcxFG)y=T}nm&u+v=!LQPh#%h#Y@g=8UtfBfKO(dK z%X{W-E&Vv3FGnUMnjg*GKls>9_P(lN{++^i0G~?bzY$A~uB-ohk<;rLm*f*AyF8|4 z?(kKmyY91Go)Jmp0!{P}Y_mMIzh8Avj`ZQ-=<>vXH+WqBjy7wAXFcCo%#+8hB-ZYS zd*l~dZll+y(~--;$4bU`-A_td>%nKWFUgB-lv?u@)6wAb zcWAtpB%3)+my?G8-t{wh)XV>4M36HCo;ZlE_zH3A2i++R@VpIe`HtYrJEsJ@v6b|m 
zXHT^Cqz7M4O$buQ$R=`&EWOa#>P2S=)4^#!a#Ze|`i}h;FVD&O$ah-rk{&{tSM0js z@h@*a?;~Yc`{JCJQ)OOh3+B(01&=iS|uQ$cRS z+R&H0f$Q+ATge`kO)BxH))QmB(sI=Xev-_Rt$2gGwu-pAg=75#x5x%z446l!Y?))S zweG#Fe{gkfzuuK&hsb`Eee;(R(-`~y)5iH_$__Hl_2dT$d>!5qiC=F*1_Ty`_y0en zJwU`J5Pz8}*{nXgN=&NCKPXw@bnJ}g)rscRNrwiH;!BUl-z&$?tK=q94uyI2BLIE7 zxK=^WwoOGY7Rhh)+>sGJ3q;7Qszx9_iEhS$OFzj_|eK6hL zBe`8`pc!VyL&QxI2gmbF`+3qJI;b%h8u;jsfv=N3pzm#||Bk<-4*uPA(%ITn$v%e* zO-D29d-eCq*TG)w_wU^GXo}U?bQAu<(TroF*|P5o{*HZaGkqO%sSX*Zbpz&p?O4tt zwk${2>MKmgxA=x}8CZ3NzeBVpIM%KUuS@u8zn*U;=N;rtT@D<=Df09bPKD3y^xJ$m zxb;?*2Oc-D=$nf&SpV0$tG?kH9S8AbeTUrCeh7GIHhssQ=lM;*qgZ^!49sL)RCgZx zSE1K*&D$QzOY@)qR;DqZy~=^ z_&YiX9i@l%-CNoJ^db`+be!F;^*-s3es~CcWJ~?m!P{)c$L@n=kDuC-ef|LN(PQns zzv{Q1Hvy}jtzyli7drJ)ZkKdop4D@WmW%oj4Dh)kYiy#a*!{PHrT#1ACjhss;|z34 z7M03Z65fBS_A=~#=K2JW=IjAK{iSWe5yr6^yOYi`W)&4P77C`b+ex<2Nc7)-e3k@%XC~S`{C8B(jz!{f_fa(63BrG%=w)4OVMC zeaW@b`kq5>)B$rr1AD*G_wvRmkHKhsK2g0ayv~}(nQzf=r0Z?MKIpaU$m|;jzrIm* zFbBM4qG5OKbMR;HMDpZJOV3K-e+4tOcINW2=sQsGPyT@bYoB>n`3EMD)A(G< z_cWBWzWJyLa^{My6Mhg`x9iRF4;)ALYeN4jm}xqCu=S5a#}_w{>!Qqb8weW4-kKVscg~ZbG{AarS&h`&EU%+zZZtx%LI*aI|lm8Jk%A zj^!QwgwI&Bx*4zHS>WLd;r-gjIdgCXcu@Vqk?<9*vz~Dgt~Q?-obm6R>!vj#mC-y9 zev83pH}>_3JIbM>nb6Y}&=u$2Tz?sPCdyi&y-(2HDnFj@fH}L6IoqQ7!km!JqPZfu z{b{<}SK4zzPK)WdIMVZE6)R#W%bNLy#luV97VEjEo&7+l8{=VXQYqR(ij^LRMV zl=>fSi)UNYBYo)vN725hy+?Q_|Get?3-ug<=MT-}u&dal$F8Ni*ypMJasm1ve}ggM zd|5rredRw=+s*c#vXAa#3lM|RIycO3SIJ(*Zb4Se|F@<7fo8@pnzJt&N20iM#gWvR z@IK?{^Jr{|Y=@!G2A?xKqxm7-^0Be}*7sdr^GW{Kkkiq5bE11o`f~#D_8(-(| zSbtlfy@d6_z*zslI%|04l@rX!%KQSj!o!_$22PTc&oc&v+&K3XlVun_4@q z;Qkw5_ji2r9RI+#<|^g^UyDa;me_5^G**wT^w#{W)SG0|yXsDq?K%%U5x2ZLyN$icp7HP5kk4_-B)Cx?8|iITdT5J<&>X!Kwn;Eb=Xg&Y6459^wCSG5OyOTU+_&r?Abr7KC|(JKJN%`~OQ<^tzlo!pO2hAo?WO7tACOyzyxczu3KrT9Z?UESObm-2r`a^+O=8sY;< ze4BocaKNjrRL?=~#j_`aw<*;(wvPiJ`QR+S`h0$qY`%RSxjZJDlv&#%y!%T3c{bk` zEKST6?RUEpy8AhFxBiR9dp`G~N$jEdyEQktzUv(Bn`2VCrm_d#&v(TRQ;Y9q{!4sw z@9gs%-gc&Rb%PWB?^we3P0XV;yq8^Y5V;kOHy&QQ3jOO~f|;(oCciH^uS+~qWfxI) 
z9r#gBkXg*t+0Y}o1!^}#_u{j;$Ow(I;uaD)kF=6C9(}jI#@D_Iei4Z9Xy^X{k5-Om z@z|k!P&5_G2Srn{d{Df&8XD9(_(X7@#9UyGp5lW^;@y-f9>NE|04_u`u`*2ll4@|U zcGcRPb+hvtB=aTH_9N4_oE+Tve=O6$*Z*H-j`)?kPVq)@#H%YuC%I$cTa0UJB0K^9 zo0=HmTUE#d%}WpS5g9Zs)R#qvj-=Xy)^N|;3HRg-!q1Z7XTyPgA^cx_>VR{Sk67mEoW&9P8MJX@ z_0o39nuE~q!D;v9+*m!ieK_TqUp7DUz^kG%_y)?0|B3cqru-@XN12sT{s%v+w322e zM)+9|K6LT353c`r>=X-o#ev=8Y#kB9)4bLQ+y1oq82tZHXv7sj)*@HL(=6tmn{n7r z|Fn*;v4Ea!8M25miUa53qml>rm=O(+f%C_~^suJ*R4fH%p5 z)UPpioMAVyVq(J*_B^M$%yh?mf5$Fs*sPTfN9}5S!f(>nUVe8`r}h{hgzu(1(rWSb z`L3PPl(W*r*FJ4!b=@-M`#FE9xux)z)wdKjI9$pO+Z(gev3<~u@+`cCAv2mIX4^8?s+^&D5JhLQEnwa$;f1As@Jl*TGN}XGtD~N)_6PpImaAGPKE}TA=$>%RYvly4=Da@XBeGXqk$K&n!g>?5 zDf=f8dF!&A*c7hXec)HRUm0`U4Q=kHkHygEcH~6@^yh;9+|b_v<7{Yv{_cVPhFeb0 zW6)m!`kPQ4XrEkN&^`kCqwawe=&z6cbROvM2IwyZnUf0rJrDiu#w=J3@7Fq^Vte*d z=P~p);zoSuT%bH=akUTN+n71!uAG}|t}lGB`uf5*9r2#$Id?U_;+%#iM@H>J=6oOKc?Is%Djz_D`jwS#y0*JZFvfyttUR& zKJmb>k{FZC#9VmbL!U?XiJqc1fE!-)S+Y+&Eh_soZn6Pf5q_2b?%;g+B3`6V=y4q{ z#`!x!T>l6S4c)Ihl&8gggr~)w=4r@=PvU9o!2eL5_6qHjCqTCH2HDEefomLb4HeMU zZ0PDL&Zael+Q(D}2_wFT{KJyp5w#a|cE zxABhnT5@0c9-1n?I;?i4mC^OBDR<`_CQr%y>Tea^WimZ?LEpcr7~inck#5uX^Wbi` z3G-EWV%L2CISmiO7ayW8#An*{tundH9b&R=`c`@NK(d!6-gh`~Gj-owsQR!)=7@*B z%~;JZxGwJ10E`WShB-(?B&_J_-f(T)9-;kqWx3O;auJi@@-;Xr8kK)$i4(m zK~CDX{d&ve*`Kg3rwN{!2z`5??Mt0(>K7~a#GabpZEO`-0e0AwO>Idy*A|swoJN?`MVsMw45>6 zRWa?qCX&49l~EP*)4;M z!e=I1RqNmKov2OuV#EhUmp`Z7{ssTh+NoF=cu=hDlrAQ|6|0BImhXn9qdd@4CA&5b z|AX6>ow83QI}?$eedu%xBC=C_E}6E6!{emCErFib!iP4%=YsILpTpbS@HwxQQh6Ty z&6}TE>BYw6|5Pvil3sTedhu05bh~8e{k)3v8h$3-j`^{GQjRlPeDAP^l*(M$&Hs|GpVzmcyeI#!buiIK{=u+p?W0eR0dF7k zs*7u^Z0%q0oUJzwm8~fs(aBP5olN!uF@m;ijn&Db_Cg|KG*q@`kOwFgntKVlmTZ;$ zpXf+wSPISEh@G|=y88xnckgXW+wa3hlk82hQaqiOvvN4}=gN<(bVH|(I`}pAnk!>O zrOx>wwjl8alalHpx#}OF%Qk*4e&g|ZjKyY}jQ%$XpT{J8ACu4nlcC3Y=Er@O13e+x z_E|3~Dd;N2cd-~)BxmLKn8#dnAYZ4sjCU;ex%flm_ZZ|G`91bJNBF*BIlJ!SyKnMc z6S7l&59ITla^~pRE5|nMbvnDQiZdO{`F^D%(e`_!!*iuuPK(qjzlURlnT{_cv==zU z15~z@x^A?b#3Lp87Bd&#;I|U}a}{y``X+x(nymwjpuL0mJoL@9!1?Vv&?_X*MxY1$ 
zz#7iHORinW9O|jaZx{=WOAm-cmfeLOpt&erU<%`~4Ee=AL*FyVvTfK`%h7Xx%3PG5 zdkcE*R7aL)Ds%ggEf>$M=Vp|CK0UXY{TER^_m+sBOKvq=515QB$hU2&1RtAJLcfXV z0ke7jci`LOObz`Tdcb7lMznvDWs+IN{64Rqo6lP9KP*#HPy2Wd43Q}ZhR77HA!{zg zMaHC&F)6sRpkXy*vbSO^I!1EsB8^K#$G97wxsp9Xw?GF`U1Ry`2Xiz=!HB-m9?>_x z6Qgh34j!kXZ$!uFqc(8Z7Lg5{U3z94ctMN}yb`-Z`o;sczL8j)@ugJ-RoK9?f1~=w z&FF8j`o>alKQ5wg9CA8seM4iU{;H2reM94@F^uXP8n>1FPU#yF+jnV1-_SbBne>f3 z=HMsk8$Q~tp-vwh>4bLkM(!O`e$S#%V+-> zJVg4c?E{PY?AbRk41O%1eG+oM61mz0eqXeEK)GzXbZ5IpxP_-ti{qcXe(==Qvg z?AIFL_2@L;{_0eI=e2(ueMWYNd}9vy%6agJFCqVbj{J8(%TD-hnj-<G*h(L-5|| z*F!H+xTEs_Ey{6zg-^N+uG-C#{>`~O;qmIv9!O9d-c)9|J&%7&ScPe3&f&~e2Cbr zeQ%ng!9Dy=#I7jq_0?wZ_w z{EzdGGw%Nrda4`i2wFyG@r((Ts(#|C{lx1iwBwwz8YWX8 z!e`T0u|};k4fWk|_UaIaz4bxx_I|RNv7G1q#BgaWE~bx)FH=qg7kdE&+Ze_~aP`tR zLkGq$Q*<11KQZs!zJ^La15=5silTH4KFg<00=cmE5#(p93 zaoU&gufVH436yE*XPoQ7gYfzqb-5WU^;n?wPR8|t_(&1WBwaayLeS<5Xc?N{elAHOT`Q7*o-yIt}ntF+G zhW-?vruflRzH265QZw(3e{_(&Ta9_-I}giB z<=SQPDAV$Y_U7;IY^5&mDqm1_6r5=4yoqBAoS0Q?)a9=&1!Ik>?!YUaQ;2RWFC z?aXDL^nUxA(N}J{eR@W`o3EU_K~Gcm^_-alozTUfm>Zf)uOnx7lPjPbnwS4u{r1yO z<|cEpZtkVrGcONyAxm8B`MxIfuX8i!yB^+n@8dTqW^n7CaQ=X2(fL}?h1hFdyO4TX zXrncgSVrnFzrXo@U}$iiC=R-~$EVTtG&!oZ&vGqr6tMqX^I!%!)dW+^_x=6KDY6zi z{G@lQsDCrx81`+L<^<>`H}tFN8bihP`dh1r!(X+0a3lD6!ckqO{n)P<_Dm~>%(u@n zjTbw>v&QO4e}`nSJr3RBc+!^&Hg9|$B6iq>c-S|ji z$1QsQ=WLS>FQ4@j>Ps`oFZOC4wQbPR{joM@G#_NI&bE9lA!xzptg~FR?f<8`f)4(7 zME_sw3R*6BUQ8MBMOTk2XecAuVQ~(29%K4d?ITHs25R=^cIbfE9B^$W>p<?3{__W0|}Ez?j7e|1skk{SNi_5A;kmE46b9bABntnD$uvDCk0)q-}` zFqU;=H#f1yY|&1DcF-*vl(Xg`=??sq3sNwPXKO4qw}^3X(A-L7&Tw|bp=RXnf>GqA zqmD}En93zG->#vY&K;z0TeA4w3=b)c)Wh2Eq3p#b=tmzLwPr!Py)TD-hd+alQQr&~ zGTp`Zfp{~Z_p%Loc>j~3?}ta;V`J$3kA}X_jJ)T}PrZML_j$($PuI_xi~kE4hvuIg zNVvRT{Kml?_IpI{pZ-^xXAUjK-g6@JCh%iDv5(wb^Ywf^_;Cgn^-th84&20+K|akd zC3pN<`1XQ2>kxcvenOJ}{!)|BU`#?+KEK>N=WEWC9W4{GceJQ%Ro&$u)R`+jr~=<= zN50p+*p;jwhV*|4{~P0I$mi!w@wZOM_P6T!D(a3cw>WA0Uw_O`@4CqgVaC^8r#`E_ z(iHRLbAtOafBOV}l&MwTKA!K?yE^Wj>HfkA{7R8KRVkO>UI#o?X4ov%F}{vmGLDv; 
z@{+9Cj@~=&-;fkH8(7{;`2G{Ko9Syw^X@ClIuVk2!btUHpDL zX3Fg6`7Ims27ILbfAe)UgkL-p#Xa*NIOzn0dSor_b1AOCy)xH870$v#*3Oq7sdtQYq` zXMQp0A_pB~n!vS$embCqar|a-K3w^q%`YxD&KKV>1?zrd;zH-3yFCy7OZj!tj>e-j zuWQp{ex=86-R18t>k=-_yDRR`=l9mTD|V^Ax10%Gs#7+wYr^j~l^R!QF~7H<<6PP` zjK70%8OLt{y1GBF?Dig>>7Dq=xp~x;x8jd)fs0Rr?|I(;nL7Q(8HOXK<8hPx_xIA@ zrtQUtCSh+llKicg8CTck^zmCFD>8>evZ$iHP@hqjdAU__;zek$;X~ILxX%1RGUNFlgw9p zdB$|9y{E z^j%$dAYb1=pWMv7pL^+1b9h(B+1@*yR-JB_@+|OgCz6v-NEjQh$F}~166nWA27rGZL7HMU4vhqc3k8Hw~ToenhtJ{wA-Cv z+6srjBE4L&{455R$0D$N8XP|Yj@b6k)OPoXy17?o+Irvy&YwmuARBh%aZa+o$hgf& zn`hOLZ(_WeW9!zOXVgpH(>%-N(>vF8d~#m1O#IAsYtBg5Fl&y^=PAG<8OY(6w>*Ts3Xn}NO8 z6Si#&&=CWCV;Ez>74@C;P$qVN{W12TM)%cU0G+w7FnObcz2p;m3%Y~1Y~h;? z`QN?(`@)c0#dN&MbM)2@Cv{;%`!=BW>-&A^vZvb*?|GsvyZwD5m|y6X(&=+e+_Kfc zXq{F5lA&>cjV&`k?Pu{w#f%Q&ojwkl> zTZwMyJ#Toha%*z1o;vJ3PJed?e~%q3tT zplk!Xd~;Gzwt;=0ofJ%p-hVBLb8;ikQ<8(Sr|mLdOAcy$?E5Xr!K4%6KB{kP{?q?J zC;d`?y%~wY7wOj|`c+9TN$BjAyXcpne#OzRyXe=j|K)!moio8Nr#|&BpEbparu+U} z%hPum{j=ZW>%&e?7oFZkKlSY9c(b)I`i%bS*{j3CIeG1J^jpu^6uEVBv>g4{v&P6C zV!Ir8&@*`5*74DD;6u;m^DH;|47}*s7Ic=J=ri!6XX!i>UhVq8lb+p-KPV;o41DR? 
ztE>yi=CaFyH$6K7O?C3KY4cx)Lz`(n*8F7qq(|OuwckxjvEPm3oyKQ6^`rBLziUjj z-|cb6;VT&y0=`C#kIPCA!Ed%`9<64MHd0=^tc3BIO+3B}J}7?2+?j8;ku5(-nlF*> z#iL^BS@Xy)b9`j*)={Rhl<$v!!kUYGNP}xe?Cfc9weEeu-TL>36I}@`j34$xG|G;b@wyh zgZsugg6Z(K_A>UG4TKlgb+1gCm?u3fA9%v`OaCC6*8uGH+@-Qvb zosiI0mTelNWqX_zZT-$+K{HN!mYqS%FFr^v2WNcSH#x)4g<|UG?G%vYo{P1pDjOX<5IEd)xku$oghyQCqX~5`JIccX3Mgq<3d@4{c}n(J)dES?13gD39z_*%$u z)=D&nKcY_I!yZfU0nKR4!G)`>7F-C{$6{b@83Jp!GZB3xnO_n=Ph0OStK;xIt7DVu zy~gSo{CQX(vGoCP8LNwIJu$fP=r#XAOh4zt-{EK#zugY670*>J3h~_U(8pV#1No6A z;{(_p!*hGUZAOIWMrrftHQ^W_;dwf=?&6mSKdOM|7Q>I~vF*fnD}S6A?1C3vSzT@C zwS{-tJhm!^$NsbCRk)6=YvJ1k)R9XaEzpq7&n^%1v#7r)T8H>qlux&Z>)1LJjuzra zY&dTDY&d|^p1Zx|Z?MPdl~KWm8Jll{zXjrv^eHMDrnL_AY6VItq#ycu3u5WR;- zPKe&a8^=cP;fdmLc6oTA{7UvcJP_R`d=Kx-jNZfZ#G8sGi^%`i3vLCA4>(-NH7|0l z$&uJMc2)hRTcY25itJbio}**sgda*Ks!rEyVO)C{FZt%+nIDrK>ZkD0gG~G+IK~5q zWTee=BQnyykH|>-9(+hf+V>F|Y2Qa=q7y#8+F zqIheRZtZdHOea?Ixt5=iYxDoccnLQq8E-aZl0W`5ot;tl0GFq9 z4{-c{L-!Coq8l?M+m>BX{0gtvg4b9dr1;F&KS1swW6X&K^=T*E^)JAuqrOM+$Jp{( zGq{yGr|%>;cffz@&<9niVKk3@5WOCFxqgUQ)^+&Uy% zq@M3xNu0TkOhM+RXXJ4{!^z#9Mm)%n*U917nV7^`F}#=V+dXaFCR-O~jZ**j;HQf6 z`uAlstO@&K?fKI)24Da7OM8%u-Lhp8>MlK$jO~aI*KIb~xn;?Lv`KkG76FI++lo1I z5trvWYf8{!9bpmQl_6{5kW+e3++C;#-8}(a0({u{%zMzw3-ZX(%ibTCF#|0p7j)9E zt<5K0oh{(@W$F}fexLQH+ax2*eBxzKN!^kKx`C_~6lh-mi##zkNb*FYnuUr#>5SGVENF9<|$+4GeBv3*4@_vH@?&Oy$}d_z`81TOBpL zbD6RMTR!KX#XK>GMAs_s{pWDmKE}qam<1dy&_jKinSs6<3ex7y%me1h zvS#+k9D|0ARh|_rvBx7zv4*yTR?6SEetBO(Hm(u_3Vqw=7}Zq8B>8lKX1Lwnj8javo>aPGx;OjdP;XLc@S<34W=h zyG{R+d;I+eoMwymyAFrv-N)YTgUrdn_rZtTfBVm#yf;VtU$?FDAIfK}+E%2_yMf$E zyV3iVQ}u0foNEoG4Sv0iy%K(Wj6b99gU+PTp$vcFEBuNbDYYfQiyvx6@lk9qzS}?@ zJ@5x;ci`#%!HsEg&H-rg6&JLwb+)6|l(eStPJ7CtIrr-5Kd?78Pq1@}G*+eC{ugLD z?CX%NqVc27j@ndm&cyK2Jo+g4B09!qd2%yjt+k?>u_oBNvg#KT&GtWDd0TbAm%h3z zPs8J^4HeVJDrhSGMAZYbEt^;mf17>;Zoj#o*u|ZyN9!6|+haYKb(zzA(B@;TIk{3V z8*ppAOlK7Ftv%nYuN48~vU*@O#MO!a8E`gK7UxbUmj>T7!<(@CLN74qB$G5xrNihf zgoX4gKul#IEE6{5Z$AfKo)$_FKVy!--#6qpho3ok=8rz>3_m-;97Ya=%k_t!_47<} 
zV0EP33gFyB|Dv#aXxpy0C0zdJteL_S4)wAwBiW#M1o5H*YfKS5Y6CXF2Jhx`f|WbY z5wBY2x7Kco<)`W!YsDj-d9pnvFI=HtCF56f#_c@3T2iqRCb|zv=KZee* zk7w(#rQ3Fx;A78R()##gm$tT={tJEi+5>8qg`8)%Cqp-_YrfXcvtVe=nEr7V`Bj0P zSQFn;=6ArhO~Zae3)(qx$5pL|mut;9&i31dod4WUPTq$1z-K>ra{;dlSaYrXnr>jE z|Gv$@DcDu7;Jp@HL}}cD{s-6K8(ky3q!RZM7wp9^u#dj%$1mWD^hG|;${u{7cKF`sy3S!W zijK7QY_GMVXNL2+JbC zaXM|=`zW+V{zo6@du+@`ev3Ege7o?OKz9B(-@h=Xe)wS(^6FNHbe+?=K>HW`#@>q) z%V(&=_S`Mu17Oa5T{r{@+8J>zts56 z((W~DE-BXBeH8sDwQ18zcw8@I;Naeg?_T;Ve0WPQae@K*uQ(>Hb9b-Vaf!aKim$s= z`uRtF<>7N*qW=6ZG7dv?j7z6bf0Z*<^89+nAnVi^NM7sQhV;-T=v}y|0vGDH=t5)h z-63NUl@ZZ-;?-IrHpN8dYq4c5+s|C|oiM4z%;U-@*XGPx5uaAOV#NbVf!hAcZ{{dB z_ADQKt0Mn}P1*-S4xNxYFtJ~JPe#nQ23Vud|NG$du8ZR31LXE+;pLOy?2GyKOmKEb`sZeSQhh|`o$BM?{tMs! z{u92H{d$ZTs@U_bWW!#AZr%%hN6!85iC<|xb(fi-_{aNPe+$p+l-@-=Zj^8BVGe1o zi;h`yuxYqCLc`0|H_apB<5YK)kM-Dm?6e>Am|~40_U#Rmb8Q_;W49OE;XlwNH^OhESH<%GD9;JVhC;u#{_>rEm65D^nKiw`$oMnC z^j*qpZ8kUEvFzB`sX@W9jrIk{HsAq2^P~Es`=Pt%rlBw70()iz_H7Z^Ro4E7Z&$(R zqOkW6k0&^k&xEsu?bx>51Wl3ag}cXiAqS2=rvumu-N zm%fs?-88L5NFT@uh42Bx|CcS#d>}{o)OQ&X{Xly&J7Z|32mH}*c=N~U=6|@fru=bz zBszZtKRVG9=F03~erI-2^Za4-9)lfa>jh=NGfpwZX`#VE(^`YS2Dt2VQoE6j%n9Pc zIfn~9oO4Khneg5O=34HPoIk)B^RJI=x-MF#03FLJ$ zDU6?V@N@e7;;63lW#MOyW9Q*A@mV)|rQ=i_X*d0r9`Q1BRQn9}Eoa{zcz=xban}m> z!){`5kwJ$Fo~+-b{q^CwThV&BjF^!VS3W?!_T0VXbF{IQHujBUJs;m*FYVOQ&f~PB z{3T(1I$SUEx}ji&>tX8O6|H|f^+)C0cIKEpp2X7RAIT0De<;3@=fkc^i^@dGykdBN z--qPRmhFj7&p04cU7<}e&-S9Hgn3Q)-Agg=qUBq7X4hAh8q6Ah`u^Gp*hxBV@PgYQtfT46Z~cb6RUQ{nTMaU>nI^Vspr0t={A@7PgUt zGfN%hwN>8aICR`$=(zFdfC<=2R`9FHtpe={p#j{L=uf@qw28+C zH`XVH<5#!A6Vj2jUxS8pHnnU^<<0iTOV7v+J?&C_rS<~3LQY~iKjfLt+vR|gxdX_X z0P@eOzM~yKugzDj+UbQQ>?x3G`7skoH^>$dOOu-8+^LVTpy_d&&jM&bEc z1fFK#OgHHb;)lXTGIb3gv(TFdbg#Pfzi`Q!WDS~|y4PF<#+`asO_>6BcyEiwC@TL; zs9XGPXx{7(t}i2ZacOHbe|8Q0_zlKFbm^e(6~Gef8+FyOhPT36wurunt}8rkip`R;=syT+I_aF&{TIedxd@(UK^cjmaKpc@kxjt{;D z`n7fOSQ|cfmAOf>NOjBqyP5CAixvCTDL#KP;i83>?W^@YtajMbnM>@G#!Nl}@0?M= zyuSq6@mpr~mVbX_fO9ZiE1dIk|B}4S1suH<(PwcyiznxvvHfZ~mwA;5*Lyeh=4P8^ 
zV;T2e>U3=$6*PaTYJZ(FLv7}L*v#bU3-)0%yRn%cFEhbPY-YtAhtB*MF2wwa}G zY{$1Q+gUcV>}B~VKFwx+4IA~sqk|hYP792)uN-(|?FGdi*}Ow`_;1b$61%uj<5|j? zpt0pn@g0?0a&C~dg^$JWAXhhR9XTc_n?C=~^7Uuf`I2Q{$q#Hl|C@aLVVjEF7@@!N zd^qq$`DHI<_t3WB&9$6;_269lXT&Rwi+M@isz*A8;K)y6ZGg7+Qii<>@-^A-_v$_U z(fh@;sX8|EK8l0-VW)i=YU^X){VmV_u90tIXG5sfj^ zSQIms@+WC-Y&QP3u%06M5U25MyBwc>0GSoFhsD<Md-)GO>HO;x*dL30X3Ji9 zJT}7<-Z?V|8o3TQhjCV#BXlADuf|7aVjp1(!DD`iym`a68_e{Fku$&I>>XFyO!ncr z<#(Ox2Or1|-*9-+h46@nkUM&JAwJD7@V|rS`rjU#4F$mk!;k}#UHBGVX$c(_$_cnN z9CK3W%)Q_ZXI99~HmYIGz1ZwN3QxMdB!G{`#H%bq_cPD)>gQF9eG{Y^gHA^S&x5x5TA7|GVNuDdzNzYl0$jc%gKMpImr9nynB(h z*W-^SZ)~3*8K7qk%!Li;QwwkZcK;#r#Bw$vF@@x!c=DbczcC)i_f|ZxxMorR!kV%D z?;^;o)u<|#nuaD99+O5M!zYToAYp*|4Vkb?f-A?en zpRq%BcSzr8;aN9zmuJmXF6AJ4#8iXs@S$u=+S&n5d$H@j0xXKB(DU=T>Rmo?i+W5W0HA^|R4>*%%wI#ot^A0daOeIxB{+%lxSX8sHzp5s`pPYfqw6448_VZ8SvfOfc)Q?fn+E;;Z0GzR= zX2V###VlJ3zC_QJ>-$#x?6Obv=!5 zBf$9U-0A@Iu3SZ`M}9MWsiD@x_L}9PH<-VBv1w$N=vHnfANnzKzY#c-Pa8{2{6XU1J75E#q!-3la{c2l% zCK9&AFZ42_-jIV0y-l=J5AIwUcI>&%?3>=JoEy|v z1@GnEE3VX;9jDf1dJ2M-GrXtPWrVxGu}*E*v32IaDERUKdo*`3uIi84bA2#4qa40J zT78VPW%rq9;O|s>;36zz>%GAq?2PAQ>fOix`1Cpi+diH93w)!jwK*NM<06-~{@=m( zYR9gdzLit{N#2)Rr^iRnYAC1i`K4g|yztP^M`GYnKee}N0%Lm#F*K|R&nh?8tcv8$ z9B->@bOrtDgZ5eP^i?OnkmGGjhCe0y*20T4??b?l2R`1WjQ)QNzHudWXpM2dGjY~L z{P^pWf1d+C_BBO#hZhxa{P!h z&L^8szIf$u-HV+_+^Y7^&acLf_V!r8!rKCvgXA1Q9%gU5IEcJ_;<|Y=2R1WynhWL5 zF#VM~O~>Q!p>xQ_Mb8>#(z>+X@HcFpF~slQKpiSSnmH-EZZPtW*ckM+bosL7zhs@E zDW^Cgy#H8v4&KK%E8hldLcTw*Ho+Xrov3pJ#b;(PW@8dcwI-W71{*ItH+6pZsL+2L zC0>Rx;!K#(N#M#gqx!m;Q=70yh+~~9+>bt)bx|)dm@j0y8WPuhy?vtF&J$Q$f*ekC zF6j5VIU|wSqaEb!@1%^IJ${@K+yS4h&F7icv6Zt%w#aMPSm1a=bp8d%o2(oRF6NDB z8obW$!FRJAd%~pn(7{I8{B)4GU-3Qgy5W7w_ftmrRlTk=JU_yI0l}XKogD%8&dl_N z#C!bheb|%8q%G{BoDmBL^JIEUgby$_^X>NU^5O5$e)BL4{|OAL$4!4Lz@yedBn#ou z(>1=b?cgzUG{*CI9$SAwO#R!V^|w!q)Snjm9d+B~;VIpk@65MS>OelvDTbDz#|_?L zGY85UkBR8+{00=CFFf1tBHz(Yhh+W&zUiTzN@rciEO^5#VCqAT*W6aq@8eq+d?S}K ztEjKQf@c%2{+aMjIt}l`F}P?A+Y&y-pevTF9NNwTju*gH0l3No#$L+cTf#@2YnKz; 
z$mI>_s~wfVUe3FM2<|iv`yxEg!1F%qtfE*u=z3ys>Y(?T$eCTt!NBbB9&++W9_j=i zw2%DbEC$Kt$h#LQS2?E?{nOnwt>m(f;*z)yH#qU8nw{|AfiF|1>?!8Wsd&CKt(!{6 zmrwT!%4?3DLmO$($)BkI&-}tNKiod^ym1;bw-*=~i-B_X54z`-;wN$S-Eu+wCiywY zr`eThDULrA8pLOQM1D)1ZKu4>8KDpOU*9)6G8&dUGP-P-t&BdwoX_0+JM%5p*Q@nd zY=3Nh`FWlA!{YFVg>yrkUK3@_8J_bkY~M~`7HuWAUe@6y2I@8bhmTKR!1*-`!KDux zdXfK4YaDakbhH3VFJqs(!n8ikJ-YsuPVxyTo)Fps_pF_ukNt%;TIrm1@ELv6Lb>R> zbHR)FsQ&)}aZaj7aq)xjH=ecaMz#ws7vrTIio%6qpY=1~!sWkbuFKhc6Z$SVP3!BO zR=-L2_40u;?-d^@nHi0bJbDf1U17gTF8nX(c{u#fpeOuK)(GW$uz5h;q+kg?R}b%W zF59Qc4#DLHF1{V;h9_qNUjaC}=u&^j3}Ea%Uo_5KVBa3|qNDd*u3JmIlmDaCJLR`! z-m!-+^M2vM%f09HT;bh^d}04uNB^Tv?}76#_wGOUGVh%Z=|;)E9^_sxdh=oIkiE`S z@(er3_c_ML`MTJgl2@aBk69iM=Zt#xS#D3FBhho6(^FaGOsdRwx+{-45-YDO+mu#R z_AhDKWj{Jr z+QB*ZrKQZ>kT$gb+pV0+f3ZeY7FyYr*H~GV*Da?0#niu;`WJn)esUgty#8-e|2L`s zo7DfU4QXA#e*pO14!0*{C3?O~ottQ*h&HaJ4Z$4ShvSYho_}(V_DrQ84*GGgb9m*A zz)rrw$}gGZ%2?Rdm(Q#pm}C3!S@r*?$4rfdzm5L%kc0drXVukOXM65k;hOjBcvE!e zij;Z3PBcX)Ib-fIhbgLEkv;FR;ihOkWf~~+Sf(jzpv+@Arl{{+@|=w{MV#-#JIbAH ziYBVeIl5BDN11W{j@Gf1;eG35Q>1gCDk#%>p((1MOzXv_2>$E|UBElaguZBse7p}` zp=-*#P^Bq)(Xu?PrI9*j>pQ;xO62?NOwm4UuF%}b_g{~E|E+V#6#0H> z#bdBi}z1`F@irT1)=i(EX9`e-!!t zCz0<%rl`i5?rD7_^8L>u-#;Ds{+C>x!#ts1M85xZ`1^zCp`qu)b@b+vqy0tho$;R5 zXCw8z9I5B^NIicwMQ*3dBfkh|f_8-d7^&|+!}W#U4A&LfZ;JL~>$UEQ)bmcHo}=M< z$Tc~&^#fD%{PgeVR4^X9?(_ZXv-rf3cJ$J^L=?-2X*F0zLFlkoXC-*IPx??d#Q zW3>Ic!ip z(`etrm)&=ut4uXKceL4j`Ad?w@(q4^zNSR$j{CbxOmI?(6~u=Ts=9k++v%tIcXx5ydY)OdpG*8?H#WBX)kF7aN;XNB;KvWuOYTHugw|H9b#-=J za~bQqv2p9y5$CHl6hF4E&7x}bL(eVQBg#P=v*@1CJM+5l?1BHC%@Hv6cF?^oE zXC9wrd~!!p;A}o8@>$O3nb?eG(`?Q!Cj;m6dos_f_&kTt*@JhQIrr`@e^YUert&)O z!2aB;@6R*V9AZdWEA8dZlv(Ss<+Xo`B^!Urj;T33+p3AnCgsaKWNDzC8 zyu*9y+0!XELREn`OJ>Ozj4ew{1M7TZGYvk=m;>1`!Y{s2JKpNIZ>3|mWbX`|b-+LD z_=XeEbN5>9St;J5q$0K27fY=6$M%8yopxMXO-%Y<^-E{UziD7c%O~vkgHyY8)|pto zJMuUm#@VzFIYTIaH1|;@bq=tPeGGUi8lP)KbJ@ozXaAy&eGHvA*5@xV=#lfO&Pbk{je8 z>Q!#M;>KqBBwJSJ36+nl>}_mV_UA>TDy#WU$1xVNmogZa;KYt($Nv?KQk;<)zv)ow zp@LDq3bfZ^~5cInK{~v4Jq5b$>%F~;sL#~L$M_JWyW6a 
z4N0Flp^o8vcj|9~@4v!+!vog~pM~*x#5R<{Gx~ld-}R|A#r0X=ui<+W^KR_8j1Z5e z#n--@?%QANG6nIg?Y?R+bMUTClRQV-YZxkL!M<+Ea*fYyYbIXtN0~kOwW*Zx|Ir>peta)wvi)_A zkM1I9LiY4X_R%x;^h?I+Gom*V?^p@U*}uWge#!XBjOZ76egV%zFHL$y^lN1PS|>IA z$|P?^$*|CqVD;6+!pZhtVF|fy^xs+NfiG~wts)P6z8jwKzCX_m4|?CPb;Bchp879$ z!#ln3%iQqRiJtc_b;CRJJ@8Mt;bAX)t{dL!9p4-`JmP)7#tm=v!b5Jj%ni88y@z;XSw06V?6Iydf){f_)HIc zoCiL`4Uc%?)7|i}xBd&=@Q4>a%?)oY^|XJ18=mma|0mq=S}*+LZg|4m|Bt!h;Srwp z&v(NU-u|EGhS%nL-k<7*hrRF$H$38<-*PuR=zTxvh9^dP+CSF~w=56*95+1dg`e$) zN4)T}-0)T}e2N?1>4i^r!-L-bm$~5)uly)=!xNJ|<16vNz5I8k8*X{!`x$O{_;gSG zr@P^`Uic(8Ja~rZ{fTaP#QXjP4}6N}{qb&itM~n4H@wpeFLJ{ZUU;D!Zh8A(;D)#Q zJ^jmf!-L-W&vV0Tz3_2vc+gw_SU21n=4t;lH$39y?=fz8!smH^v>P7w(&s2Q-17D> z*A1`D@zkH=hKIfIfE%9h_TO^DgWmT?y5Zq6Px~X>@Py%k4|l_B$+PCNe}=i?tzNj_ z4Y$1b^||4#Ui_MFxaEZ#9=I1jsWO*+*6M|S;D&d4;m6(ZM8GrtV{Uk>m;T;&!)v|p z_uTNH7oK#(JH74qd*I&pkGkOzuY5e>fqUP7*9{MQ;qSQNmRCQ$?S^-H=kF~Ke7Fa{ zhu!dmS3bPyhPQg(|C<}$>6Pz)b;A?h_YZmC-uDl>;SulqeQtQz3-5KqEieD}xZy!B zeJ0#+%Uk~&Zg|2we}8eqgI@mpvm0LPZU0Yhc&k@_yzYi~df^A$aLX${y4~=g7vAND zhrRIE-0+AO{zo^w(+mHD8=mmOfA5A{Uj6wyH$3Qtzv_n9dhMIvy5T{ueft|Xymgd^ ze}3(TTi*4-em6Yqg}>s4cY5o8*$q#4;rrb1@Mur}e&vRDdf|KB@Q4@wk{jOYh5ynG zPk74?5j&%e%h$nH!$) zzW-A z4G((Z|K*0)p6RLoKi%+%_x&Ha;jLcy58d!iFTBGIPk7xOrF>u+A@CV%RPVe|Px#6u|`2B8p zt#^HVpBoD8y@l6FJJe-z5dyI-0*~V{@1$UwO;;P?Db}!O=9oo3FFS0>_O$n-yq)j$%OY@lMx?( zviFQi`@P@t-ebHs@^tpk*u#B$isBE_ac$1oB%Kk`xg^dA#=1GXR6{?sN4|_RN?mKy z`_nqJ#y(XW`Fl2X5myt=I;XTP6F2tul);<>R}m+cTI(k!%p#sRz&X<#&Y9*CS4YnG zJ4SOx*dNvT@5Ed)`T}(|a27Gw3_VqF7w77TXUjELKQ(%I=qbhd9ixBmv$v@|dByOa z$Nv7<5lxE^O)+8#|I~Qm<7O;QKHP(hEk-=6&J89wzuxE%v@CD9ewO0!>K;h%L34&H zs%MG?C^UU7)x_!dUT9Y35MQ#4^4cQ}gR_H-rT4;`Dz&4!=Fc>ybN-XKlM>>{N-7G8 z8(p_+MoXW6VnvXB=J(i#=B|%%Q`!5=*=4LyZl{u%vEp%XQ#+0Q-e%4m(1!9Km2kFb zC(lbb&(qCkg7z=qy@Tve>$_rEh)pQ2JDYfBez#Ia--#XJoI^>4&Wh?RKuHJZG1(Iz z&v(Vs3$}p$RQ0E7YVIWZv8b+`9MklroICNtdm3iVpx>%f@7AzKPKnmh)cV+}uew@NU5KZeW|asABlKeD!G^{f-bL^CGm`7Ojl7kTdpi_Rky1UtMpO z><{m`WmXKlyi2`f>0>MN)6f{6RlUtzky9Ai9By1btDHK@hp*dwHGQ+P%RFhW2-s!P 
z{SMGa^*!eS#&w0cA~-eEZbxObPwlQ+e!JRLE|&Ly=ZhV#HagS?;&&>h5(BdO4CAmB z)cGdkiC=u~qUZ+Rd2vGO4L!J2{)aM9!uy#FB3y#_+~6V`&R}$vlg0 z@EgfS;tR^?<4|S4S$aa*RYfN$+g5Ty*$&ahQ1!le<_Tq!TxvC0Lr_M_(AZ|lB zix)JMEAA({upv0hV(xXWX$tKep`VHqexI2Bz#GV@GmV?JO#mk&jSH6XnWzqNrVzQx z_*=={c!|Hb@kV&8&JQpCY}ivhyZy+d$mT60$#d+V*xy3CTc&U)1LYt7eq^HJWF$kl zU$o*g#6|s!fBXW~wT1V84!@=2F%+|0TyZ7vWBy5T-Jz&@xwST}L~x_5DYDx90%)M{HekIrl_~zUuQmfI*7M*c(PbAvXFOj_JWFcsaAMN((enk|BQZSs`u!F; z?E|qluCdPs4Sfd(@n{E!4RHs@w)7nw+ox8=Vi#T%+qU-3HrXrB`e(3--Tu8tuTvhU zQQ!m{<&H7L)cIX`oSd9vt>o|PB4)jY`|1?$W13rU(j6cxz=7iTcR^byb;k)^eB4T3=h@24ht< z@r~WQ`<&p!yDrUY=iIDzh##|aVQ^-3#%pI9D|Al`d>hqy>GE67T+;KHQI)vP9~*lZ zJ8proDsjINod>LYZaM}raqh6#7yiT zQ_7i^Zp!_p$JnIY*v{{l`K@-9ANv3DPA9Q?*4{C8pSvFO>)c7a=a<7`4+2AezC=zL z`#fq-CA9X~@R<4#q7Q5M|E8x8zxDJXVe`g6*t}uFPt?s>%PXOUE8vfXv7^^v-|kNH z#lxHdIrhaL!80Z>A7!{d@ea<6_>p=3aeF_DZEGRd-{!n?cCNbCwUhdPM7xRC!EKZ7 zdgd(F>%@^WzSI<7Y?&|ACjB#LcPl>7XgeP|KEONae6)r3Ie!8|JJd#O?R5ivrfu#s z7|hYrUsn@5_Qn6U^UXD?&!-K2FJNkS0eKux&ZkZ;Gdpe+K8?>+UY|*J+?^de3ZD`u z8|^)JbS&}lF|qEom$jwyo~6(Bcds4Zb{FIBUVCYq@+B%BGV$0j8wXwFp*#Rywa!UB zHms`We&J#qb&PEL8+ok{P!1ZXfIi~Id~d5%YMGwwh;PO`4$y7UQJAJFKd_x zbY2@EUm4#yyHilX8Llqo%hnyu>n9f7r5yLlt5FpH*EiDnk;vQB(!&{}l7^YHh^0^V zU}teZ;GS~oDM8jJppzczy8+qVL+)ACr{}?Di~CIL?-wr@l79^u^JdvgV~8y-jNdy@ zcVVXP8Ft;4S-3~_HA8cQ^rm=Gqb&;FyTF49jrMbA)_KGpy7+4<{554*tQ(pXf6d{) z9U1EIw=avgB$LoJZz?DF0qX3c?7xwFSoI(I3*)x=E^98=4nf12x%?q{JiIZgx(7qD zWsN;21@Zft6Jo04%h8j>nzz$(UpF1r@5#{Pdal^XwO9fb5R(-7<}0})GTbV zb!fwkS+))ZU(%u3ajDXg|35Lj<{c*v?s;@f z**%YqDU;5)XX_Yq*eIUy+&Je<)+swG$p6pXzI#Gn4;{niIc$8kbc`|U6CVozH;h1T z4Yr#4l5u7r&-Uqf?{jg^MP3b!HgN{Rj~^r*56&ISe@(}YJ3MthbnWQO zt&HEHL+J$PUYEb8{@XnD7k-5Lvt!PQ-+#&2d1kbid_~~!C2QGP_B~e?c#1F=oCQ`+ z68mg=K4)E*bJinggfShvb&=K}H(&3Ieu?>b2iw-EYZCXzbzRfESm#6ocPg(EcFWCHdoS5=KC*S0FIuvzctuOIS=G(CP^TYUMrDsF@a<0}r<+Fd ze&k5Ow}&MKm(8Yehx3e%zmX%cfqTv^d@$bs5D>JQA=>I$P{^#>zk;Uc4>ls=Tw zms0vyO24Y(leFANeIJ9QhIy6LaRpKUuI#V6pLv-$;fIQd`I=Q*so zl-E^fiXI>z#ffs)$5~_B_Kk0)U+MYIulPs$CEM4L>Ca^KZKqqN#~qpeKg~b4m37hI 
zlYedoYmEo#SBQ1B1+JX4DAe<&>^LJdJ6iTHv!lubH&k9>C$DP)IupM{@M~G;X)R>L z1I;y6>p0IPTo)Wd#!3#>C|{2JI_oxvX%n9BASXdmv|ekD=WfZ8itw4-g`T;Gu#vt6 z(F^oVc_;I96?Nku94Z-EwVs z4tFXTvu4)Qrx7s1IuT1U_3-n2DJ(__r_GJAh$?L)RRJL$MT4wY?lWCqU ztdMVkT*dKEv9BWj+{OFl9@-IIE4PR0C|NR+i&hTlRo#+D5-i{$?&j6>&gCbogJbYGt;+@C`C6(F>`KY>KGn*Pj}!nEB2@20C(l)oNz~mlk2#3?HT>0=<@{kUG%Va z(47&wFCsSOBKG2Z(KzcEYZqrqr&QU#I%jPrKb`QG-QNQGE1#YGb;6Hiq5=!d0q8aXB&;ScR9B?RDZiEZ|fMX(WPVH+4TGj=4q!t zw*PUMm#?6wYy%7DCR*nBnPGUP zlUz>ed@uR&`!yb5wW$*9&V2TVOwK+V)8FcH%XsahX$-COCvZWiqX9VbGMURDl)7tNV$$d(K)l}p@3UoRWc zVhjVFX5ZasA}jD?wEj*s@Ud2diwoXC5wv7co|zr)x|52qel z1FhM4b+I+>Fin>oG{6JDF{2OLxMI)antbFCdmeWb65lZVL(k$%raq25yAgXfk#!GG zn`~y!T|AE?cO_%B528Jg?_*C0uac`?8vb$b&~kN;d_3Sw@*sPzo~C@ZTpg5CS2G^= zNv>g^WNmt%%O zV9(vR=ua0g_Aqv=^q-t=Bp-*82cny@A!tT)B0Hy>x#a=t-Fwhq^0%AA`=6FiVypj?ZCjy} zt!ppgzT`(P>ALsQHu+Ar`_Gs~o}D-G#T@RscVyeLZI>U{eF55E>s>M|T8>P`|FU`e zT;JZFbIp!&e574BIKFy|JvjRD&qlKEM@F*u-27PgL&ouG&{*Q1#gpti+PXS@pOtXM zSn<)bACb9xNch>tKl_U>*?FPKV?5`eMPBGYd^i4Q?Ri0W>?IEJl!FhAQFw61sPSl= z*<*6X)5{*0Yb=$9Jr?yp8!x@kXy06o1;2KQaeNu^Pr^mlW9}HJBztG9IfoqM@jI?& zd`k!Tg)zRw7++$HPk6_;o-vNF$EX}0W04!_JGo9}hYjTKz}D)+20H{D$mX*5V6eMR zmcK)OWBH49KecOK(3QvUSVW(%qR;949o_i)z3bVz@L|~2>x27Qk|A2lhBGpEtG!pJ z9BhNR!p9LWmXSFw{Xjx(^uu6|2WPG8;LEn#tVz)ta5R*kX*c%>a2FST4Kp6R^SMWA z@i+eYkIDgK#2W(X{GbtRZTZTzUW?#EP~Owq@Af^?k6ei$cO;kSyKPSdlp7Vhs&-_5 zrSx*x#H#rHxs)NpVn<`lAc}1JNtdbeU@3=Qf?M6V6DqD z;*N-kqWxsMuee3;S!r1jzXrWue>^okpT6wpc{}%W8QAxc$)RME?H985g9iLU+EbE$ zO17TH6R?QWA{JvFx&QEUS7=<8zi5vU*tc0aQRB^C!#RE|$5zr9tz>HE7W$}h+IMC% z50@b?f0DLqM6c;J{>#2-H-6Y-U-=<%0VaE$KK44@x$#nK$Lv*(ki-<$`h{to)X$tK zHs)mVxwf7?v2%@NE%#?6(Eo|KInkaAebF4^|E#mQi|#Qq7UcW)>4)O|5-q;i&FlTq zR2_c8DI;QvAuB2l&3f%YV?~lV+I6w9^a$m2KZEk)?e~qJrsw_S@C;C{x`%ru#*@=2 zl^WalwT9buKX%A0*)QL6&pA1dRPN$#1JA3M z6dz?jCpN8N_?C$?&7z9WnGbJPKEeII61&|4_VRD4J?9elVK#Lfv1V01 z&wf`Qzbi6#3@$f`=^>V;51(fPx|!!up4od8Gk|@cx}EojdiO>0UY+FKs_%TryVx)% zdw10XAM&o&w?nmyt(^P@@AhK*&1XF1pllHivwy$g`K`5W?09OX_xZCt{}<-<2RZdK 
z&mD&EnYfK0cT(KRGx)G#2J;(aeoI!FE0#?o&roitYSw9KdzHj81qt?HOaEkix@&DA zd!>{3pUD3N{>SrQ%zqL81#PL_Il7Oi3V!YQ2kL%x+NdQ})cpf^R=g)ZChkynLwu=^ zyN>Q4*D7)(z?$tfa($btvE{|1*zS@giYX1Eha>Ro9LA8UG?MD`t;6bPBEO(@V_G5g zJLBnOEP@xAlcrhFYJ&1b{`#5N3zRLMb__mIpGYKC5FE}O9u3&k=Wdzk;33UFh12xA z79OzQH|uAqRM*b3oiY<_6-QRLGBWOX;6>dTwR%dIPn9q*w- zj(q;A$}XOF?`A(cWL5s)TB9=WZ`cU%Civ>;gim|<9>~Pr%@d#gBYEZE(^}}7=TV*| zd+L~Xe&aX#ua|xH9?jwRi?(e3%owBc$ds!pzcx?#q$b55q0iF0T^Ae4t@DlcTJRAd z^h$l62Tkwy1uE3{o@)Aj!Me@l(T~4J|0Y0F$M}4Y_v|}mpgU(Ss{NDtx(b*(o*ktP z`cV~VV-9ARDSb;J(FzUVg_YTmGaKt@v&4 z?Y+>*1fI{lE)c13_WV+312*V_-}c=OeeA_ge7KJ?k6pvQbE;+W^QmVw_MAv#1`?Q-2P0{k(7NwAJwE0_1%!^_80sPJDqn z)y6y{GIM@SXwghFCx?BC;-eRal0O)BRpmvKJT!%Ea3S-fxe5J)?m9BaC2z$4NIRR0 zxeN5@-TS5jufL9cIbTtpXzmzrY=D9XORuUt70n#F@cerPZ-nJgD2gYAv&n`2m0oLEAgCU&(4n%UkTn8X811% z&YCVy%PZa8(0NY!9vi7Pow^tG?AMg60ZdSmUG_9Jr=55GU`8AGG=6ddQPcR=x zf*JmV4``$6$l0H*JOE8?NWe#D2P^lVJ*)BuXO~t!c=jhMZ#_Fy`I)ofx7x9l`{$28 z<$PSR@WlDZ;^B^Skt4)IRLsJTc%S{t-rdGi6?f%45*lq?d2$^h{ZgQOk<%|C?s@`u zy;*03?(1RR#&ZwJMT%#ByKa`w4wRuc*$=I1RbI^lCoX|zq$iNQ#n@K19ekVL z>YH~K^9PNgGdU|^=V5LUt!zUcz7MXMv*|~s8138xU zR3*4K;)kp&y5aSd&$N99W&Nx7X%DrV`mW}#U6tWBT$|4m8JS$|FYen~M2sWlyt?oj z@YMA2G+$)%DY-GH_rjA6(DhN`+u2(y*7#HKwd`8SaK)T2gT6cPAzs5=C0PS$eClrr z{E`4BUgyrO&4Hcw{;J&A`4+y<-fi3u--jP=wcqILs!YrHUU2+1=&9>%V%^AvDZPfT zi+Hh#@p%mw&ypV2ygIy@T8-^J*GPVm7?Bdoc(MyV)*fd441BHh%l3t==AL8j%G3VT zPOW$Ny;67QLA%!T^|Qd~j88D1@6$#Z{oj(CzKciYYFQ7Ko^8zNDbZOVd_Fu2uBv>d zi#>eZw^oY(@O^t8CfR$n%ig|g-%<7#kK*gozT#WlHB7c`?prL59~~J=-tqqGeeX-B zgFnew(YEj@JhuBL=2k)H&fF|~H?0%il71^Hg=c$XNz2Z`pmb z>tap7S(_f#Q@SV7GEZN?Iy5(aOm`6oKgGG~d+5E?Qu(9xOmwd27W2Se7Q_Lc+_pLY zByHzraME;?^9j*wDjzhAnF;2*gn5?TnYftm4VAMJ^bvhDEZ)$C&=w8e-CZ zp4JB+J5Id3P4|-)-0ClCk?sg`U!Lf--B*-*JMWyTf8@eE0XN#8m7J5_R{l!eG4nd| z?Z>QtcTVQ~0e#}$pZF2vL3Z7JS$16mcHQ~dN?OZ=sZVwt&%~d}B0iBX$>fFq;4$r&v z$@MPxzq)>FO_|;HYv4Njx6Yts|JGXmRP%ej@@0G0HCgkp>HzJZ@_lTYQ@mgGD^LG* zPBOcHTRr2~nIHM}<=>b8%<%;#kkQiPy1$?cn$zBrF?P$fi7C)YA!`_a`iu;F2}|*D 
z;kO>#H|npOgFg&E_E&yt$JJ}!PJV|3x~Pv&V(kv^fzOP?b@tga(TV2j$I^w=ulD4h zkUWupNIp;Fr9c!LeUtrtRzPu-n^yOx9@_C+`M~iHuT>0S+CSX$ciulqog4qI_@8%( z|DjzIS!Xrr49FLrsXilpW|dsXhplcSDq6?aXvg;k%({M~dFLgX=Tdl=cz~9AqiJ?S zsTotOtz~FU`^A6Fj>Tndf4CbQl?)FZ4j60fb*g0;_WOFrsoRM?QXS3Y^UWI-~*%jUo^VnKtUSeW%BI%F^B5BFH+==CLLZXbNNRQm+EypwB= z+V0fjJk$EKym`G{M>%y?%;qdIXB@dVV13W8FHxxn(nJ=44tE{b+a>0 z;o8A*pfnV#K<=TZ$e(WQ;p`iD3-s-#{s4Q~8q@RW4X3Y8-Fj}*?I%V>XVb@#HqDxQ zhw>LYu@!PCvc%Svjqu8C1e0S6_l-JfIK*TdM$8@|@BaVrP9FMb^%I+& z_Mca?HyZP;-^|)2`2)rv{C!4pu>hNnGKzndER+nYg}!9x>AQRwP8^ogruMjQ7-1AJ z$1fRscg{xVbYPPuOW;XkSQB~3)ADp1Yw~LDt*>Ej*FldOV}LQ{aBgb;EvM(<2d{uG zI-K&I_{xwWwk)n58#*j`u#;z+p9Eu8IWvGihu@|xZ_@41K69S_uJ#%{@3CG^R@`2) zWWM%&DaSpWoQ+7=(>i6-;?`Gwbdz}BsRP?`N02@uck|R|WL*XDfqu3I9(LXtcsA4= zh3~G5SmTUw;kyHo7poYIpg^veQdS+nBG5p-CvyPV?lfu zeN@bc%C4r5Tj-+QMOW!q7=Fs~-~uyx7S(NPZnKeW<*_Kn3q=)ta< zYwor0^2<7F>BJ>!zD(o3A={FkIVaOz6FOY?1Sp)--kl^iZdvqxI6Vj>!Wr2HcPiI zo{JoJ?p9H|qE8?FNu<^i!)&$GUq+19^Ecmj)$i=SuvRFEvtC=;m1~)!Ru6XE$gZ8w zpX8h9OwFwUUHm8e`_1evJPPh#=b6qE&Ut4{bRu=U;}3QGnX=*|m3=a+?4O;o51$s@ zimc!cTI57Q#cX1{w!$;oW7wb7&ZAlH4zv>*W54?e-l^vu+cv<@B>O0h_Y>o${~5dw z6}upPD4f5I-S>@`Qx8RaMcWS`>jS|#c1!~M`rEJPw>3QUBzZi3IL{2-Ut#9F@B(@; z#y#tSa9Z3rF19ot}$_$7S$4KK>Wt zTGIFF7Q}zXyQ9GIU72_15A|+jZ06m9_^)|aW2*Nto}8xHs~OuKe)kRCF}w zto*16e~PDmMcm7pZ_S54YGUH;o!oOIzWx(1#Yvu>kPKoi_LW@Lf1`+p7)?CH79p6_~{K(H-YUZ5?9R@es2azx38N+7(Tgg|Yn}GonAi76acq5{my=LwqrJ zk|gk5V)v6Pp=q{!WMRP{<2)|13%-eyZ{neTe1W&)qwIMM+RMkcx#aTLvASAfCpZh- zoxAt#H;mYXLwDZitJC- zvQ9}%!3JrmX)C9nWoy@7GV5=FSn1m4OQye_6Pvtt%_W~jW@~MBlo*fHn7}h7OMF#5 zAJj#jM@}U&HU@D@1-D>hZRE4A+W$%|;x|{g?90Tr#IhbPX?VN&tU{ z_n!xj4Lu`4JNd+?{E_vKQHHVCM)y_^i@yVHu-9ARyqk`#Dv3YE`>!E4>yevXl#hHs%vK3@ z3oQ_H*p$>1|yO32MweHVk>TZJW%q5}Z zGHgrZ)10$FrebSVRQ+KHUGSZJTZa4$K0ZTq&A6Y{JfW{y1KNG__NfLv^ilhCL8ecN zN!o_4)#pl|(IHz`-`_#!il6skZ{5oCh~ghJek6@u{XTxjRmh7^BR8%@eq4bZS%@EL z0p}Ldv3IIZ;ullWy>_l4bgG~w6_lWb!=qack9rtU||p4F3Sr+xB2nY z!ABVQ$}GCgjt$Q~7j5g^5b+N1yL5F78dDwrimX_6Ae){`;$xu4u1<{Z=}$tc-@ 
zUi-eXg$ACcCFMtsJ&$i$=QK*>e5Ct91!_(tGUOY5n^0h@j=@=CVDR@=@=+w+bs zFMPibzJ{_9)z0jZ#wOW{-^aENe=L+-c@c61n$jAl;bYi5&7oK+=R*YxvmPG7-B{r{ zd}pmESkZE_qsl+_4*AYv*gn!XnwQM) z8G+->8?qwjcy6STPrF|1zHaJ&^af+E@LY(@sK1Q5u^szqXX|AAG3-NZ8I#TzC*0nL z{iwM>es5Z~*yaz;6_iwbis$uLhLU%KEAL!l`@UpsEs0)`UT?n?8271U1Lu|JX}vw1 zbJH1k{c**PX1sGt%oL<-6F?0Dc416=t2H@9U{^esgJ5b!#n6+@igf zR^950?!_Cyn)qaOkN7+5R-Ia_ALc#b1wP#&e5gIO^_{}?_ zZL@AFu;rBYm;Qw___C>W9#v}xr;c6J;ov{(T-*~zX`!7bg>`8BdgR=B@Q%)DB+;2e z`P&Bb(3o-A9pZ(u)`{!^i8g}#u|JgImbsu&V$C`p=l+f*&_zN55Ghy z@7>J%XY%N989lc)p+gaQxkzVW{5ES?S_i z87KGG?_G)QmTmhxelYPM^A?}Ktu!hcD<|HeI^a9JlfKlB3$)}yzuU2qwdPOH=csre zcifLUUe~$B%(eYnjl7e4d~|0Y`TNdiPUrI;ccM<4&mL#Z@hzPJW6;kNMz%ZtDA^E} z@x)CfhShiw99y>=dF$v;Iep1FbmzYEhUNY9;k)_ZuXdc(!rF6Y&eubvRZ6d`4EiQM zb@Yivd-@K4+RwA=e>_va#v7n+jh7gx{vO8Wv>_R3w=>+Z_a+=aS++0CA_s?sA46r; zcC~d9-x#skDpQVao9!FZ+Qs(i8Ae+(Yc}d$<)GA(ujGP|9sA+P&YV*Ai^jo!W79tT1;~Rv#C^=WD>t&8`kcCY zQd@`@4Ky}G)5w~QWxp?s{sX$qKt4ES)ZX%2j68f`9CM*h#JQB4CDUZ5s*lYt?H$2y<@;v(E2!yAu!xn<kNp(G z_Ug>69VAnVXQrWa&Rj<3193u^)E8QSfzNc zo8irae1?e)$ku(%Io-OK&|6&O*oW=v$A%q-EjyfY zBUl5E#IIxy0jX*~3{47=X)hn~V#t*ti4+w+SpTPxd0XPeK; z;1g%v4#t%1!5=yge<--M{h=>ICtACLWAXy@KRtlcxkmKOSB*_Y{*r0fxwW@ar`BgB zw9y2wm5_(Cd*qghkJ-Fr7JURR&C4sF z4Skn|>{z~)(8pgWAAm2b0yp1RJtf$948Oi<%Y@KVcQT$WJX^*%8tzW>r}}v72&3aS zyqE9?D@w}GylES_UCBJ_EP-fJxOpG^m5Ju25JTu+{nT~f^4}R(^^{8*MJM7NmDO+c z&2Gy-p;3DqU%T6?)%x~);d>qWB#hmNqrXf)H>OPzW%43dGIlQXD&1kMVWaBQU9I3&?@z38Gb;;{i}bfU(a?C zgYs*h3CHZ?+H=LeL2YK=fCQGQ8TSKs@T2~SW;Ff{JZ~j;f_H5afIda570AXQG`if6 z9}{`AoO;6_qA_}Si%wjZ(Wm9)(wdBpi(XsD#(LF|=s`c}@I zS0hU$*Xr{O;tdAZH&cN-GA#rSZCwUE&4->Ep{F|REzL(Yb1!|OdNa6KB%Ts3ly9IE z+hmN|g0}Fvc4%A;nQ_g--z&MgjrKJ*@rc$Jmmrf>Uv___6K#GA79qp(<9}scD7k$U zygE8YG80*x#BUrgr=K~OhGz8+GiK!6RTmKs?RJjmM5JTnKh}C(^Si*7_t=k&BNu(P zIzwx@!$Pb2kfja8XsUc*n6XM@3TDQnI!)ej>TlLKocigP>NbaI4Q$1K!kFk6^3`Ho z>Ai;>lN>FS^U{R9Ud4uy?W?(7n3-#5t(x9@O6T&)_KlpAnQQTu7kAED)xnkMt{S-* zga&Hhb+0}at{i==@%l5k5*{>H7PwF3N0_g{@klQVhmP)Y^l~lw7{A(pK7N}qi?&M8 
zms>};X$zd&v}MBI&`UaS$76%?Y#iz^bZ-FTlRcfjrO?45<`HE zFMVh-&M@*$GRlG;Wq;&if2)z^ zE=C!vehgeT=zsEQ)9IZ>(MzFGy~|o=fJW1H5Ofc&c7m$`9%FpSxSN{kqjYHw(#!;T{)3B{nj(PKgYI!?lf(y{p>li`xYD@J(IpA>6bUo zCy{j~+DadR?l#5uutw9_y^Y38=SIuk86Q<%jo0rFkb5^L_C}qN zJaC%X_NQWB+h0ojZQIt)Zo3#?!M~vIhoboeG~WZQiSFg2`4;7}eKd}5Mth;zbRT{> zE#row`!MS=(LOj>pz(r#^4+Czlkn$M1kNIk8G7y|=Xi-(a#J7sn$@E338Amed$MI> zHL{>tbHLtkHF;yF@*SHMd15tcZ4|gOH==s>pnoK@B&Qtv(4?IYvsspPhF3V>5OaL-Gk%$KKK~GaR|A=ReH&1TD@UXxYmQ$|^#|dtHU0=^BITni zj63t+voAgOJHc6YeF^46^E`n)M6EF+z@?*TJXvB6I)p`5_{jFan$1b*Zx)WWa$st=*84etRFI-_;S0< zN5ScT(cfeSr?WmHPB(oBPG`Asx8WIm=B zG56y4aE9N@k!PX-hulL}! zpBg(h!J-EAg>8G?a%P_F1GR1D+dhyy=6c2h^iMh--RaWtvg@=Svg=u8@o2HeV8 zf8{hI8ia4x!H>wB8IDX9?PyK4nosdmZKg~FK79c>Qx1Jy&9i0Dfc(#QvF?aa#{(HX zt2MReu@(M%7kv^NGY3C--7ML%G3=yujB87l4rEVeKnE711Iw8|Cztx5yw+Mo`e`r@ z&YzjPW#CG=)63{@7#$oTp2KeU7x>@joV?v#o_2FTV!MCdo0bhGYhAT_4(%R$5qnd7 z0e|g7{yF`gqCSBq@0hV?2ga=R*FoOTwnI<0{`zUAZP7qCb?8&JX94Z#yL85Hhv*9- z2l(#>_rA038|^zhkyWqkoNRx)6KkXT_tBPY+@Wl1@BZi0o-qx|S@*g!nrVBh-R_`& zKz6Riru|RpHtiAUnK!OcFvY{%|7GG7*w4Wzd$avlmnWTEY)9GO5RBN{9pN#=DSz4C z$Fuiz3sb2b^~4L8l=@#0Z#rvQC$=$pl6wWR?f{6dkZ)%z=ltov?yQ=}926<nGP$8~t~H~QlLg?sYoxumYvVYTBnK7k)ZP{9U?1j*8C9X= zBYXT1bt)F&v%H_4FXc!qihqIevOb+Iyi87yD_-iaW?!c`{wjS6zg|;S>oY2~ zk40YQ!`HGG-iF^#b*_T;?mwQ|QHw39J=bo0vw-cyOalH?)x2tj`RfBju z46QEV9r?eG-!Yi~tG{k8^DzfMa7~Q)iPpU%UO74ZdqCI=SJk zOwK^XIOlN=B1F5{zIh*eWa+(P-4&9KRSOv<`vvKl`*-8YqH`lLN}$sUvQ7S8^)o~bTZ zf9{!>=}&(AcbUE@X8yx)5zNBH6P%e?0;e6W zJ^vKvEEEIUg^l3&>s;g5bcS6|ocAyNIkB9qSFnRRRF~shY`$~&BVEJ;%7;~}+R3{UG*l zSTyJCPz>8A8W?UQU(niyzFNz;BZju@=XV*g&*rD&;hg$TqrM0GN6u9Ig!c2DyKr`% zW-R?4aWt0k*IzwLoNVn_d=l@i-Ut8hwZ`py)haX=uLJL+Mw7#Wvu!!-`IcX>Vd8dh zHHv$ZN+uX9YPna>;_TOk&l`)Zv-ysXeBIewCKj22+-y7^ebN@DH_uf#lj``vI$I{*w#u*J30K*;B6o z_o26l0Tn*MyXM|HKXhO4G^?c;nUv)Ba_AtKpQZuLy$NqOT4C zoj#O7lOMGY#s{f(>kQ(=M8of1o(!%E_xD^+Y}H+Low{SE;;!6I?I&l?dGnh(lPG!o zdHCiZ;GgT@qiebU=yT+@8)@&!6;WsQ+!ZHM?NRX5L(W2tMR<{&T~Ra4zF*?JMN?bW 
zeVsgSM^ih9fmn11_;#LsxjB^lia9L0p=?;R4Ln{6?s~yNDdS(y_pRp6+4VI|vxUL8uzNYrR=jm6?@aXoex?<4qr#KCTma4sH)BUPpe8SoMUBkJXba0G4hf?k3AEn&iQtctil@ZHQ_Eqj%psus1 zNBpiB0qom~OTLY5_j0No+x(Q}rctix^;G+LDn}jCGbg>fZ0@MNP7J$re9=8$*jKo6 z;XdgZ@P{rK5k1O(19c`cdf+znK($%iSCf$g30n>nwqWlK=m4#^RPJKQgUf4>NyU9- z_^x{w)TCw7q=t! zjnRcIrF=?Wg14Dzd5#S?1-+>F;mqA`^~j|6U*)bbTb>v9X-$4|Sqkou=ML^l!JTl` z&;R=myZhpn-Glfs(c8A{&fuq6?R~_#5`NH0oS!R+U$?`* z?}u)u3t6U|gW2sYL;rc(Y3TW%wPTP^;v!EwHACsI-v3!U^?ymV&!wHTUV+!8yF7Yj z-rH$e+Ivl!zvXZ8mT3SltmlfIGR;GjLD#kq&nmNRh%zzsNr}qX`l+~2vR381`YHC; zR6BbB#l3auKJ{w8hbo7RU*ajZY^ZX``9G3_BD=ntp~^vLM=6)qcWHci_1*J5srHGq z+4sd7bYgLz-Ja>zi9LO(cH%7u%GiDPlnF4#4z-o3XPf%tDO1k)_Nm{t-ps;RjazSW zcUk+pD)$u+zP#nIXGRKNS>^2hd&@D#`#km4sDIvajB~rjVC&%wzM4IHxGUBE3zfMo z3tyfx4F`}tS!HZ|dCIK&Q>y*OtTHyfJY{0g`@NJ&>vQBSeI81m$9|V;=UhZ_@9H$Z z(&a{+p!Z{cNVV%8<_|6RTB`lBk5aBX)m}%rQ`ra8S$l+DOtm+=%8fX|j;Nqq{Y$C# z`#+>yDdn1WrP_&C$ZGe*`idym^^;V4CFNMpY_!nT))l2KCUL@A@7m8;;~1=Q^xNwr z)w-5-%to!JGXBGg8Qd+ISvT^$D*E{S_PTMSm9?(YIwv3*kFU^fBYSOiwGrF$UbTJ3 zU2)k3*db0owAQLN3(_&Bp7qvX|EISUVPg;nfX_odr-F(D*kMzG_Ih-nzq(`9@r`P) z*g2n)?blFDMX4eG9R3B$%MYk^%WaIQVe;G<>&z(q=?kBUzlpKnt551#_*Xi+I~bFh zU8mE}z!haJIaf?>3DVak{Hrak4~e;m_nynV(2o6^@AtoI*ghI3#&f9s76(U#zYisg z+4BSk5!EUG!z--OQ@*0cHS}@IAIW#d9*Sa?@%_YnvENA!F>+P=l;bwP z@i*VNek!Wjy_32WZW^bKf?C8Av zCVZOg#S!Obv6e4@Hnb=81?XTUbRc*(>?-E$x70i1M^|<{n`5ka4BN%wNn(X7QX`Gz zRnRWJH%wdrXW+{!-C6Ies_2<}(`S&@M6F zjJ3E$?}?`J^&YVZdQZQF2j{&}nfHA7oR1PO@<--OeZhCs_^$7PiB6x^5_hV$3+ZbS zzL*03)d%4+H-k$(b7**kp6%CJf+GA9?z1JzA@%;q%WQwC?Qfw!&KTe0tl%33tC%q{J*t$55{(?gM{OIF-(wVVyS7u5)MFXY@qc zXJp@9RPoL^#M|TR$KQU_k^H93;@y?Ry+}T4PeS7qjwJW=-NCi|K=RxFlJ{?ec7#(0 zpXxuGY{;3x{z4z={({#CegFUMFZpISK~Hvni^e{28hUCqyN@)kYByU4Z*=lR3bDf&+Tu8N-a5$HQx$K=z$F#WSK{=Adv z80)kG8^=dZn4^M<7W%3EQm0S*=;vtqneCUiUdHc=U(dShreuTsW$0VU4)L?*CCK_% z^Rk?}R%@(T=iglQ{O08KXzrbQKC5~%y2J7JTj&SL$ub%Y0 z;_s4^bWZKWGXsOZZT1z44bF9wUj#pS&@yHqAD?J{;c{|J5obhB5RHxesyo8p3e8GP zFlJN%$4~gga`a2p(aV$ad20`84Y{*x$=6ypEOdB2dxaM3$@#;Kz4P&f&uCaa>)C(x 
zuSiTKR>-(_aUFN^-Rc_|O^}~30q-^BSuMd=nma0McsACUUU4XtoVsYFeAA2OkGA$K z#W&hOOjEDqFtM|T$XgwLp=RKIriK^Hjw^^Acqd3soa=7sZ(#3K=kmkcA1$9{5C76D z-9h^-0kXjz2bCm;o^y_X`!6?j9UVDyIypow;u)MVOlJ&R7(?gT#0rhsSyg%6O67f8 z5ud_u&Xg=?Jk{)FAMvl>Jg>l*?u@gR{D(>GmlyITwDgc`Hh4wzGc(5F7eB+8zJPrj z_AM7RGRBpRkxhw?m_5co<2TP_jH2hVIaRG^a`sr?m(8gPmv9z_{E)(p_RgPYuk%Ly zDFtQB5#v_QE#+ni;ENZYYfHg7e)w|y@a6d7*F!hwFe_|myh4a zSQekxviz2NIvRG3i>}Ypy?vuvM$Mj+Y}`98T1%fdjLzA!iFe?kj;ZjB;^`khOb%s! z6N6kF{zXkk+HNX}hnd%pJ&jC1oN9lGHZAsVGd5Km|4qkb{g~xN&kuo<^e-H1i;Iowe8;Q%3-Mm-$v-BQJ7yfBu!kWb!{u>$~ z$3qC5wt_G50O!i8tWu-OIE$QV#cS+6q)R%LLr}6hKVAi|gi4JW+4at%Uhz%%FDcvR z6kp%-lXP1j%bKGgboRe7N7I=j(P*GAwe%kFmCgrN5?{r42XC)~7vfba>QA@x6va2O z_7Xga>_84YVKJwA&ti^BnAaQF+i785Usyb~WnTN5j@n)1J3q~sz9%oYrSbDM$%T7! zqU>Wp!vULy@yB=MGOs$9dWd}>hlV*TGC;%n{ya3SoQ!%`eDeZye2}=NPVQZE=D(9? z-~EBHiNNsotLS4+*8GcC6!WK?#9ZlYP=zLz;p3S{x(|z;E z&)@4Wk4Jnt*o{WqX9m`*EyYzov?h&@5NpR7w3SV3tv+5@TSiuoYfY=`r#wL3N4=AVD#4qx(#%brn=nBch<@e5Y`@FZiC*86%c zno-QeyZmp3o=l#vLKbLWaVEBp=8D6MI~<&)_Z185IevqB)R(2KbCW+w@0V(wU>Vpl zS#q)d*Quq?zLM7I+M^C+;W+@Fza;!}J~l}1RcAksGWX?xU+w1wkr_SUH+Vs0bB{k5 zkCA6Yc$!R3LkGXr4u0{C=)6J>^*Z>d{ab4Kwt_Ss6!(xl?i(1h_B}f%Wp{`RL+>BaIo8*&E*AD~xYAUcYbQ7^7YKnSJ8{ zz2@lV@OkKv&)w2*+4|YC^>YyZ_3CG2)d~9fYGg&WepX)Q^txTNTM&Qir_7N@Uyr8l zSJ2mKomFrWU9B96?syKxdC0z5FPcP8&p=Q6*gJQ|cpmwYHAd-OM|V48tj*|djZ?b2 z0^J=(cQ>HBrAv=+Co(bJ(;XZ{*fY`ie~x{kv)Ieg-3{pO9CY_L(cKG?U2W*IcEMx*g;#(7oE+U=x%bs}V-tRu_T%t_*4z)gmD;hA|0KUx z<9peO&V)t_96gW!V(Gm9MLi$;TWaYasb4fEonU43!RzSx?dXH$(g*1ITJ(JRIAeOF z^Z|N)A$s1?2e5R9XeYjn;nJs&z=NFRuzrJ`V*_+n$BcDmj z-dg7FU3g>}^-Is!@tLiUC!yEHLk2oedj7uMX+2+h=Bf4k9Og_sC_S%x@=DKe>-kSo zuXr-@`kz2nUHZtQr?ceeg-7&Wy1_s;i1`k?vN-l|up zNN?@u9)Ydgbsr;Eq&$muPo=wCvvjxOQKes6o6_Ux`hBYX0>+V0|Fd+M?p%?5`9s<( z9a4v<>F5@0P1%bUIy~V|-&qGAza&2HL1#;kpGQ5;vw1wLz#lUoJsv=hYo5jHW_jQd z1Kj!;_iel{d>-{(96j;@dWH56Q1{?m$X+e1I5r_YhNfSq+Q;lxZawY+$;3B#b@j{S z?a=yrC|%vm7}fVKd~$*0N5`vW4%$QxUKeNZ+VpCw{dbH79gG=%QM3(Rl+nTbu8N-Z zEn5e_bP65(l69gEru@luu#Kzq8hzuSAEl(?QR1aCYjnFWz3}7X^bMW6Q9AhEuN#}5 
zd(PIu8x3^uT7LhS->J1aM?Z{s!QsTIj38cRBylSiKHdQFUcP~I^cOLo*Y3GIN$mf| z1i944v&NOon84nA5qtBTN5+F0s#^|D!5H_o&xGiFLitY&aZy}Z}NbC?geWP@FBoj*( zB$jME@K$tH3|*DcC-_i}6>p%kWjxBbBo`)`51WsGzCX4d)mQsNI9 z$vC)K2yW`Z%j28L5yc+zf(hj6MF+}X=irB!l%oY|@D(sHw)WfnH+ zccB;b!bW@jcY_gq06ic%Xt3WXy)X*d-G%QY^og`y5NxRR-+!`}6K)i@-T{6>+%=P3 z?=#dZnuuWEIyS(tUFo)NqYu+0>8X|@x&}W`* zER7#X$4a%Ke;@yLD0yBcR_eRZ+#X`3zS0~@e$^ySirAqhaQSKQn8UfHIG+Y_QZI~~ z(4tr=;TQWQX&g+ow|elgD1(ok#1{)6o%C7w*m*8_w84ee6s`0fzuv%F_*oymsL4jv zZCN;32~H^E!pY6jx!_B5lsJ@{uJz5cJBgDJJsl#(jC11wBW;hp18w`Z8qsUvrCL8Z zdf+AeKk3*Po{5)g70Wk*UQ0}ed0k^a=b-U-m@R=(-1|;` zeAYfUz3$Jg*h##5SU$81=2Qho+jjXzYxL#G2lMUwy{=^ zWMoY3IK{gZwHSO##vqr7cS*R1MFJt*^)L7KAc$OWL8g8G{v5NJ7`Knv(aW&zW)40~>A#WJlJp2PE8{0x+QzG_S zzmTOd`-6Pf-7<;=fAT40IQ@H$JWj*zGV}FJbl1ys3w{&*JOO+GH$Q3E~Mv$p3;35nzdcj{Qv=BkAvbT`5pzj9gJA}OrKJE9ROFJ)@ zLqDB%Tv^dTT$ymExU&CAdpqGH_#J!}#Fa;Q9duHLKgrJ=3&;5G@LLqb|M-34%9!UH z8QTf_*Y=%*w#AqD{U7`e|3_xd z(Wl`l!|i-+&YZW<7tO8Yh~|;}9mVUA=b8u3oA!09C^wuh%L*ZljU!xc3=-L z+JH=7x2U}3eCXt3&`Be7vH_YH4SjS%Cn4rE%y$D?QGI>n;4Fn+G{5Vil@Ku;iapJ4 zZyU1b?`cnMIPGP()j~gxJ(ErgD47W0@gMe z#G(>E###;7+5l=1t5w_DfLkEAuxAj=_xZYaZgLY@?C1CSeILKyAM==d?mhRMbKmEE z-rISf_xnX_&Xz%|BFilw9(&H7`Ad8KZt~Iu@x9SHRGr{eap7r!D~9C<)H$Da6cd;N5c3B2tx_Nj4Z{8<}^5||I)52#lRe)iMYX|H4K zv^Cu^_G^%fQc0>UE5rw&pp;J|Fq!Aax>dJH|e>r|Bnbk3H$d z_WECG?0<2L9pAO}j4nCl<)1JnjYvOy)@SBdYzXk27{@&OvI%tpoi}{=x zhc33je?=ojA6uSluYa2Fsbs$Fu@wrSo_wChdX##_A7b0kSgEJ6PNA(8 zw6)za)@$%9yNNn3Y<8yqcf75ir*y#j6~@|F-(ElLDD|GFUd0=hUzy!r_j#+MPATi< zBHH?)dZ~2LibP{|Dr40CtmM8LY*KTGDKK#3cWX@3 z$(4U&9X^0!W*R5mAo3$>xLiM{^>^l6Ryer*-q@~#$m>AXS?6gu5hntd;73l)|JLl z{70*=D;R@tLi6h)){c#5c>^pwYtFUsO!6gi2DZEGh~&N&;>-lWi#BX`d$7GL{?06N zT?WCA7QeB28nWYfa6`Dd6}e|SwBwp*`_#P$jue3-q8(ZXS_2jEfGL+;+g1*~5V*|5 zl@|2H*U;bgczXE*;6$1BQqbSFvVNF@Z}P#RwBD(v{^n=CeCTh*x8MBIXvMljR>a2L zJAt`cPn%mk`Tj{C=C}ELfd=lI`tS9)7S-s<{Te8SG^;ek0jeRfFAI>oAxu{Y|#plt@#0S3C%?-+MRhwy}5U#6kH} zZIRECZ1{YedYc@WXEERXA~nc{|6slokqssL%1*f- 
znkv2V$EHppczI3x>K^_tW&QZjE7;SCEMSb91MTYw{(|9j@J%sz*W?lS~&zPap$2nJg`9{(pEFS<->WJPQ(dgtWhW*q%|Wk`KCJUtnghvVQ?w7?4elG@7ylXDGdk9p;I4HhxI5Qb;GOpReVLg1!8b|~`NJBC&-roNkE+BT%`%6GtmNown6zOOq>s;~zLw=}1sn>Jm24%n12X3dv$R>8)`r*d0o zWpC2Pr|vqdQ-9PNRy$SD)*Qxd&%boYfaV_^p+e())|xwVs7(q^wR9VM?y8u3og*#f zyuEDxbD94?zk+YK%Gmtxqr8T)bIhALchr5%=t%!7V>Z^e*Ne|t>kaxLnj6!7uZ`)x zD>)M+-S;}?_4~SSX4SFGWnNF#Lu^i4i1}Rsd_-%KflV_qJ6*nyw#sO$iE7bmM+)veH*NEMMrCE8|O^_rM66*jeQ^eO?Q4D zdiLL_GaYz#d>><<@BBXIW<$rAV&B(oOy$5_d&*_B)8+eUtJ3*>*f^e5TW>hNFQ)Hx zd>`jVUZc*@+S-O5`n1|IaUu47^f%A>efVfSPMzt%v*Y_1`%35cF*iE*VvQ;Geci@X z4$P&imeEd^?~B2qWB$-pZ&6!sJH9WbBX)e>AFXpm&bD%~O9i{NwGIC$ol&xIA+fbE zR_7V#_o3JSf;!WIr@l{hrZe`poZrXXT-GtBRI8nCV=4#czoSkW?bzS9S^dS}(D8lH z@t;tys}3n%(jR%o+~@PB&ove~d053lXg@inooyGIYt7R0?Y5^uV^7gIP23>oK&;N2 zj_+NGER{!{>A+Xtt2)#9mMKNl!mGEZxiOoxFnY{Ty>`v(UWwdX@ z!L(h(;MFl!&Qu-v0rl1+d#Ij;4~ebIq|?&YcE>j*bAEdlb%3dP&fDIDcfxcnbV zY~ZNp+^@G`{}S~APqo*-ca(b1QqOp-YrW2XJK^*j>NPHA{nOqL@!j6#Azu~y6!N2x z-)GPR2kTXMiXB#@t3;5=m zKDLZWdx`R`_2OHreOCF`QU`s)$~iCBR@vg=CVV^n+xGRt4?X7J zwy7uhbtMM(yJle+bz21h#uk z*ruDX4aQ)59`9O_Q#Yb3$`BjJBQ0(a0zkJ@_@%KPaN>=0X}=W;j@+3 zPxw#-PG+aQ;kENU;q{*NUGe%pU~j}=9|iUYfyt5ZnmQe@e+IbfJCuiRKfhYOr#oK%j`pJ_ z?6a&r^EL~wcjEK*CkO1;-Dxc9;eh?0DI1h+JS`%YqWmUDk^kvP^I^xMz|Oc)bj3ak zQpVV1XRJxG?2N?ul|T0a`S7H2PSKk-yQGvHgRe7Q>A(wsx&EJGoKs}K#!pD)HTdJ` ze{zWPOx(YXb%`&NU;a@UGmZ<_Gfr~u^#1n%VFRWbicxJ zU$L#5WB2cJ+|T8{Y;vj}cHEzWU(4TO_sbpkmtmWG`%vxg$JaA<|4v8!T<&YnTkU58 zn~;M}$iCj44m`=ZeK5)^t@%3b`bGp&jxIb7n zzTh{VK91)xw`xzer;}J0HjOi$9>AH^Ud5^Rd~q1$Uk=v~yND=b<7t4>`od6VKZR ztV9=%HV<0w_B{Ls7<8Y9r)bxN$G*;a_?mGFRxdFRZ>{T`hd<3Af6Pmr^Uz57Tgv~* zJd__{9@c9t|HFCc+yDFXa7DLyct5|IjbVGSh0RDb!it;e`;pOS^j6pPp(e3y^ic1Q3wo2E8R zyYX~*Wq3wBapYok*Fje^X!k>R->MeRh{z}p=kRt-KArxCWj{yEJoevIU-$4$7|!)O zH+wQSR1Y?G)bOl2-Pn=K`hu3MTFw}ZqNK27lX~W(6#oBOnz17%i!-i$j2+AP7tNWi zdZj<6US?zk^{`(L&*NShu*h?{3yR%38_M@3$gj5`ax-I6f6JiVy#LJ@`PnL5|Q+-IL)Sam;R$7aqu>%Fz=x5j#JzWU`ojmK!~eD8kUk2i*e z`f^t06k|sV=kdy^Q-1vb^mXIdX}5R6Q+=A>@{-%^_cpyd+!~8{|DV?V8@OM?nLxp! 
zae@Vl!inF9MUZ+G6CALZ;x32>7Jb2;8{gpU8}QP3p;GYT3blz}cWF<>J*;4i5+>z%%l z(mZ2?+LiZr3^9A+dII7~6L@F^@k! zdb_V4u3g7Br_pXrt}#47yDhX^G2rO!)^=-G=QuQ90eHD^cMbpgW+(0*fTkF6)_fK9 zbr(Et?T7gl``Ub$Gc9_z@RaYr`z7yIoNVr0ZOs2EG5i>7p35kxq77ZMcd#*k-PxRr zH5%^Yp88(SedB`x=DPV8?>F{0hUwmohj{)T&vSULwNlRW3UGC**3ISdCa%7vu_stK zSuxBSr)&84$N4(-0_-1}<9Ubi#Kt)#*5($C>44SdOVlkzt}p)Y_QeM*xus$ASOYmy z_?hh=dk1_O`MK!?a!X@l@VYWLXuqTI1Y=U;8RU}1mTf#?G!$Fk^UYd~w=KgQ?=rQ0 zoVi}D`@haK?=R&(G-~+J!}NIx{;TRUkG(W!pUc?8&2M>llW+H{JH0)O@Vbn(JEh~* z{>a<|UgceOwwV|CgxFZz#9ucax4!dk`fLWaqAhDUv(MaEwB>S-Ii_1QR^T8UsDXY6 z2Xfh?qTVVe{!CZB922&j&FzFOXE0K#M;bdSpsUqa89Q7S?tSAX-mr0&MN_We-2m}J zzJBr0ux`0M_T2-zeK+IC?%~?;)XByqwRgMqeHQJG;nvx-UDMCpdtBuOH}P99_{^dm z-z?XhgskrzrFy_Y-*5C+4q%J+j^KU;F%GJaF?NuDWK#8I#ts7;d-XZMf&Bs1A0T-C z#(Kw3ee#Z&@G_3K;5Cc??(oW?UgMw`ygut(Gp{l>=*y}b-#Dj(?ukaGP(O#W-K7p1 zc?|bBgWQEiCaB$h=69@urmZq*+D#TstD$YL_G1s?CqRBp#eV207s5yN%!Qxv4s)$W zm-1YA%vs!mN4cI1U3i3leal*X^PPiCIDLtnFT9<3nEAUcHh&@J!HKu7e#Bt;F?J#K z`6COg+% zAi6P*>%3Fk1ts)Vc$n|L248-y*IMFFJHHz}@{sFwD`);hC;md4`ahZf3pn#(03YMP zInlq@Te&9wiR=>^>^d*If%C<*B_HPf*c(jU0-30WHRh~Sgszd#nDcU*mhivHx9T@L zD_u>Z5! 
zg57eiZCXAJ*j2#G*K$wKoH*#@sqLKa>3}0V6`68X5IIS5N zro~{})D5-B&I}bFHoIy4DJW7syS{wKj}< ztm`go?IiBy@U2s6TXw!8{0dgNiI*P@2n&WbUS?7U zUm@Y;HzsTsY^W8WBlbgCZD>Cvf4~!3}&4v?t13v5qF-E0FX@5gH9J!hJR$r^^2jH3iP&OTBPSXH=184TM3&OcC z^$ky@EyXHW%Xg_=?875N)X{wRZRpi7dP;oM4a}Xfs_)s3O%Wdk>pO@IM{FA9sMS08 zFKrx8J9$$$YxS>$hWCN@AOGCGc;{-?`*kmK7R<$&FgItzy4q~e?^b10bLLU=3%$r+ zcTThgIOgE@Hklj{eaX{@jcG>w+QK1>B(U}%m5n_(t4L(Vb}KZE&HJTm9~ z2Jrhb#DSIF2@+pPF@?xe8?ynXMz)b#^it7APiAW~aJB2)guUls zcmFD#E0{(8-BRYYf?VHGd;>!7Pl#4-nqbDs8cmLa(EG_z=5A8x?FrF`HYJ<)LvJTX zA9;R4bT6>8Xo6&sl*oT^2Bpbg7_H=)=5ntmv?Pt31^)teI?FVVdEdo%cln+u=bC_1 z2pD+jJCFB|&^#@b^;P;pSYza}E{DN;oF}mjlBI} z8}9y&*wB=9?)7VMt%CNfazXfNaLJnI&Uo{afq@Ty4Sl!ytlPO)8!rwQbKdL|`(@`k zmYhW1$G)-nB(bp!VJsV&YYR8gg;?kMX88rz?r0wt955!T?`{E~vWahKppRQHNQqPf zgL`7{3a2ljET0?R_1oV{tfsmbx_z(zfAGC_j4qvLPK)sp``qv0VswdK+HHj72HfA{Rkl z!JmyG(OBX#HZpf7zJ@$Uezn5aCN_*dF#7tZ(7Vhr)(j4()2GH5j5CJ)9XPi<8EDY6 zar(wP{btM(S88=uC9y7&Vs&fbHL6z&uUSlA*#}2o-|}*=hNqCp^=?!9tP*3?iQzT; z>;8A_(YCxiu|cs7@@Th`cUqn_8uE}QSVtT4;nn(vyo1%DHNXg6aHPiXOUAa$H+Wr@ zSt0uG18vAb-m&4=1NaFZz(P4cZV)_#|7z<6;GsEm;tA)+COt{q18{w#`c>RB`C}Jz zJ)U3jlraSp_%AOUQ*f@H$NBtq&y6XVcixx+uP?#xO-}TCT|V;PCHd7~?Q`ye7V>KQ z;A{T|Px7V=E;!tH9%MZBxZiIkP7-)#&KG<76@06o+(hD;ImB<&-b;|!JYMG0i|p!^ z4B2As#gqdd^S%9No99U7t9=DFdwsKL_*Fco9e;(}6@G<&UidtI#H-A!;F(XJN^D>w z#FwR;nmBOVknkhSk8q%6Tl;X~h+S9pG|xHS!aMTg3KR2Z&B{U^DIY^#4&|-f${af9 ze*pKT4;YNw_R08rY(14mtv#xV$Tr%esyNDC;diUy7mQiw0lxj%JP$eC+3tSY6`Wf3 zcJ5J~4U7fDE^yo1J{8D{FznAb8XuzJR@e)TcvB)sUHD)X*LT1g!Zj{AEJNQzo8-S>}vV^r^zS%7L-+K%Sq-^Alp_6AaVWtMa5+`6MH{#62eh(C!8~r`M@>#}nU%Pr${k|Y6`rybwRM!uVG@=ild2VzSzhm9URINHQDZ1dl z0_OFC|1zTU3VFtF9(9j(CsnN~+%i#~RVc@QIkhS}!&EX=da@_QHRt{tDoW;&%P*N&RugATZ^J)1Tur+xY2(4z`){3@ zymUVAeu9k(KAWBsA6Plyn8KBzfeGQtPZGj&pGpW@NS zJNtV-JEj9pMhDEc`g=ROTYnGI-vjhFpZ?~Js{We21^V+Y*7teA-;BuTl&!f=k6fvK zF6Fx}bbQyPvF|G3yDp4BE+a0SP&Qe1%V!?pS(SHlqXd*eMN zi_L!1BTXIsrbPnkcl~+M$~VsI{J!8m#s$&J5f^m!dq%3&@2M9=9~zNr z^?NG!=y$oiJ^0>=8`ybHX>Obr9=tIZ#xBAcO@95u4|6cm{(!ZDQPd()KucH5l;Q=*c 
zT;U0^K5IDV^k@348*o-f9PrxXExEK7x{d4=u6^56C%w9Ipf7CPTe#9aKd>_Ni7)J8 zeTJUmp8l`$g+2Ta;5QmrtG-d_3O_`j-m*C*zM6R@53^@3ex>ngU*<(%?S(FR>+pNz zp6HU!nAp$%sOR6_U$XbA*$WpjPwCgp26is<`+mguoAHf29eP&LvvB3?UV)WCk8+%) zSLMXHRt`uuR%Xxd+fX?M9vvPRO?-Hq*~d7SugVwpMJvzd{$t}fQ$+b;%Ej=&C&oq1 z=Z{gI$n#~f=TA^B;rY|C=gTNx$n(0`^QS3a%JY@6=XI2?4=PM~s4MZ0KYqS05 ze9;NO<^2Kf=Qz9^Hjv@%@A`uK*4j^rOlDpxJMs%)V>*trIc>x|f!}IgLT|h4)<~v@ zf19>Jc&zM6lKl?OC_-jA2ifIpWSE~K%bZ0_Cj1kLQzaSbZ;kDX_Z`!}t-FoKi(E1s z+2~VzD2XYiyaC81M&$a-k;mS+SaF_Ln{#%rCAWH!*Tq|c%y<4<=R^}UpYVr#e#KGF znfcab6*I5FWxjsCKI}E**Dduv=aLQ4TNG!EblOh##h=xtv1yIo(y>OkMLd`Ir~!R#yR+=nKS5dff>3G-g9X(pgJ=@o`Z8=oG$% z^Y1Sh1^dz4_hNf(_ZS5$`2Gsn7^w4D&p^b))5XM@mOL_nIMb7f51T}+*mB}bb3W{y3B;M6Oq}Vt#N=f?#o|nF z=!i3&?@CC_50g6{T~WC$lry3%%~zE_CDF=9$l9@Uej!htc5k+JDOIugs_@S;v1C zITE$6^6%HsebE!PV((X;Gw&};-c=q(!8*HlV25{BEi@=?PAAmdh z7ANi$@hyr`?j5wj{1(M1NB2ogh<(d8zGa&y!S7E=s0tds4aI!Vc)o|clKdw%J;V2m z2Cn9qW?ADAF4NEdtGJw>a%fx@Tm@6X_E2!03tZof;mue7AA@h6%k%$7_yT7uuc`31 zJAB1AM7KZXe9ZuO>Okhj1>HPc?)5f!!!G!)c*7p}Zgmgixqa|0>}8X@_yu|K3u=;2 z5IK&q3r>>#YZ&c4V#-8mkq^k9Ys(HlzuhT2q?@t>JfKr{_}E>SjovM<0EPM@}_m zR-5;aL9QySv)fU9BSm?jj~pAci_HjKb~v^l$yGj6pS9$y5sX)Iz||e|m=^h%{plcj zZCQt2i>wpVYlG;uH^ubYAbRcXJtsyRh=mYZ&OUHT?YQCGxq`hv*A5fe89}ey6ZHrsgcth-?HK`-@=)$N!`Ci=iui* zqL?3vt+T+-`AdlDLOj}eod2K8m=xzz@xJB}17NVrh)&?_zs}I;+{Jd};CaZL8s}#C z?w6E@uvQ-A9JS8Je?_iHougg=oRYW~o)w5z#LmYj(Drk*mu#Mo_f~z$bq+BvD+(*l zR6i5r*q0*?m*S#q$L7`14{Pg7%I7-!VLY#>9`>>>^(aqw)O(g#G|B;-!f$o;oQHO1NdA=J8znGvO4NaWbBJ+XS$=#cC${ujyflD zj`{)W06Qy|5w^TJf4ym;Ek7%lZ6z|eV&qwSfT@v8@Wl)NNr=(BL}d6oSvx^uEi2A& zzubzFn^{M~#~^k_;ivpU_JVJECc4rzvES0{f}Cjfgq&y`?Qw=D{O^Uvj?zKw|MJX- zE|@NT%@Zog;9u`7rJhCe(j$GrFSDMnl$}TFk3386vC;QFOe|?0k#XO4NR?bAmad|gX z&a`A4$<)?5P)yydh~)aG4~V{HM6R>P3$A>DZ-{KQ$O~cgD)PxnrevS>2aL<-3zZ-T zGM=o+UhJ}`lrSHht7-wBKH&K+w8cBYw=}JHAgp~E)yt!f?gxkgrLjE2Hw1ssHz+?v zKl3}jMh}o4Sc@KL(*KwqSliPWAstY%=Qi;L>44~iKSU2~>S2u7!W!*_S#~5J{-H4r z1YSA#ln&50fQQ*O{HV|7-hZSA{^|l^h4Ef@T~KJY?mUMqh4eU!9N;D(UA+sB%8@QPwb*wd=%KXH1M9Us?d3IQSP_o6i 
zBJ9IQFZnsPF>}vGHdG@@e0XS%_6Q|gY3{Nk70g>Bep8>Y=AF;~jxO#j+x2gvzUtQC zuOj=M~u&Pb131Kv69A-4a^DD!)cd^X7ool1a~hkDd5 zO=At1@(?s$c9?yh9)6wUjUo$6R@+ZmG(X#(8*q6sbE7tje&${3i_v`PnxgMM=zV~7 zBATB-9ntO-)_~}G(a#c=`W&<#I<`^q-ts+JRr#kB7qH%<-rMhZDJg$l)(|h}ncyL< z)KQEi^(S7S*hzamNq+T>&qV|~iP`pLCnYRB;7Qvc+Pt0oc}2nn=DU!oF8irU%iIijHL`53w<1j z=7MuUVh30D3^a^(xypUWa7MDLsu3B}%igL_{s6?SXG{$%nd3wGhB$p7#aHUV;T-UK z4{Ov*UR2o_C69P%Lvm^*<7hnCK71N>#d6sdV{(k_ezLh0G4`3@)#2@o)*MQwNh8;Y z+WgxG$VAxtZ>9~Ee@!1X`{@Jwe!1-X-E@qOK4SL%Aol*7V)p(Z_I{f`s*i)@Xi%F+ z`u5cy$dAv%UUpCHzwy|Cz1V|&=r0NEWhdg(=k7Q=djR|du^>(|qIq}T5Pke4 zk!M66n zMszni(mv$K?6|;=+JV^kXivEgyx4cVVQSTMdurf zG}b^IzL3nJ+B#AjdQb1`-J;mLr5>X`k8!jNbQSOmZ#|74w$hdp0u5?Qw%rqW-b}0( zjmfgBW|?wI`)Ry`O*P81k1n#t(wt?i)_2!p%QS6nuB2A z`NC#kEZ8LU4m7x?V9N!s^2-t;z%yD2oo~T6z=uq)TwD%YU5CwMH~dXwXofF0Fi*lE z;Y_KyPu(|i622Fm`H?ffpC9>icipY%t3mcon7eTBQt1XBGnQaJWzncIa7^QEy0&)d zIAl)to}-gvbaXIuZ!og?cIN2>a3%u%r~*gXZ)lk5N+_=dN4LSV#?L>eEfM@I2R{dc zi|5h?cFt%Szwum`aW5bI97lh`(Z7R>xAMPr0DO+Pde{LY*b!2s$7t_m>q!BV_my&P z=r-=tzkeumHF(0%s&d9O4;oGUwcb_G=e~{hs^fuEvi}q+U66`fY62_y(*O zuXz?4Jrvyh3L0JRN{STYLy$H+ajBp2{2d=ZAK!242Jo(N*ZG#R&KkV)FMSvOTkawz z*=^=p$%wr4cYGHQJ!c*ERpFNGITlW(N3@>qj#_e05p!tIUE@thpS$2xbKc$wPQ|w1 zfJ+Uwfm4Bt?5BhI-mMb{SLO5l#lQs~(7O`2$gbMjtGMbK?&kv+@i^t(o0q$8$vk+M z;4&S!Y(FtjXYsCtNZL@}(hxihe%+-%>G;;(&3I^AF*I%*w5|x6cMf}AXR`<5G55Sg zYrdFht(V4sw%3=z3q(V$I5vGFe}!&4zk_o#lM-HTU##`B|NOCaT01}G9of*e)+GPb z()a(S9sON=nY`HZ-{GJAixILb4ln}IW#qPdZiVuA1)_zC#6#!$NI9z6XOjHn-xrgg zBwN*h*ESD%2N^@YN#_$MbU)uG9~tE=BTk0DoxCVboM93_mwc9ojFXOhCjDDBJjr2y z1lKHF$%qUGS5#N^4kd?4pOqZu<2^5W&M@#=@{qm8?RN!dZ*HkYqgby_nJE!G8IO;- z*5a?hE&DsRFsI0`{XTSr|y=!4uPwi}t-I_x&6N;&FDAS0F>@lMM`rL%mK@X>OQUV_7}mZcSGVTyuGk#zY#*lB zg~Fec@#m=^KL0nAjkul)J>u#fujbDd4H=!KhkN*;WaM2&3H!fr;&PdF$&PS|? 
z?|MFB8)qPNR)A!_tM_0RsZAJfo{gA;|Mp(SrSlQ*zisLE>)nSxAK`YKkEmhq?v&N| z0LI#C!5_2{y<#2j=PRc?d|~TpmF#8t#;g5&_@QLBMrc(TW63A)^f2y`|EOHP2d4bA z`Rr)^guJTZ_{rnQ4V^G`N!EeazX@N9Am;*V;p@(G5Klhd{Tzg48@Fxf=GviMowrHh ze9IQT*UrOb+uP;Ss`};s?mUN(%e<&R(X1f;pY}O~AT&Ytu}17I8lUzN?qUp1xK8Dn z&e7P<>By~rgXhmB zKh8LEMYeb{{1=fgveIV^t0ITwbmWx5=s)wwmHP-ZbUf!1w=wVRJC@If&Y117_d9?+ z()uK0q*;c3fdd7VNt|DGtc6<6H;20A3mVFp%y?DDSTfEs+7#{3xr>>GYX@^M=_zx4 z2tRb5DLwMxFR+my1KMZPN~v=Z^?%c?-llH#*3eh?+?>(Ao$<>=vmZ@VOg6*62%B{G z_C5hNqoMoGzANC_@(GfS#A{Trj;(R>-3{R2;tFB`ZDx)>JKwUGSbI^*-Iw9NZz(eG z`S@ZhcOUkRz0?DKtF)JH!gEjj2GNm^t;S+MQJI{YobLCKuX@2erN*JuCTzF4++?IkJ> z&*9G4**+&~A0EF-b`Zg(2pJ-O_&L!e`1w9;ESc=94+cLjMZS@J#2=rq)b>v?VSuc2 z0s8xR)_;#QW5i(2nUpWX&ks95QbkQkna@|1sBfXp;PS+!Ly%9AwZY4T4L;=5;=7WT zj&>!Mk3V?TyXC*UYhe}k|LO1r$;`#%l#O7oz&@~k8uDJqji0i76Buj$&wNY$$OE$H zG_rQI|8K&q%(oOdqCx)aT2uNy)Bgw?DSW!YrguTTW6J{AUuAohZ}Vu{(>a35Ug$T- zA}Sl;dFZ#6{Z#+bV;605)%k((X!=o(9lI?rV-g+0j(*s_Tfl9*zV9e~mvVmRP<{V| zzP;F*RNQeHP(TgUh(luG`4$H!ie z)`Gt8ui(4>W4s62!r4sZijH?qvFu8`7nYxp?-a|9cej9`Q zCOKN)`47&nh0u?E;MX|Dkj(mOpA-MifnQ~!M`WQ-^n=g$htL0rcw_NldtJ#ED|%l$ zr(YX{5+?0KB? z7rkiWTTGe)@AVE1EVR$R>Au#L)_dMXzNOknvvFk|xPlBZ%!Ktt$nnqy;nz>>dDp!z z^UfJXY^>d7`_mYU>*ie6COwREsm%Ko%+uw}*JaGxrF92>bqP5r<72vC-^l8JnKr)qoxs~gMBgHAx%R-HVgLKVS%K&R_P^iG?GYA! zUB|p7Lfbd+A7Jmh3S6s6PBdeQYfdMCYdOTe*7^Gt3uApiPgl(D(V4G`K9Uu8LUoon z>Tou3I&|O6=S7__+50|Ub(TBoxE=YIG*A`2fph)9z5d`JbYjMF=mhcMOF17}#N5bcFcZ13c1~7X3-htp6Nk)b#VQrA#l~pT z648a*pd*9vz3}x-tQx{v^5b`rXZl@`w~}}B#W}pKU_Fg%)%? 
zOXr#F49dM)1kbheq~nKTUd#4S3%!(l+=MTzEf z3U*`3r`gW6hyOoy$nNHzROCljaZcTVSEcx&Cxcfu9W~EXn6``5$O!0Bj$3DcyTYmw zxp)MzG{ke4d(9loe$8p6d9FMq@&xxZpUWsKH^W@$aFv4&&xS_ManQ)nVa{dP@`HHI z3$!Ww!1d(96&-%qq{FF^WX9NnUZF9DcC-(l3q7uK(BnDOo9*oDFlR8FeLWiM>q7dH zU;aMUiw$SxQdi8Dq$=_Cgw{3SS<+Z5Z^tp`leYuf)9W)UZ$~oaRnX+;pEC2|C9QDO8EeJqx9hwOJ=VF*q!%1@=ERw??(I6y zLXTTj=MRoLug973;q5vPK##Xj=lk?nc$umF5dYOTFvovyum6C0M~XSp{(8@MOh3G1 z&<)w2-vu4J@eXp7@L%i6YLlL)bC_+Op8oyl?a`Poru2HnE~-Y4_d=6GJ*<7E;I!cB z-{M;_5?wnTd!Oc7^s9*T*Kr=%khFJ`f*hO4SfyiTM{Yv)F+9eI1MC+cAn!DK+3Lyd zqjxk(SBbqZy`{ySRi!meZ2k?{apr2TD@g8UbDcwH)%R5Lc^_ckbuzxuuM<1Yi*NMT ztjWFf`D0?JiG~KrTVvDEJ9vj+DXYKYd%xuOHgD@89{S@}Xj~pN zZYwlyE9+K%>S`z3mEJmr@kuWJcdWf?Vv`ZaXi})HE8Wyw9;w}YR}YhR{vF%ncGk$~ zzQ7$?rE$;U|DUc^h4bb!3RU3Gvdxtcp-uny?d0V=qb4&YW^T;@x#(6=@X>H4je z(B|&v1X_RKIRThWJ+W?N$6nxl&eV@hn0%6NOd9`?abAQR;N$9NHx!XWx6-e zyFfTA+?8!Ycz4_O_!06Yti>)<3x3&qrYA2l7CnNk4P8Gak$zM+qqVCIhPA&SUw$3f zFGAS$WrLCZMRuACo;DTsW6x{dGAFd8$fPxQzMGv%ETpaN zlX`T}73E#fesF5!ec&fp>}4;s-JRLG<3%P3H$<##^Iq~f(HF>*+=hKXdYzSn{Pve*W3E6R z@?qEad5nfKY$fp~>Mx>FbI~q`fcZvtzuXdD-)c zS%A!A)7CWnDkaZKz8>xIwT2{9I%J_kt)0Pqe-dX}fGP6PI$}#D#?G{G9cT2Z9UM;?r9?uVv0b^pQ0=hDZd`#1gz zJ_$Z`@*Uxac#ixSQX_XwHRY|ylMYzbN=E))fED~HQS(xUZSryWU<-Jjy37b458Tq( z%QW#QmeUgYR=yTmZ{u5=jc>g=@og7*1tTS=ah4PQQ9p*9g15bNG4x0{M=W6z=OTUj zT6r#G`QepUushyO-`+mSggN7Ritl~}yZXa1`rO?{QwWZVJ~y#XW7}v-x%NVzl~-~c z-=cF?!^xqgHLG=K+rKrZtVRFhZLxJo-lVfMuhg??o#@<6?6Ws+Xs6AuHi_1W)_cL% z(0{DlR`!b_l%4hqt&tyM zzX*^+X&C)z&2Ww@-L{puuswITl?)}H-v1L@Ni(^2U zu^LCro}xP4?J2UANQW4V{H}J`Yn(I~`CaxD8!ob6e8;>V$>t(k$s+Z21e=R&CG6K8 z(dMGQW~ub8Njx=T*G;i8rId-)EkU$ zqVJcEGZ@`up7Z<3Klm!&KX`?s4!X%Rjye;_aaXT8=q4TQpqsqus56u~dq#E8O*-nJ zn{0Q~Nhbf}Q`E7?ZJm$kACvd3{92=R4rWz*{bSS{Z4jr5ctYZ9dBh(w>e1)8Zh;nL zbJokWrN-hv+p(X^_(tjGswZ7FpZD_Wq4#zh4gbS78mueGfa1cz5u-yBqSKCN3ztTCLlN zPU8M;-~9xBoF19>wrmnQd)9gOyKVBHO!?yj&P-}QIgj{3UauFQy2&BzS-tVC3q?5p zY0g8}XXiPedGF>-reY0oZq!FSTiYj3?{)Vdz}FK0OCtha4~yB&{rkxyq&eXG>vsa1 zU9qxz*}M{uSNj*%{^qUNf*bq8uZSB{$oFdR99TAhSNcVbPmSvg#*usHN#WVdjbeaS 
ze01NLjlk0AXN+95H#u6pKRG%Y-*7Ky($q%9{(cP=z&rOhCxxdHD^~ARyL&~ed-jSx z&wGJPa%JOBIg2_2z|n#HG@j3jh=Z2FGkhcUY)tIF-v5|q73n;~cU{jEk5upI+246q zlV*(6+HkQp3Lw@^O@NR6f%xgW!v5j-`C1RZi1$m1kPzK6-u(1+mG@JwVb9-twN>7$=PKWCm3QiSBIR1E{FR=oyv{0b z)AIz%+pO~EdaiQfT(kczdhVk<#43NR=PFOI${*>um-4k%`F%ZC`9Z6^Nzdacud>Q- z>$%GBSmpJ4-jni9tNezZtK7HB?0>DE6Eng)%qqXC=PF-dm0#9#zQQUm(R1SW zdhfQ%k5Dcp9^1nm>p3lQ12)3ll9iB&Y#$igUj{p+U-qI$?mjj8qLdGi#MPTc>?y6oe^LKK z=-nY`QzVzZCXr@DXBYs6lzUhXP**m2JIkuBCa<7Id2 zoN-#@WWLKfXPg!p$MwVJIb-={at^G^H#=j*Sm%t7>%n?t?GY(!0&m|WE=Qp}NH`{=Z zp2qVVx5OzPj!DZ-4KVM0jIaxud6KRNa7~P?A-X=5>z?>F=o($o1r0x5*CkvhKtpvs zmFuL6k{n&%z;&;Rk{{`MHrK~ilw|38A=lU{(sjL@Yxq=(uAk?+e?>`eU9aUjr=p~n zt~YU=hfkiaw{U$j{u;X8$@S?KC2_jOkJuHcC~@n$57$4hC}|(f^#HC*D@qRN`c$re zT~QL%^%$-zI0vKa60U2Asj2I!TsKyfe530dxNhbwtFC8rZOkg!rt5`V2WFLgq3h*b zSIjE;RM*dQ-8idcv#!^w{_K*Eb-hV-W|#a!*IRUdPRR$l-l_X@O5W2oc;hneDcPj! zKDxfQ;Bh6mB06 zZtETKnXv$*ck9{kbSUh8=H4SdmV>kGDb2*Q3otJy?X5BNo_XCfTKZ2e~hIj-zBR(Y_t989lW%!g`en4gT zlwD>+Xp#7oUA|9c_>^6qtulPdF5j&(e9A80p)!2RF5jXue9A71?uk#?e7wr= zDZ4yCW%!g`K2Bx$lwHnJ89rr~(^Q5}+2uYe!>8_h!aFZG)&+mN;gw76 zIPmHq`CFZMg_BYjyxOC(1Fv?d?7*upRd(RjR+Sxi^_j{JyxOd?1F!z2vIDO^RM~-7 z@2Tv-t9Mj(;MH3yJMe0q$_~8xv&s&<2VTur*@0J;Dm(D%UX>kqHA`g& zUfrd#1FvpZ*@06xs|?MuaqNqrvB*A4dZwO1kL4w#!YFO`L~r+xLX?YlB^J9zDHbGY=%Etb0Rc2R}K0Uh3p0Z7Ms~{ce>V>;4;+9qWF(%8qr< zo@JMH|EbE3bx-WSF6;iEDm&Kw2P!+({ohn}tow~BJJ$W1Dm&Ia`y^e~{hw5J@RLTB z9qaxTl^yH;C6yiPeznSub-zkw$GU$;WyiX&RoSubf1|Qv-J>UUS@(~t>{$1Ys_a3`vodH*8Mz{9qT@%vSZ!fqq1Y&SE%e*_jju7SogQ7>{$1$zv^7~*mJwA`%ii1 zT=%~jN7=FNYgBfu`^Qywtoy&|eaE`rsIp_-ze)K}>;8>jAx~k`Qm(2N)?jvyu_I4= zBHD9{D~fo1{Ce}t&?`W&1^mIVOP37fpcONW32msj@{e$yXq`# zPuIg+C*78!GgW;fp|gxd`RI?@^At_YpK@~8#s6I7)4}*RkLmr>l~<((R(fZV_r;aD z<6PtFC$Hi6Aiq`oibrKkEa&$OzU1@yf0o}dcOSEK+Fc1tXS;s&*?K zyoz7op5}>DINRmoJU|ZD#!EX88Lxga?%>7m_B7&J>+sPy!H~Wt-{;K8&Ct&fIbk%` zD;TR{gv6VBt=4mA%qqnSS!?JVVJuHU?5souz82rDkS(#V&P+$g(+>ZUX7o+L?GE6x z6d0OiPrt~$lpjZzvC91;4^SpnqE(+grE4i$&$1#jx!z1W&AfMm?xTlHW(*ax-Qmdt 
zJv**~|G58xp8((CC1zJYSJnnsQdZT}U3a~EBRc)$L}SM%;!(Z&RgL4GG0PnukZZgH z4)~YyeC|Yib~zhj&yU}e(5gAwcG}z}<~&{Nfj9l%n=9~dqmTLW5?^Kzb2i$;oSppG zlFEsOaI#>tM|{_~p(VXK>v*2!%g5T4;cvkXVb7a$&MpL2wK}63o2xa%?*Nan`JRLC z-r3l|e~RzkS@`aa<;>_9&Wu`dQf+)sjKLf}yGVO`M~cQ+a?)`Y@hU@;iGe4nq=ZcCgJH4Etbq!j-1R1Aw zxtY7#2#1K*_E-95PtqLUM_e}vR`G)I;bVM^1=o)j_iP;guX*hIE3R2lO<@84iNgh( zJl0;oaD}6up2YTui+t@L&5TdS2y*+@4m)W zXC$)+(#MF(AM@5;#KQQ=<LknQ`*dD;A2F~V^JG?y7>nQTNt}7{B0n-7{G0rt1Mwr(p0nRL{6{{4M#$H!{#$K_werq z$M=Ba@+q9o7&Gw&w8jap{=y&*_t;n*?)28$*f>SEG|pOLHVU@&V>c+}_xHlknX@CkkE}h~7jj2oq1PG_WZbGRJGpy}^SsUSpn3 zTk-;A!~%HFFvkClQR)j9Fqf7+e2rPxf?emexo=3UY{OS*?&aznU;7sZJm~wm^7gSW z`00&C__Kk;eEzPYBp$o6WHr?I#u;G{PyL-Lddtcfy8{J11P`O5G6bNz4{k&N~(EJD<;8G%!3S zw&xd2Frte&%ZN@9U9In8PvcL_rxiyc1H4Q3D<|k#E^BXNAoJFe9cV}=-goG3Gk;XH zdW)+dbg8#a`SX5%CbH82&N_|B-Z2XvlqZ6(4hE$m087mfI&FF9kt zX?KqrV52C>BY!NxMGxWca0P=zaAEY|Z^C zwK0WpDu&^3+IR`SEE_+XIIFAmo)2#1!>h&-D^NM5TRPmTZQlB<}L*YLNh!}m!HpBv#<#OT4bR?e!_!X#-uSHDsH(mN5J2aDOryV(qFfM zAH~>=uv@*ep0oVuyE`NojYd}5%D>iR9=RL^)6MuyHnH9>+Sk513!L0+(u(xRSMcE$ zuD2q8t=|i;clq#b&#EfAps1>e^|}u_|KG&u5?q6O+J~=1F7q0*Hv!9?W4LZ%kLxx5 z8<}rA|J2CGES)I_UmPQIcFD8gM>A*k+1FiN%eaM`=%NMQ9HYSaF@DAAvqd}oS1v(! 
z0)IDyQmr_UhPRiKVI-cJ`*Bhi#~^m+k!k8~6Dd8GCP3!8oj78d$ zxCSTYRT+iu@a$2>qG{X4q8p`D)w$C)lz%lgnsjhOv;?{erja^UdE&~NDQq-oHBV&+JEt=yy+302~2CVzvc ziGBp39YJVE_q~2QuLLyndwyzjp$kL0A@sBRc%e^WgZCvD+Sjgw)n-|`oQw6Vcq9jiCAa@q zZsbG{G^GbLB@UVbU76zw2a#3MW3tKtosmRVsr0yYjwn1V;0lYUj*$N_WBC+)R(f_p zzzBbYOg4_3&EkFHUu&iqiz3ILRaZ2YbDO+7*_F{+7vq0*@IT$F+v2IynWPqMUN(Ld zokO279LavdbA1s!xDGt|FYu)AGS7LkUB!bhjhxB2v8Qp$jDbe@D&)UfWWg-HUE{hR zx~4M?PxUBz={M}{+q`v)>BpW%PLcG;L+B)?os9bZ9OHh7ad+l1%4q#w9wWy5viJgg zEgxE%gUne39G2}}bLPuuqu=iw8_mI1Sc4pj{38BTRf{ZY(+k#xWQ|p}tbu+oT>MV7 zr50R;ehjM>o&uj*aPrczMp$~U^xZ7}HRhrd0(Hgk>Q-d)L}c?IGI^^feZvq}|EgW+ z>5Jj_#jdmsTim`4Tit2I&2(?D@Fz?1J~R^_nH5G|5w?e9bUb2(t}fD8(XE2W0fONO zU?}>~J+{jJUyZ@GRagfenP*b%Tp(V~fO@-~Aw87MdOY)cG@CUrOVr$tBUj{H)=B&8 z#yh)^KkOJu=Oa5y?>1#jd|BkfDBB8g7Z%s>EiHVD51Ql~8hF9yMQ)+))|ee?8F1FR z*(Le?b;JCI<-a|={yiL=mu@8Z+48vJok(XAU!4b>YI;Xb4 zhiDUYt_65EfiuvUXcO};_#Y~N!KT;S({ov8;Ae{df9v?Sb;z~gm0}}_rr*Vx7WnlJ zA2d?7o*bWxe!bW#E&b8y$CL{m=T_u~-JGeC52fh$c)kT##{97g>*VJmx*|PU@eO}= zhUC<2zwGd(vmYE)I{N{B^ZCu=SH*8Gze;`~e)o@(&TF?Zm9gK#?^b?JonLx-3-XHk zAF$)Dcbn*6?^*WFY`=77*Q2}x+&5=ieo_bFiy`PE`C`U;+d(Qg?_xH2@4wd6eb7X*9Mx!sh0Xd@r zx@DEU;W4E#`G`H_gh7Eii~e`!Be!5l1~u#?dghy8)TPo^?bM=zAaL9TUM&G`=vUt~ z_9-NHf>Hm*x9y9uEl1ov7L<6fDF`RV!wVbxfY-+wt5c{ON-I>()71ucmjLpr_Ay?v z-x~m(h`o0v?^UF^qWI0OR{N^oNFOWhx?hGm>Yhs7W}V9|b%lNSk$LkB&f^#jO?nSo zPaZJy9%~e+oC4g5L0rG|5bfm94t}#{JNez(kuBT6UR0e$yR2a^xm_##&@c2j_cZqO zlLO|OuDsh*SABPny3q0Ny3xXW)xi(J{R>MU$*xO|>R$HB*#C2}|9b0Ru~<&A-`jL= ziTl_&rXHjFgSnsnX8U6EyL?6id$YsjV>*(tR=d;7XTgVi1JB9?{MIMrlLyNPGcQq} zFVMigaJbs-Yu&;ggzB;uCRO(gnE8W!)XQg{*P)Z?yrOTGRUSuKc|@J)jfdA8&AH_TpzM#`bQ(cN#neErS)lT&m z*f@O0TGK`eeeCq#w{yuIZO>HuqME}sod1;UBRx!X<6CIO%@0{RRx^77``KFv5Thc9 z{#pbsyok-}05E=ksyqBG=O=c-!#=>ie=+tquWTRKC3dl9|CT`BKgIr|uJ$SbTv)Lx zGM+NRYuShG2XB*Hw28j5jjUFexhEr=NI&$btVoEtE&Z;2__y%9%UB<`%^20zf_-~8 zFp<3QZ+Av(>g}V*AMAyvWysz->16sDWBQ_6wv~j7%)Q#@0?6`Bj1ifX{b2FM6rJfT zu=YtZl_TJKudA9fkk+1WBeIfgh3fYf`fa#lRNFdqxfXQHaoB>u_hBTI4coBGkHHIi 
z!aizdbvqx9*`En=CHWYhIzjq7QXJOKzI%daao|=D`u4zsEMBB}Xku?&aNsjpX}Bk}JFhJkx%Y zQR)u&14q54T^jsy8}-il^31q{b9S0<(;jQ`i5hV6Qee~W&T3`-O|suDg5TtS*FG#k zau)AR#wIilyvfP!x74n`4BI+=uZ}-95d9VZ&bGVrr0(`rJN9O?k^R2Lud%D0FQ5;a z+r!nDjN+7Sw7w(X=?t{X*;Bj)F?qvp%POpeZ*qBe z_+%r6>y5fT$8mi&*M~a))Q;X`=Up)AJ9M;mPXD(1;PvUWvj={$6KlsDLoM$=&KhQn#1M?q#VE)4o z^e?)<(tfWSKlmN@HLfDdHo72x^V!H|*^vasIT-!999?TW-yl6DvE1J_7d{}nvzj4uQujzB*tD8VR@j$e~v>jnnH<0sXSAFZIccGi+_xW-u53+PHd{WJO#4?L| z*{4x^-h6)S-I&+#9&bwQ8rqdTuSZ*UMRuF_z)$Kj!IKdENY*uGc~;{0u(IMl^2I(7 zSZUlFM=skQE3@m#R|}1T7IwrflZ~lv&gE^gmu`kfy@np+0Q z9QZ%%y?K08)%o~;XC{F=SwIY7*AS?h3?N0;sF(=|CV^mu_|du)(AEYbP}~(UK|}*V zWGF2yv<0+FW&lM=#JCYeTNXi(YHRyx4Ir9DK#(Pa;C$cDoqLm;>mLs~roLt9(OF#a@b@l$sd@*h?f<(sR60 zQ)remW4@z8)<{*2_V4S>9$~CM*Z7-nzDS;G;u3sea;M0hmOGVup>n6~;jaq(cFAeP zfrk>zQ`NT{%Q{}csmL#RD#@oMxo+{0D})z@K(C$x139XGH^cW!^mB4lsht0q^K;b{ zR6Zl~pbi)71xWI20>Z|=(f3I^R+3Z|gpTc2x9F{S|M_WP8M%kW@{O7*_CsC(Ok=2< z%D?#*e_!@$&q1b=_}*eM-;E!GFKOH#yDrC{pLL60a&KeMCH2=|=6a5P9oA=hSmlAqP(6Ks)cmIeBF#xmxYK8|T33WbXp)95^p!9sP+B zc2B2~Qm+%-oBCp|6)JV)z=P+poU1n?U%o62zp8PHU!5T5=vje(Jm+iN@_)S?!0!lo)>}VC4r|c@U+$W z2YCL;d;b8Q{W?7V;N1f{Jp0*yMT4g;@C5gwm)ejYS>U-v;5j1w-x-AGFl~Izd5sRw zG0tmE@YDd$Vcih#d-|*gP;IU@}W$HBVIx>R%Y`6JGYw)PR6Wp6#;JMlY z&p?5vJ$_ZY41W!NhbEitY<@L?YpxMN9wJ|$_8DQ~Av-)Y0v;LxOzcO`{`8uRU1={@ zyYpT*+UljZ)s1(1>oE1Q`I9x6oWSI)AG1UYOzi}w{%Vfu<|X1TBC zoNw^>tH;sKc)gu*v@?NgJ{_jX#x)UAlyh1-mk5{=k@1vi)IKPtf{`&cq@WGlK;K~V( zr!U^GzQp;9IsTeg^lLA2Z3EX{%?a|$E1YlC@4XuDe+eJI>MGhCsJD3)eGK7zsD4g9 zv+8QjuhGx1=KMO&)AjS~I3L0JNd0_-d}EsZrI_ne|CHme`Xl{4Ja@eFk-6;WCEvGX z4WEO(fvENC;cyoJoOPR$JBj(%?d5$qn726kG4_bP2x4RY)xF$xG{+yy5$Dg*Xvy{B zn4|IjRL2UtGJ@yBqmS9-DYqj>)Z z&{W__{etB>H*3bSB{EzcbOXGZ$exAnpaL%*z^Wky+_TRQ0x-e&~e;V(swZ#?R zn-Rx+OQ7@FSqpP8@*sFBr z8;JQ_6NvK%aUdW5B=C~B$B0SfzDkUdXGf+PBPZ}B6RSAE(av|yBuDoNKK@7GXTZMn zEy>FzhRn-J9zf!K^y z6yo2@yj(Wlih2Y)c`Lc%`OK@!8Vv{E5>gB9^H1d*e99;CT+jK}KbB=vE>4Zkj!l&s z7QQppi(Hxbq7rXa5KG<6-T+0slSCU;j^KV9Q#iNox8bDiRN56kun_th`)v&48vAzV 
zR}j->-I!9GMhspKT*3K~E-u+;qmH_vyzlI@(bf0vTxF@?kpS=2QSTw}3+#2gQ^g*G zzhpcE7b{04&--_D+g~{^;&_;nqnbBFXs%?HZY3E7$5*Z;E(6NYLjn4-L>#il%leULTAMnRD{AS)b ztXSIzgq#*@u}b%`zo6hw{Ns9E$2;?zz@UcxMLPPP25;G`_@yO@!qfa4pB0F1ZkP%>OdWr&{6JXQY&HXACgH9I*GaLRY#2dL%R5;(TBksBe-5d ze>z;g@0sr1bvTf_ntP35=;9g<3=%Ug<(o>9H$2yzYvcD-Y6DeQU)rc*pG1jSAEytq z9)j0=Lg#ttp?f&U#gH#faG_u&QhDY1J?@hrQ*J~cea zsk8*oUxny1l#W-UWPh6|bd`geP-W&x@@OXqDruLYe?N^v_ad_!6?dB0f)ad{Wm;`U zqR$0S$~*$+gRwV*M1LQDM&@%9eH+^-X@?dD=XMilhxx3aE@zDixzgON|zy3r^Twff*ftRB~jQI$o<2ls#f+ zht}iJV2h=lyQsM`+g>xz3+(TK#~(T5*-i8}lD_zkuGXLQB{WpIFEk9!OC`AqENu#0 z1{{eY;IQ`D5gp$Y4W8O3ykVpSGP#c-ad~$BR!t0{}=jv>VKin zpPombE#cc6L$v?D!MFWFz+vUvNb}G|=h3G@+YI^H+jr#v2viYl)41=|W)`6FZ{ZG zOj;!G|G3b8->Rp5i~4;tM88%ZXz96p>)`}7?8k3ppWj)m@7|(l^&MX3nT;5@n%u}J zYT_>E*vI-tiGPjwzCz-2=FTPl)oL`zD@t97n;8F6Jr=*8cs*2H=Q`B_6z}&XE?p&+a zkag7L$|aYw?6vy*IM-B9J8DW)Sxej~Ki<)tA78AR^W!(MzVdXz5?R}#Kxge;^4wAb zl|(L>96~NNXFi#y*ucEN{ZCSRqxb*!jY894&AYKiJk)nWB&Q?!hIwts>2VC9c7E%( zllJ1bmkK?vq-~>3gNG(t@cMghD7+fw{Tt&&-oN4r_mYp;+cWVZx75yRG&@1g$p_rR zXXleMdN4%J=%2-g&S}UQ8GL5tgM;wbpE(47@;PPXbIOvzKlRL{z4LYa4c-!dBiG^@ zZEwsMMw^lsx{UYRaLE5HBmUJ31Og`Tui&#w{-T}6A;!@11Y&(&rMZ93Vj z)*GTt;*;G{W0%7I**5a}HsnXHrG)qf0q^HpVVe0*$^SNnCsfW*<~4+F?@=HBM-KUa zH^0|&36I1zXTqgMnT`L2icn+W8@k!z`0NOEoDo5RTY;AGmB zYeV^cg=^@pK|wih7+<5lI+$}$D&aIGixY(wF@{lMRuTEXBq zD}vv_p}yAj6!(mF(*Gs@JNf!>Es1B9LdDBm)8bTQQ1q0G{5SIvrP=((IQeEBzkCUFJW6N5>-b8yTZh-;VYVVZ17N zPU<8sD$?rX237LBoW~jGF~M_{^TM{u;0(rU6LtG;=51u{#AN+k?1=2sC*v#kW&X#> zxs0)#Gu{>*mzbYH8|iV%Oz?&`Y_2y)A%A=q<~4q+Zkt4{d2s##ek}bf$9$P*V!lx3 z6XhQE%HM@P8u)Oq#)lu(1xg#*Wl!NRz^$(BP~o-i@R_;o3DgQ}`yE4@hB?#(U&Vs@ zIYv!?&3aY%MdlcBFN$AT1N1d}BeRB@I+x&F>11$c&f5Pq_g6C3b2z^Xx&-URS&R1xb&{-I z4$4tzSVaf>;a6i$b)PnGoambi&SL*$-`du~XY&Q()8ilE<8$MA;G@rhYh!GcOx*2#0~@%lJn zACaF<$d&YGWA54LkJ^4$GH~`p$L~ch4Bhp*wx?!7lQMt|`*L(xC(zel*}qZh=Hh}z$xQ+ z!c@y3U7k4E>5 zLib#c?iuOzC&>ELp#HfTJ!^iakaZO&Sck9GTbvD)4&o?+pQPSGbaNFxx}ih&o5m;j z?Y{(HA!8yn7XRelxz;X-zhJ=59>XqdnoHI(>;eywo!~vyGYXnlNv^q(kKC~4PrGNP 
zIcxs3YuIG5A)WD!S3&Q9Ju!b$knZbM=x+3_<^=WsMf7_DIia@TV#D5xkI)|7xRqGS zsJC}BHvQqp>SMF_rn0UAyYES5Z3Q~P(}gwvvc7_KMT6iqt)FW2aSe6UvFPqpa^h;T z;u$kfsgrLD8893z4J_M;Z_|!;`KH>@F1;1cB*xE%{^q%Qm;|2Mzq9TO`Wo-<=UsTj zV}@}bW7X7}fZ2?1DF5x0X*^EoH7ka{F~u?_l{bYNlg{;W@SQQaQ}K$w_A0*cFE})w zlG+b&Wc|78Yxv$6Gl^yLh$-#R;&1ShfoBPL8nNQZTO^L@>@xhkP;rc5BgHnJ)NP~K z%WVgPwsEV@*K>ej6*?_Jw~fdl`qAxwhvz2gak2O$V*kYE8#>9be_MzPgYjR3|7pY* z7Xnx5dBU~sJmLCnb8uPav;V-DNX}1iRH3=(9S8LIjBzPag7PmsW!%qG8rPCFrisJ% zM19{4bKKTEPoqBZ!JF-q`{(L&@_F?69ytDyqeNtr{YEu>s2h{M8_ge}?>6iBTKjEm zhaA3<=h3ej*CJ{*Wxk>jeUSx&zH*;F8_ii5xfEm0;_~z8vk|WBZ>ZziqPY_FJo;^f zE8j$F7FX_{3)fn5Izjm|@lq2pllWzZzOedb!_XgQyYkWxLg}yupG?z7X1hX;;vwDd znap=ZBvz^uyF?9m46$7{_p=33|W%D2nqcMNe~>L1XD)X2$N zs({^@Z^JIhIw|}Te-SyC1oRARc-MbC?uJHv#1MBlu^;Al-Pr5^b`D*YTg@6Ql{M6| z_Db4HA>Y^TOMQKN{o3u0M=@hy@l^DZE%}3?IY~65VXbiBT_f2fQQxh`|$`H=H9Lo7;BJ znH}OscEexlj^EUSoa!a4nQNMAz@;!&&(TADO5<~J?^S1IA=-p58 zuQTv9%HXrkZLwj`D<0p|Kia$X;^5xR-ly+cQotCjmYPw<1V5zI`_S0E3Ty8Q`LCk4 z)$)R4J@MIv7l+Nw+q`FX;D^p_tHr^Uv~`l> zr^HErfp^}2GPuXA*y8t}W)A@T&>c@I{_WsC23*&HXJLGN@s6hz?K_yV$7~@rnE&S8 z9SeecB1&E9juF9qId_c@?#sFJba1cC_c?!Vj@!SOJ?wX}hrP6G-IsF>eXNR)EiPmq zdukZ`IrK4!y&&JCkG{VMEZec9VeiEqPX_nPH2OKjn#DH+mgS~r-U;r7X*{!&{W4#X zXP!4bV-M~ZX*{!zScrWbI`8nAo*5C`FVcACS!z31$uqB*o_R93U!?KOG-$I-o_XE$ z%savTB8_Kqs8e0UGvrCu%RZOJw_8PSSu@MJza-ZNd~r8>Pd>$S*wDr5zP6r%#mdo2 z_JY>(85Y07m|v6ccsaFi7~o&bcCOHP@ej-=v6ppAZ10b$;rotlXaWY-4zCwDvgx-T zmQ?2ozG>1Tn>aEOFo^uN4c`ap6#@ord?v?u3NCUqIX=o6 zDuVcSu;5!1NqqTcd^q?%Yr!`&g7|(!TW0wk8%cb-hKKJ>7JTC(i0`j0_+AuAd@l(P z-$V<(yI+c&zPi(b?_bA8P=1rc!}k+euN901HThk7f%wW?0db`EuH;9}>mHLW`2H#+ zz76vP_!EuoFY^RL@HhU3uTd8CH#GaJB-=bo;1$;Xx@0cLut`P4gp!jNpF{2uQ)@m) z<2+n~?>FH5u?go}G@LszC-7aL4dhuDW3E_*{Nvt?)+L zpCoe6WrGxLgZ{;b~Pv~LZ`TFT5T+WA*O)6ArC{t3O?Y1cE8+Tjxi>j8>+N7{*| z9oZXDYyadk_3f`TwclCV|Jy`=)?Q;Dc-sG4a3A>04<`C2><#V%pZP&jef#(8?H_uP z_Su_NYkye-{Lh%$53qkr=Iy|LiwXYQb@(R$|1CQF6B^*ZU2p#*X`el0wD#|RsviCs zruJp;*UTL^`Lq6VQv-c=bO_RC%T4|Xf9VjU&z27L@Q>8ne@ohDPc*Il8|T%xFZ^fF 
z{{w0Nnw$MuLvLL`vY(GPZ*jQY=2;4efxd%_WvO5v;U3OexIl7+fO&Ozfsy> zbE`k=Pq#L-zotvD{TFWaPxw=pVEZq0sc%0)Z~s|opFP^N_MHvmdxfd}mC}CiJN#Km zcQmx$yGOA7?sxbnB=rck-@QkD``_@6@Xv#^pX=zM@#M{p?!_HU?ar5W|Hiuay*Ul- z{w+S(?gu&W4c`PnwlY76uW$FD-tI(cm-V-W>=|%||3d4+5sK#l?$58^FCd||=M&f#J-*X+bSd9)>3KlbvIgJzyAF@Y-VZc*3iQ2Xfal4ix(fC}D|+%rbwPNp^VX1K zFn=SeZsG{^lg#@hxz5b^bBV-}7aAvR1ct)k+#-Clf^HUnW zp}!vzYK)u1w}S=WqDbQVlknvDSqr|I5ybZ++A_=W*hu1gQ+W8^WWhHsg82U0g6~C< z#P{a#@J+PfyIb2EHZuHtrv=}?W=2qc?+g#$PYN2#@6rpzw>cf1Y{B!`i!>BGJJcaaGq@U+}r}3Ct7gsc7ZrIr>Fm9!THjVI5&zP z8v7sLoo8wAw_XmwX^}pHkGdSP=-5k$_+NFF8zWZ;8pd8N&PmaHMqOlx5A3=Qk zTk!p3B=MaY9=>ZV_}&vie7~kGvkXs-B)$)ZhwlsvzOD%3yU~L0m661Ec6j&>wBUQ# zdtvh1oL!n_!S~qcQ1Tm&U2=H7gm&TDrIWK8+ohK^oFikGuCw4=7Ac%3J3KG70OxWG z&JSE5&bnP%94ZgMYA{2NxzGH65a-3sh0^4EXkR<{rlby|9rYUk7T6!{*0 zv@&2bc}-c9D|>%5wLVI(Db#S5oJXB$9g6G)Hs3-6B_s_R)f6_SfnI;loJ!d*I4W45 zcbfHWY8Q98)ZaH156itgYAqYp{ps;=G~=t+=WB3Ru;-}MC{2f1 zc>CPAcVR8v2_5%01osqb(x%KEFMCy#Lr=lAsW~>e*VG)_md6{@;fYqFgKD8eL6~$f z^T4A*hl`}9HMnN7QBFjkU(|UZWKGjk7TnWY3HNQZYt{ko2;hFT;NCHO+(%h(S6T`8 z1s2?G5y1T$){i0kVUK$o3+|i$)H%^Je@vnemSf5C3hCHJ1CQ zS_%Jl7X0T$0RI9r{>{|4HaGtBE%;yGO8D=hU9;?uiU9uUX8fC}cWn;;EDQcot%Uy~ z3;xj&z(4SrN&cIue{Bwbrv?9a|IkYEZ^8e)5fPAoGycug!#0Qiwnr`U-%9wm)A6s+ z*7d8NdEmeGJL}&Y;vMatd7Yvj>zVamff@g1>SLS3f4+`?5H6?Z`WE1Cv=J)S-9@`1 z{|5e}B7lFo8UJSLWt+o4%YuJYE8)M$f`4=b@DCg?$$vBTv(4e}wBY~l@57h>=Ei@V z1^@RlTF(EQ%=ljxru}~;DE|iRPqh;M?JW4uiva!wX8ikwhyQ#F{@1q>{<~O0{Ew!@qaly z{Ie|hN3|0Ei!As@M*#o8K9l^jhiW+E@3i3m?(?lA{}%k;OOJs3oAGa^Zn-)CXWK&- z`EMos+gb3R7XkbW%=kA`$J`wL^DX#a-%9xJqTNRF9|8Q+&G@H@!cM&VKDq^`EHMS{!_knnq{F$#dV={ zLPH(HcVc3_?t2(>fSoy3krRDgr-_&SnPWN>%KnY1|E-V^pTo|(BIQ+YA#r(#crAB4@HYM4Yc4H)iXOti`|Xq`Ww+=ccZ!f5VUx> zDO#}iXE?NYqJb6~9%-#ai!HRP>k5q)S4V&rgPNiR`<{kFi)$>jIJrvXCh~kyXraZa z;hK!Jh%b(^ue;0}d`Cvu?=>7+)ICryBN|_9XeC+v)Lz?ieR1&LP3Q~ucMgXZU*6wXU;L?+XfeP-i;@V?;`yd%!JgLP(86n>#gAKw z7KdrqtS^2N0a{FKiWdIxXmO8)7Rjwdi!~Nn^ojs2dNf4~_9zdBFM3;O@$cWYj=spV z(Bji;TCOiXVLyeC{ssH9heM0X`x@(u6|F>z%Ph3`O$2E1%cf{?G(1}T)xWV 
z{xkE*2UoXTe{B2jCiKTAw>P=QSC8RC)->;)*;s!RYqV%OR?~7EtD_o!W3l-hhbBj% z;`)mk=(Aqp`e%alS>H@tzdlr4FLlnRsQZ!}#~c%V1aCFbS*&K=5|(bA-#{DSWB&gf zv^fW#Xl;a|gYciuCmJ0_MSu?Jq3Dnk9vyD5(81nHbXaJigChcTILm%VhP|!lhnvEq zL$rks6~Aeb-_ayLWLxO4IZe|WEs~2jLeb&o@aVARXZ1Qm!{LcA>7d(Lt&LE1F{*_Q z1tICs$j&y_4XFtz1{zVr21H(ee!G(>#pM8?*>w=s4ZbEbOLJkZ5Z?_WunHKyvMF9VD zGyXpeZ~V8+Xw3glv=aWR1^k>@U(>nXNR_251e9;qk28yZF;?@t_MQ;&Nd7F zZ?+Qtqb>Nq6#@J=n(?0#o*vjdy|Es6yp`~8W5IuR1n_^@jQ_Oo@SkJB|Jqi<|If5* z)&avKfd5c4{>^-+p*cI4VZr~*uUkp}E%=`u5&`*VPqBLWe>A-Dw^{Iivz724ZNdMo zF!49r!HqipO0vWAD*P0#9jutvSO?5%84VUooE<6_h;E<(�cqgEUy&I9Jff7hK#Z zSD?oNW*R(X&>+L%d9VdEC}^O82LIKqM1yx}SJwj?4Xz232Ih4VgUxbK9iAObx8Q&J zS1r;5P4d7~7W}^%tm%OZ<^ePQC;n#A1FOT+1A%+%Wk7@d^;W|FdJFz9$YZ?*v^*ymdO0EIoVjziCI?clfhjdehIE74%2g$A!jfCi-o4KkuU8(Khv zw=6Vxq?Kr3x6t6xFlk`61NZARknvYjGn$-B(f#3&@!)I={=-@c|95EDtOwE}fd4=< z{+q+o1H&!&*Zs0}^uUuA{QU#N(*x%5m;K}G!RJZ?=O2b^Mj&XwOyX zlW^_ewH6wj`bF#LfH@W#)Lj*x4lvW;*k5&DAS2py6q<%hgKwrZ)&sA$5)IrI8f*-c z24+3@vQ7id4o(Tr4!&W*|DhJ(ze@WiKT7 zJUuW=$KQZ`NGsuALA#;{4E%>i0RI7I{C9?j|1}o;Pc98#2N*OAl@lto;D2gB%g6ud zUXvfNCp`SW$qUNA0sE`1gumN@|HcU5|FRkXPr}3h4GaDcwG#e{1^J>n3;u&6fPa58{zt>ZKh1*w@g=P!{}%jD^pAl2?=i{0 zKRo>X_gLh=mGHmLf`3_<_?zwEOFI5aa$C=f@Kd;U@HGn!?r$X;oTXi}4tO8}G?;GC zAfv5kS_^3Ku!RO!wGs{9w9sH+m^3iQgZ<2MP#vBf9BRS;n5T90z~dJDzrM2Ndf?D* zlO9M8&kmmYNn<_mN-N=itp)!L5x{?)8UOh3@ZV^`|Gt*tzq_d#fitwL>j0inJ@*Ik z-`!M=0QrEB>vGKeKh2Ck-^XuijaGAh(8CS*Yp`F{O8CEN!GB-`@b72FziW8-54GTb zY;lX^zsd1`+=Bnt{WST%;PE%x!9yPe^V4T_!)EUwHVRxVy0qSl>$cUv0tv zeSrNP&CHP!l;o<-FWQ+W_68^(1_`ehZ z{Jm!Ue;6MA8yfImFR}demf^3p5o&(8j&^nV=NZ*ABZ&X{X7T|c*AJU*z!V*SB{{~E zhdhOA1MaiXpkFJ|V55ZwS4MyaDFzKPVmw#0fCd9BH2891i}XN~eDIKk28U8LJ#fK% zVAca)>@ev8Z+Ld_=%ji*puxWO9Qdaw6(2gCo0q$0+g41pskJH5^Sh~vQeNSh_;KCD zH@zosv3ZZ*X!o9-N%olh+F|FrHa1_8 zO&O#;x9tYMVsn>&wji*~*;DZ(Daz3*zJpWAzK?l|QmQKMv?{wYSbKKgXn&=ni*KJJ z!MESh$*1NFEmqHdy7hn~(f667qc6#Sd-*TBD@%{_KJr&u$u)WQE_^2We|A>GKFxI# z-z^dN_YqfLbtn6tuxDf?aP0#=+URKf|6L!YV^76Vybm1SH2ptm`hQhlc|OS{?HRw| 
z;g@r#!Z#gbl+u*0Zhr~C>4}P8uGKnJUwT)?U+}EGZYujEZSSCzX7NlGaLY3VvlZ{e zvvzN+)9oM2k>Ag~WFbd^Ev~k9*~)HTUa_)Uk}ak--xfFKOU@5*zR(swrhq*&i*3c- zirdx=jiH?}?7&jd=nod28g$l|vj*K@d@Lw`Qb zUBLH=HdaZ#AibU1ZzlI*d3KDB>r~ETcvkSNt_zeoz)kR}0UxK(8GQQio5VF4hg64B z8VCG>htOH@SZRx|HE5e;bJhxd4V(m@mEf?}wy@h;TisA6{q?2)INB206mk9~eWd`y z8fcqH|6c;j82(?({}k@W@qAzX`T3kXc~*Z08Y})Oz*psnWvt?SW>~kK4wM~&231@a zUJ}?{z%9Iz4cx*j6K%1zHPE|OqrY04ZHpaa@Qv`yL|~t4ThPr1>;j7uSYl`^OZo+V z`7h%l?F(GUJF=&0v=&^oetE~q^TM-Q8$2ucBD3|+%Ks#rt#(z6lIvm*$e2W>G?Vw_ zT9$3)@)Y(5+}cqojRn^}mnx-G`OT)iiS&^KUlnl3xJ+wuTwDdV|)w1uf`GY`;zmV1f_Hg&+WKaDV@fz@Z=`i z7ycJGh5u)82>&19_|(?E*5G*=58?YGjLBDw$r0N{V?JdJ4%-%WJB$pBp`Gc#Igb9? zAp_ev7a0&f9>?|RTpve&J2)Q$d?EuW-y7+zj)m8f(HR|sV`9}S$UsE(N-Q*G44c&} zSSC&H4ru52}=sivU@cW;jSJ;o(Cp=kH8z}SI;cNEp6rRq6_QLmap2s&GZU!fj zi9B?A64yl+ISl$&{)O;BzixIi!{Y>u5Z;`5$rXmy4uDmb2R*UY9 zfw#8ndRFw)M2?s!r8FMiYD&k7Eoe%|_Cv;H%tXh=)0XgU3Fn!>Cbqx{T$!B5bKk6M zMStF_C}pwSA8W&Y@a_U&6MRJX$aop>xxNXMoq{Ll>vT%pr+8$)X9GX^6}^(WS@ERU z(6`_(xja3Y|pX z)P5T%i-i7O5PSamTXgQey@6a!=c2=e&ld1&_B%x9iarb5?@-UJAGX2wX8quXrcLRG zd+||3Kh!YJ{|Wlxd~Bc4dG*#n?jMnd=J>h#)c@%#MC-wkx`$*;r*^55*=J9YnFZN|TMS>&wVzqjE_ zbL5}>@!kT@*m~a{A1%M4t&&^(KP^wdGLB;%@NDvotM}otE%}N~$=!*LbfNzYe|@L! 
zuYU%w_alxO$6@yU#U?%bZD2?i?}YN@|IT~FOpSc`SlSrNVUBggXWelsFyunzX^O=@ zOu?qdfnPt4h2Xf-lT+{G_P+-@OZ0dj{&(en9PkSM=9od^h4;?} zhBRj{uqn;j3opL@=g`Qo6#~Bt*kd`efq$ZBOue6vkFUYM0QjZP3-Re)z%Fz*#|;uQ zYzO}H(RoH}P=!n<5z{7}RlE`-OaZ@C^ml5Ppnod5d@6oGhQP;tC%%Bh2nv3I%@i}A zk4$AZ<=aA=revz}A|-cChjZJgYKss1>t<}!LS$R;is3h;jRKzkEt#s4co)AeD=8T7 z#_~HASdADfU5{P*0^>A}P%`0mD!Fknbwm3wW)e%8W7aA7h}BxG76x)o7X$r4hJ;9urGa1**3zZ2cTkPtgmKXK}_UrCWP7ooC60e&#speEhq8__R&woIcQ1V)o|pw^N{%Iga`*bT#-) z@|?nF29L!-PvL2a_x@9Cl#y#u$&JiME<)v6GWo{#o#3VATF7-7xfb!aLh0JIwSggd z(5QkUUH?I>3z{i${;i*fo`Jxd?pXfj^Y5oP&)LAGamB zn3(^8Tuc^YX3eRmT72y+$;Ch`(V3zAib>F_?>T&J@vplJ5(&k)XJ|;uY$Ee6>4Dz|%5+~N@V}PeApSC$YvgSPE$7)MHCZicWvQW>* z#PIu{qDRgrKGAYte+lIN0Y6XVtEn90wh%d){}=e$$jh8)e@Wf(8*R+hGG9Aa@qA6Z zw%VqAb;99buGa2Kem~yxj;(OKWodJF6By~-leznvkE)(gy=tP{ zU*~8`9;%J+H1pKQnd3dh9JQYu&)4LUN*vwldfug!KJ>ieskGZXl_M4ZoOyv|lgL*| z&dHywEUoS}(i_ZECDc}3s`$xeS1SU`dO8&U z$y?fZ%k15X>wenSd%C~k&+Q`f)d^bOYCn0Xf5j_HCvi<;<1D+fK9#v|l{ux9cvYM8 zuBJVilbnnWRaN@i`M{DSTVm}2`d6dek5>Oy2AOmbH&t>kB<3% zSAEQRcJhY2Cx1P@WcBO!rOu97kz!M~+v3%6zF&Jj?Dw+4M^D>Ne>Zi(Z$DHn=`zmugBP^tx2OM2dw%<`>q0&+&q}+} zrnDt((s%4O>I`Dy3&*RMxR<0R5BI2-#jH?lHqRqZ1(q#sr<9h#H?MASr@apE$aw98 zuD^FEON;Dozkeh&J28DprK78_$gcSFd1os;T=Vs4Z`Bqht?I}qZ(X|EzYjWl<@)_L zZ_V9y&u0hho`YNLp3h&fd%l=!_sE#-rzYWmqr30;M8$u)k5XO@kJj{7yh$CDrKcw- z<*WKCgZAC%_D?1jkmpCWqefwZ+y4N$k>Hqj^triqfH9N)-lIRGU8!p*YC}7W-EHXU z^3Kd3!BeRYcUtN&@}|S`hE@S*ZY=zTjX+lGd2QOJz%sd~kYl+O9_rG&KlK$|Y70jw zxzhfh+fw(yeQGoO&Uo4!p_JCp|CkMK?^C-TfHtZx8=jpEZ#~I@KJbmAR->ma{bP9# z{&LB?NwzMv$b{DBH?%46&9^J#U4t`S**5o*Oxyh%@Zvh~#r^!H; z^xe&eYso7fD7jqs z#L%zI*HtkNs=r__GOG0(OTT&a%YG9xRlVP;fkoX4=r^B!vj-G+lfLtlw=bV!b1q1| zZ2NNB8<;sOuTG^8r`E2sHuE|q*Gapnw3`Y(skG~&UG{O9nN7RJ;8QbjLAP|;O{Lu_ z1Lo`PZd)#}Sf!O$Iy1x+0lEe+Bu73ZyrgWu5d{VeWx6JZ0{Iwu6d`MC%=bM zHs;YOb@b&)@1Pu2k-xhG0ZrFbT?21KyN`Mg1ag-i2$Yq4o4&_@F)yuo+~)(M8TT0a zP6hw$z!yUEEMOG;YpDO(3eACCqq*>84D^_YJ`|cWE*j0H4y9+fE#|4@w*Th--jQ`l`ohIZ)DL{HFRNY zZpykbwKx^J;I~4TuC;#VeT6nIXcMd95L;VyrIMQs4gya${f-5P1v(C9z6piHe2pJ> 
zET_GJS+l0rrRlUucQ|XQSIkZQrZGSE(fKi7=kt>Ti|hF@3x1UGma*>B7FhJL9zxqr z_^}3l91A}NWnt@b;X~ocVxH^KwQxMSgOEJ=sl7-3wtoe3e}kSme!JV>k(g8T|H8Qq zHa)86xox`G53#R?O%XeyW;iuFGCx*o&AR&j_GDn$@mm`DxZl)AZBonn_?h0v+w^gI zqTB!22Z6FTz72GqVnfy$w_!Hbmz3m~S#ojE=AVH+Uu}x=9_`J1|F~hr%4(ZuydN7$ zK1=+I`QW3HXHD{C)mBlTQcZn|iqEhLot{UYRoa?^pRpCX;am9Uw+Z?lrf0`g5r>Jt zA*H@da2L|FQ#x@V#nT?*2ajU^*EkK^&(?&Yv9*INDIQ4eS zbes<4Vrx38wwbSpO({(fdzd%0#PP_`s-e^pT|`}4ApK|&dH$k+;!zJiw!{fds@mcw z*ql3lP7Yxz@UJ=e+=pMLF8*-M67R7uzkK`HntA^|_Wq0yj`fMDJ+`)R|FLQ93dd&; zh+W~eImb+GgMUDuDfqJ~_?u$iO3oAvk#l^9H0d|*OrY$iJY$T(wTyxATh{hKZeD-) zEBI$0`jM+|7>h38DYX#>ZpO3K%o$gWyH*>YnmXz{fA98+U*~eD#i{&e+qx`BwW-Bo zJEWE?h2QFDqz*8#Hq{pE${kp^Je``N+Y**8pQ0#3<@!4t1Gz;Z#=<%VKH}i1A;v)D zN$|-0CXky1kBJXy==Hok%yFNUxv4Hb&3|}1P&Npfh(EA;Qy}+S?qx%(G1%RF{MWeI zbH_`4+9BvZH9n>|o7&AWZ3-_2_Toc~!I%>{#JF+AKE}q!xcC^8os5Z#F<-SJWCZ1<9ChjWM6h3vN2U3e@LdS$j%{DW=E z5E-`v6T}x3Kf60MsSmueAHPBPC=ve)xlT)YD=;XtEj6carVq{hliS-7x)0Ebc_2?yQ|gD(;J|*tRQ>o5eFlTsz6NjrzUQ z^j*#Q&74bH%lW`iOGbwilV4R^8ai}5qV=9bZjTl>SLy^wRw8Xl&8l` zd7s<8ThT|rQ>GN$=YR9fz_JSbbH;}G%)$?)p3`;7^bHHRrrIVk4qX|`ZusCGeTl?O z@|^0we+hEw&%5vTG!^^z8}{d0nczNp0DJcZzV+t=iRZNUx@dL82YCN8U^_?~HYYyu zKzEwxvOK#osEBvxC%8Q#e=!_l>(2ngJ7)vAQWqudHPzPV$jTu#SdT1lP0?(I@Lors z*=C%fPoY!xoWL@nt>M=uC0vhy9& zlZ+>M0Ak=^jkeH0V&Dp=Qu=FR;0@Y%Cip67KRBL=zF~}~w|+dS)zHVYL?6$R5AEL9 z?-}X;2f4tKsUwLMI@NBA5`R5W8#gzxdkN!&3=R_c433S|ZFkXVw4eCk0AuwT&mUx* zzJPBRl#fffL;3d1GJU?uSP$|GhQV;pA@gm+^5AW(oZ+v zz8Gbx=wpd3$R%_XIHXPKM|k$L!p7s0A@K$n6v47g!jk=EKdEyXnz%RI%Z!j2Vc#HPqoS7<8Q*>T!+6Y zzGnV_XS<2NN$zN^(ZR|3Ae4s2YNt69eZlK1tO1@ZG{(`K{!)=#LrRlz8NxUuOr!zw|?! 
zSL&v>emion`jgp9@+5v+-A&V*E^JPA^2f_lZ86)Wp1^2tH8NV%4%YitP=d0s&sza^LveUUS2g2 zU03zY_(!!`r>?$A__2^NQ!AetKZ$F%T%s(^``twUl;sUIQ+dA&)=cI7a-#o%<-wY% zyk92ytBJ|f8|ICFjQ7NUA&=&-CLTBJ+kRxGx;@thVkgxi0g1@21+{EYqWmh^@@$NU|x-#Kmb<_?1JDA&n z-!vUv)gOBGQK$#$Qe5?Ix<_nN3bq?w+>->~&&8+A=DjJWpD53U|Azv5A6w_xeE9bE zALiE;IMmt^_^7AaC>`W_L9%0}jX3m0ct-T|*9C!P*LG6MUf0fp>q2fhi4C5Bj9)?C zj=8kJ276lI1$$Zvx>}R7Wb!5S-BXkIj=smp9j)SC9^>`d2J%4UfSinJ8*(BJ56ooA)yNqmqR{Ek)Rh2+1yvys?9{F#9E zeuD45mjlZV553--PoGsAMx@oeFe1%M9dJH+ES;k-h0P}>3>qZ z>fjjPl=tpg60a1EUwGL)OYnVt@ei!JPqoG2_r&}19EyL|5u10BmV**{boV`r{j7{u zhTMJe%<{{iubC%4f%&qL$`SyEnyDyO*ce?&VRt_kdoz zmq+bhHSMKSCzMW|(9~DqTWa}4rdfwln$0}O)b7+ggP&GQ0)1vB2G3o{x2B`07ZMn{ zbN)DEI|$hF(A%i;@zstL7qNa#We+7+_&D#9yrF-kR%it_tOh=-mK+_tEc{=$M)B74 zA@>C@e?~s<;N5ml4g6o@Ajdb79A9tA@df!sWJB_Nk{{G^e9-XnIAv*JcV*CLBi;Vb zfYVAhk)buzA)Y2@^;kWvyZNriE`_FZH{U{AFoqlrUZ;TV8+hf7E0waxdEby7ksGmd za?dPlmHm|5SvKMMAk4oz5m+X2Ne;5q%GV-uU68j)I)A@I4$k23eU2W!Gx;}YHC)2q z!5pXL>O>!O(|BC`J?ZCL-ur}lpLCtiEA@B9h6n9;4=u;nA6~Cuj!>@4yJzqn#J-lV z4=g)0aI|+SJYGazGoNu>NZxSa&Ik6c9W{N!v{>Yk>*Ny49>yN&qcw^!7t+rTr{JvL&zlF~QPZggu@NN3ht=LU3@kI?WNY%HB$4i`1L+l}O zqL+AMJMl)G9xtX6cld}qkeMCF@n4FG8znAWYl|B*CDplNK5=2{W$!N^uFJ)Pnp`CM z9%0-x{jAHy5!R&r&&oxjCKs`!?sB0OdGbcNb>VUJ*U(V9N9bM*&R-JyNzO!`&BKOH`SuCg z?TVaqj>X31r_3qpmUsHP@>F={RPRyVBk0FW=6FSBRD8fH=v0loq#`FSalxYJ$9dt+~g0ATe001Go~21xfFb5etj&wxe(rrhYw}l z+F{x#hDY<^(Xr$Pvf<0aw0AE&I?WbeEO{Hr*+{--m!7YA5g9E`i(T;|y!$EZBPBPm zW_G)+i%h(mt@EzrMWyB@i`*hP^kQ z>f@pC+k?)(T@`0=E`vQTSqzsBd3_@N+5nRxR?74Ww_C+imr;qSk{7L3Qu z?`E==C69JHgL?%zRh2b8@Ql`;5*=)-kI@$QzDHZKj(0xoO?}br6*J;A3d43%R`RE@6JO$g}#;W#4zY)%p1COzIY8CuJJ__@f&lJ#yH-~JKr;n;oQMF zdu1GD427SL^WNW(wKScVM(N{a?!!immiI@uR{@S>j)B;dRnf{&Grd#n zTylMhK4Ofe@+)i4UqfdIADj6nkNeO%O;uAgJ{FmrBsX%TyI+Sl|_~bd;nLzG7&I5C8d(1T> z6C~$JJXTS>{$}_OO11<5g(?J z2kD^3DQQ`XGPrFH_(^O$_ea{?^=5Km#+)_%YV|9cuBsaC-*>m-nRBDEv?q1FO4ldH ze~q72$NQDUMwP_v3cB`d{FM^$+22`$lIzG=W$6ddVAqepi@G4?kyOun7m@QGr=Ke>&t}fg$0$<_;5W 
zN_5(k0N;nu<{1-ho`4qR&_bgXw0Vs4GR{xl@)mUn4RuzbbncdD@U`gN3TUM9knU@W zEtdKo{I*iz{~rc~uhF~k_!O7hn~&~@<9?J$@AgL43z1oQei_fl>H4<%)u2q6+fTOW z+x_TLv#uTWIJil!iC9K#+nZhf=WUx&D<(GXRp zYP_ib%EZSxLC#Hb*;30zO`G<;5y_(#q34{`jX0rk8giN@^)SqZS7^RPg3k`EkQcub zohJUc%s>1FnpN>`Av#mu&BE7_IWDQ;)%<6ksnL9xPWUj1KGB_`H%0%6-YlX$@_hI( zU27+4Iyu32mDZ1P@!46|1N%sD7(uN{hS%SARGrkX1-2;Ez6I;^ym!0PN)p`Ve^!)> z10(gETW=)|n99jjQ};$q+!p2Bb#HT>d+tDw6?LZeiJG@>9Bo|Gyt$0L`i_tnkZ&)j#Q)oE65F)*eMK&3 z>NxTMjNkniEB>3|fj9BDxc|M1SzXyK>ZCwwEBVFXH{D(HS@us0K4jxcE zpEKU(XJigio*TgZGsK&3G6#OTH~A)htCAQa`Rz_z+uj(fPT0@{ctG)YCJ&_&Q(l8S z%+lncd+jRScew6bYzFsad?YpzowJ>Bjph3Yy&Rp3B`=f=pN~S{1naoE`X<5i(q5g$ z^WAlxPxL)VTWhF2tft2B2=`;CKO93HqL(_uZ0ZmDP=`2$Izy>1bWwkpPaUF{nnS5I z{QE`9koTX6U9t0tm=!PD$YsnM>&xeSD(4l{EoRRf<6AS^xgv{Ws!iPvFKo}CF7YMm z67BpZQ=9k_wTW(idr?Ptg!;sc9?8@DQ=e$(Je7JvH?@T+TAgD16*2tAO5Nh@_FBE- zy?VXkTIv<^xhC(tPt9T$|0h$=DDS>c?P4-@ic*^>eFSSB<5oyN>g+hJoj%-?T1FS` z=W|Wk$>P^&L*6&)8>J2Kh$Vhm-;NqbX{QTyj;DGJ_sILwmO8QBiYd03SgCm&!o91p zV@Zxq+W4uwv-L-+Ru4Id>t>z4n{%^HzoB!3PCtWwz%H}yx^-g~(d81i!~@IM_+O%z zjQN7i?ZgL8@CD{JU(a`#HVzN}!Gu%kNJ3YZv_W68yGn zUQRu~RqFh*@CYQP*1|&|P z4y!NZ`#ZQF%qJykdiQmyyKRq82d_CVsr=9hzg5ArRrn=U`18g!$+=Vj|E$uW4^k;U z2sqE@dsdPw6dHNw-mb+FtC=5zcI1v4`xIw%pF-NLB7P`=4pTUM&?c68ia2l=`;09u z{*s!&d#SB(_H%DoOJ97qjBnlG^H3Wm^~t_CO8Ui-nZQwvcNf0@aNZDlWx?bTWisor}|3j z&_&iUbf7SC^TP=C5?Rs8o>jmbu#0L@^ z@f7gN8WJ_ z=OK8`_^x4ay@-wVBZ}`;cn^c?`65Xv|GZf7N==E>whRM?0bRU^xg=T6&QO2sj!ty3|wT`cQ9WX*@H_eh&9tf6;_qx{Tx30m6I+rkYdV;d692{l44(viNz7HE>x>+HH1oO#(C0F5c{e^N@d^69yYD&Ln?zhI?X`uk z8)}%keYb|G#_k=64{FfdfWfFclD>&y2Gx;=_%xEWDSTD z-snSJR{?drE_AWfwJ=Yen@)J(K zSG=qt7_g|@y5#qlUkShQy`2SXnUgN2-f9i7O_AFFoM7#LDmD4iudIVmRcimkto5f( zd(V8EtUn3X`#a-eiu*h}x^4`4f-z@D?#-t@D^_6+kNT{8ZLI%A|M^%q!WxI|)OS!@ zVe?I-_G2QoAAPtlYjpFe`RKzpV&wc@{`1|r(m9F2bK1qPKqpf3k<5SRk4MUPV@gxW z=cZo7{0Qf+IK`hFAH0{`f#>7!HGb^yJ6TVdG9a^9&2wn$3bUx~dJ8)t^{@uq#mogs zUN5l4hTgX^UUtUK!T3cnj;tG;h3=ng^JG8uqrE8?#T3T^OV(3&u^uY8POyL)p?!>- 
z%DiF``g_U)WA}E9cNP!ge-U;zvAp0|WgI$N><#|Q8|cl)(T#7QJKsbK_j9NwG`FTwxu;{RyA4?4UA9iE2{FF}Xzb5smZ znXRr+ZQZsj_&>^1`|8)zHLTC;&icIWTHIh>Z`W|Y2lsn?=e`lIitg@;e5c&qc||Am zwZwNrkXzBQJ-PN`g0fWZIpaslx*31EEul7>I5Q@ixh8CvGg;Xqwmt>gO8l9^f1#aP zUa({nYqYTExzgsGhtLl?P3Mrm?|>ZkEo4m@JhhT>*vxZdpOUrGd`F^C@n>rQbnc2g*+1yNW(PJ8Lv!J&G~Ep0OVZ4~#%&Gsvw4bMj+~iSMttE$ z4y`eU#1}>!A+g1O+IrVd&<(QQRq{Y-=mhnrG4--`J92@KJ4j-1<)`Y34=*OK$A6Xo z4Kmm9J2KZ19yH5b)f%@~Vl;Dnrp;B_dg^N)g7m!_Uo}tUlUn;c;yaPe(h_ zm{k1Kv$KB0Sl-E)-oe=3?mhF<+mNZYnjNgXTk)LWxE@}hR-^NPTLa6Q%Ijw_*IdOK z^}h3dwAXb}TyYM1qR+g$_|7zILFgwN8Bcw1?A|Q&F5fmjTJ4y>9DOq0t6&SLGfi*= z{r^7bi!0F=pYq;j>RPTuSCCUI?T4=DOS{kN?QEl+e&`CZJ&TZ0qpfYoy7&ST^BUJu zxt7gZ9bniazI-;vNNP*+u@|Wq#TNGk)~VQwmDt^6^hXMPh>dX4M-s5gx6YH~yXW}q zsx2V~eF!~Omo=6*<_G!#|0denOl?I9@Mi*lU*PXUU-#0s=sE>m*BAInw)pdaKlpv~ zB%gds?n-po*V=d4?Y=|wQ{{+qoupP-=BR`&8Eur@MI)j-{gGwQ`2NM;f73nFGgKidBGZ?>2i*6R3 zD)qdd(~j`*qUFJUZ9Kc^ohZ)?Q@>v^|GZz?Lyt>)@=T^hkLRFUzm#^KH??Dr_B>~5 zXFaxSx3sg$)Q&~h$MapWe@Ht%Q#&J~JujKs`PtDxnbf-HU=QlyvDky1^tDymdBxPu zlhK|JOznLCNT6)3w1ds4Z^vpg=<8K!=XFy%??ii!nA-V@@5(HfcCag2JB(eX#jfPg z*Y9~|A3k$$WbOCFcoOG{jgfUgb&hzKpYf#b#J7(afVqFyXO6Be8E5L%vG<*|ai2#U ze3we&e`)I}Q#*TkZz1>3 zwODxW1U2N$!}$+uw$`r6C$z~GAO46Id$;rLLiX3A3wGc1{o)RGa;!J?D}IqSMgJT{ z{@ROd0_RJ#EApw>gK{~Ya~YpQv?I6+|32#oj`JLRo*b=@9=h9y zyCgjJsS;Z&IV9mRv2ns@MPlpVu^Mo_0zL}xjM&=V@XiZ-Z}D>YNo?lRyptEF7GDlO z^|2}bxs0>a1pYVgogk;oyej%Wo^hA;Om^QyY|KICWhW!sl288}et7$)-o<};#_c~y zUVIpuj)+~5zC-W{df)cXg$i|+@H2Ne8o`mLhRy^7+08h+c`JIZs9J!o?# zxeJF29M1rUif$HPb^+rp{mOWc)cWk`J0REL599jX+Vvj3e{p@P>H1vldUxO7xSnmg z{(^SBn{N--U8d_>wCi1cJGdTWx_&^r-o^JG*QHih_7vFH>0#J2JUG#ohzo#aVk7^pv@X z`{6&IBO~p#Yux4KTliiNIk?l*8L&y zbbKApe*?c9cf`2fIu%$ZYY#prmaC=qtB$$))8Jd<=;%7`=#DIR@=bD3L&f~jtM(o{ zSf5l61M5u3Qy<~G99RT~;F^an^xw(Hx|}o(2Kqn2y%*@G7PzD>H$KZ&dx9?|N-2{V zwFa1yUU1BG!vCqvMW$x)t$pzA3E#{8uNmX->3!IdA*s_kz&q0J6~rzq$Q>fjIa%)` zW0B6cG_sIC$umgI8q`$@S`L}`yO=ii$opM*SMueu?x>3Q-HcBr@CBf$)HX<))y&5j z*X0?Bx8?pyj*u)rFJs$|5W7qJi=jzhbc^&+K_6doF7|Ad?q`ZUlRBZX_=XzJ 
zz&I9tonvmFxLsg5kl^-i2A;9_ee$g6+Og>MX{Kj)5x<|1XI7b>QKCK1nx1(PeI|B# zY>DX^?7}+JGmG$TMYoN`7pw29Fxs=z^vra8Oz{oJ;%C)AgIzgfdS)E)yXZFYjl{PS zemlV028rK`i7!RJH5a!x%;Pr1?G<|5Zi&lx|KA*!my_R+xLoGrBrdPIry(wX@VjBzr)nZ)%SpY?z8I&myntk~vp?&2!Qp$$UBb%{!CzAGB$nYg-Lp<1-VFo;#Mm z3BHhc>AKv|>k^D{m+_WwkjWgBe1A;7IhM`$#!@)$B_BG5?~bK%to=XSy?K0;)z!fN z%uGTi2|HQXGzn43!lJUPG#g3)m5Q;heF>m7Auh$Wh?Zbz4F)w2QmJ4IioRw>saT;p z)xLss35^TXy4#n8V4X=oK(-8!&hLAkW%A^i8PK|T|?sJ}V&%O7YbI(2Z z+;j1TI*75U#&5Y7zveK07ZE4(BL2-r_NM0YJBwJETf}$69#z)RXD)vc-%SqtR1za2 z@jnvRBk?v8ha<5z4*7?c_a*oQm+XHDt}dTy(wY(A$_LkSOMbLU zelIa_nRrYb^8K(q%I)IA{T*_t3>|j4@RRHV4+Kv-_}B~a9%ipMl|a0=MkVy%EMot_K63n|E@?XB|3Mehb`hEpSXu zx_^py-?m<}9ls9aOYx@tAKPJ#Avf=YletskF!9MOXiiu=SbiU#`9IiB`kYsKvVgpa zF0btEwPM?deRF9a{Ty#CIYcVx+Z<%Ff;eoM&koOgOWSik)aT3&6UP|hY$_hSbrNzo zR_rH%JCsn|A$2*3A(nUehP3*v?`|Xx)a9<8<)Cj*w>CBP?jzF`-fJ{0z zRXYPD{vP3#*$O$s7^gSotQUV9ovC8;?F_h5n>aW8u;lP^4`DtBPwv90;-BB?W}hdg zvSMNmxyLg46&~Epd?sjtw7GJ;1?8Re=G|<*!za~W zSvKXqiumctZtg-OzNKn6@%XjO`5Ej_RP+1V%NYv;m*1u2$zH`W{?C2DaV7R|mE?t^ zk7fvd!7Vtd>BCCs$t8E*SjyHq-P*kjfy;C1CJ;Pg{pqOk;2%_aIqK+p*+chnX737-4QyH8 zWxQJ_HX62lAVJ10vE8AasW|n1j_|4YNg6nB(@6e5jThN(b+g}^$ewCaFJ&iZ6BgvK zZy9h@uaLMW&Xp6#o5pz*_rsFMWNskDyqMv&)?LUMlEt%RE*;=qE%7thYjU@MJ5J7F z$hc+X$(-$mc7gfZ+rvJHLhe&X&5|FeCbriKzJr2F9mvJ|K#u7qb2*DX8B(X{*!m^e}=|Z z3@Z)_AC|HvlDrw^Ur0M8&t-}?8=f4_w++rh&(kUFFWfZi{bA^Uo>P>S1HKMz{uS^1 z^oiC#&7rc_jC$nTzl~E?4&?vtbcvtNowtvDd%r7P^Vv-TM(qBgROZ0f0Sg|-( z9Vhxt&Nlj459YA85Itw=IHB_b=$!3xY562;(4%_$WlU08N559F$y}uI7yK=}UBte9w6(t(TOhP>E?uL=)R97qAOD`jcddp-k;$en;L!m} z&`m5iyuYcIn80VnZ-kCcaKtr{*J#t5Jqiz2fXmeT4U!)dy?-`(-_06%H|sRL4@DQH zXlrzBee6CnZLyEB=^zYL`>$F;$xxg3-tQiygy}b zmy(2V?knjJ(XqsSY3C<|*CdWk&l~Ir$7@L&#`8|>tlOo(GwAQk!l1;1`H9IFJj8v} z=N_3FoX))0_@i0xcWhH8jaXRGfzNDG@?$lee@m{TjGT?i0+|mvubt-~uK3){UHV0WvcCSWV*@k%MQQ~cf;-o41j$$6ZeA*V~orA&G* zppDgL!If0CR6BDg-&%1+!(+v<5*(aK+az&t5)UKaoAtJ$J0!=egIpBV$XEAwgS}TD z46Twrses4L81QoVd?aIh1f0KTpSK{P@L&!x6ecg`Bv2U=$#+m)2 
zpWjIH{#s?t1P*IIv=HOKS?Rn`Nx~`5g==FcGD*MpV9d?2%Z3lgSE2^s53@cwYt0&7 z%cT`qPyQ3+^{@LRx31!qamYlE@6zPOp&sWv%IDbD(*L=Kg_c3yC&)#xp7)p5i0>d# z^YvW%5b^Zrkr_3_T+r`k%tf`tTs$N;fgW=q`PZetWbP0fNBT=-cSfBaM<70$86`%1 z#g%$|#YlWK+Fl_4cZ;u}o@ntE>um8AwSxa~9lry;KBLBntuXN$KAN8j+!{+f!!{$n z!hnb;8 zkbR9AuUlZY8SxbcjKo*SJdUreKI_L*gSu}(`~rhzeDj7eZqsR&$+Pa zb)SOR3X&^kP~J!QC1j0}iS5og@A`qv$rb4Tr{Mw7|AUwZ3S8;!yB~aVbyJbDQsjFx z?_`~kL(W*?yQA2Gwdey=<|iQYp+}Vk73BETIvSI@Y|HIQif=i*(%4(_ zMe6bXjgocM>6w_h8MfuK#{cFB4w;}K;+ zBYIA+Gu7MtOXf>UedXAPU(V#Nbcy35w{8yoVzphDA-_F|3l!T`?8aR34CkXOdlEy) zI%`2`7CD~Dle_$f$`z91nQ!|qJA3WmbVp*c^yxCrN{^Iv96l}gKc)tg_?sB7&mr`0 zaQbsogBh8w59Z`cTyDXKBF}LTI0iUNM8D|$#96Ss74%aM>mr-Y>AbSUa%iRrL)0ua&_YL6bMoxmp>#(Xm3g;;*wNvW8*YHW&MH z7cPA5+MP+92TiP62c5znf9*f8PIzS64~v5L(vB4D8958${rP{r-`HQDTRcD4=N9pi z$=s5IuJYU?@fV48TYz^XLk@i0DbU&MB37M;7{QhM!T0uqy8bM}c6|HiddynWg9>+m z==yU5ctn3T{k%(mR->z!XV}9|@qUdRYUPdUy`i#&;B_uq*deyF==*b!DT()TF-DEF z$FVg@8z z?pmLV9^OI@i!H<~tN<_u7QnCk=wwxX2haL1^u;f0TLdBmTP!+7MxU5@?l zY|n(x+m=qBVR1BuQ_qOHTaf)g~|%}eezNA8QRXITkDg3 zSy?NY^@@Efb+5<1@ymLGb-(yTn&DOXo5{Mn!pPAmf2ICA-*tFKz~>dbv-%r0-O}CP zkk6UZ?*0bO8e04fzp(fl!u4wHre69YTD{_Xs=(h++e6wtpgoIx#cTQPwwKc%QZ8$t zuc|_;^fkHUk|@ScSH!uDRPBuUM*2(6VILyTf_WC(%mX3))=FRPMwYSTw6&)79F2@6 zc6HGE2!0*n;R1$V;%RtZ>XDoc=9%rg$g2=?IdLy>MueTuqhuSZWO)OD+0onc-u}rC($DSl*%U8?EgC z*6KHfem$NKo%BOu02TKB55w#FdQ5BqtIs3F`-{i){E%k9tYc0)pMDUZjO5qy3y)se%(SPgM17F zyq{haS|#sfU94a;6`@PS{uh{pBF^eqe8kO+yVzgvoZ_3~g-S}~oj$rO`=)Rnn4|S# zqqI<;@PyQLw=<>gl}kgb-uyaaVi(_M}G<33_e;*YY!z{%g<14nwA@wkn+cNNgwXV%dz5iE9?)7vDykC7!E|7=S~_&@J%} z{hOPV1r5+w^$-1<8uXaxwp)JI^}bf#|Ine~tf9T)BQ1=7_|rF?X02hF%gIkqyL{xVW?r3^ z0DVFmISZ47H*=A@Jf-Y7^Y?%#xO!>x!Ct-R!o)14Ujh6x&7t@<{7ETm%PtHGy`|_k-l>#pHj4C<@R{?f+UxxD3Vng+ zwYwwo{NOX`F6Yh$WVFAvZgLuzN#5rCdO7E}eQ>_A?hfQ>zT}I$jy4M3Y0xTtocjyp zW@oPIwe<7W-tNn#uib-`{%+cnL3?T$lRDt57K~7r0Xu(T#ZPh*lj<^olQw>pQ2jS~ zKZ?C!X=~NB=c>`#JMu?*du?qE{*1mVNK_U$ z+=SHS?o_U*N*ooHo|yeTCwlz{zI6VUOS0=XKKbO==e3PiPtf0QI+Z)Ttk3cWJUp0j 
zAnrXQFZrt4PiMkMp+n9|w>+?8&2oIUk8J3(X5{=^Cywe=`Yk?NIhczr-%gz4BQr)# z^dkrP4oANbxZw90mEgeH<#A!(1zR6dkwIiB1%~E4saqyHw=f4w;MOOq%fLZuqm-nbwp+Zu4?yTAg#r zFIPJ!b59()yuy{9lo78i*zO!yw}iG`3eHD(#w}yt204>&Mb3_L4{(Fi-K6n5_SFRH zZ#eqX6`91v{FQIV;~#G6vt@DsACttCkc+5J?%0sC>rVqS9Uj%n`$M!pAa)$ow&#IZEVidFr{? z4H@3qa~uom9CfQj?s_6~w^rUVafG9HvdA1dcjqHjBPNz&H!X4XteZ&R{e=5!?!M>$pM16D_UL>?gjGzwT0noIvpF}C;WhQT@L(}~SqTs3 zI+KzLL^k2itikZ=VD{0Gqi48}OLRSZ_Du!ss}Cb?Bc7B7#lx)ATrAypeFlE zGW0VOkBZ!9=;QOCG4=s;n8=IV13sI&oM&(@g}R!N^)_T(bij6EI3)gUI(sm3kN9Qa z5nLksTaoW7L-x@D-DF?%!llUei}2gE$o{2<>|?8iWq*YE9@!tk`{jo0o3L*q``21z zUxP>X1$G3of2~FKm&_EsCF{gQO)t*-C{(ru`Tb99%hoHpw`Ci5BW9!HPNj<;xi^|F zs`lN_{Z690+Y`tqa75SXqQ6C7S23oN&qv16fvvpU7)!>q+gP?RmX|SB8yL%ljOAs< zSUzGI%aP`L#&RU@A2G(#gnfsxTxc0f4W6+S*pZCoLSrm5Sl_NgZ<+jW>T&d&xJ4td>i=Gtz62FgWGl{O`&WRafHwmvz;|_)YFWO5A_p3&; zmkKAwp4VPCd;#tlet^r$BKbsYBCG8#cDsz5@JLc(;fl8_=TFRc^h#zw@}R^HA=5jT zSLIL4L9S(8c>?>UgWODi#or{pKv`D~;f#^HNA)ahAY*MQe50=|?R+C+Eb9={_L-Y$ zv3*LlJA=CMjcHTMeL=cSy=L{u82qx8^_yu|-_5%1o7>ex{$uRwEyJVpi9&fSyZSu% zMCRVT$cM~3P2}hnJ7PJqvClH+Y%t~==AMW-=W^zh-yrKxqEjz7<{Z}bVf$f}`JOpv z6z?B3<{T6DUFMu8Epv_r&zvK$qnLA^w9Gk0_=UvRbd+_lw5j#Cp|VwSFQd$zw86vt z@mFV})&_mvO!Qt3{9`;af5;peYyK2F>Hn?y^GzqQHr(%{+d@ftTd;-x{2yuyIdaXm z=yqL6L#S*j<6_!*t;~h2=Tuq$$y!fA=gaub?ON-F-xX0N{UYz@{XA4QkvZ=_(LQ{7 zSoc0z|7ob~Q^xn-?US1NP}wq@U2w&T?smg%w*SNak$Xer{vZx$NcCii4LZi}objPm ztq&D%Xd%u=_9i)x>6JBVs$-z$uV^@5_kmS2X5s@oh7T-<_^?XWsR8cqkoC}m#JQMr z{DZ7d4kByTIo{OOR{zIa6C>BDOQ^%t)7FHwjRP6qbw}`c2jg}&PY1)*gJ-Ot2 zp|Z}PoL826dsEs=*cTE%y*cmyC;3V;gQ2K%e>rip(za;6k~e6psSgv;>Hi7(FoV6b zIpp24>cg6^^cWbihsDok+Qc6)wpJVYr`!`^{eM6Ihvx_Mj@aC?UWul6tnWnc$lN0R znq3_#;|>Y%7`hoevW5}89Sx85omGF8ctT}Yd}cz*5b z#q)C}`iZY_vv+955*I{DJ&EZ$|r;>BT~%>J<7C32?) zdzdr(vS-P@Wl5ac`ncQk#hOIVmscyn<^*NK?mmtUd(xd7{+Z$0An_oT#6`4aJJr2$ z)hk{oVLzNZLIT8J^gOWZ>*-IkKjPr5eXFwbg%>XJcwU+8!QZ@LeT`%1)2_)IHg)Ln zA?95mdVI(go|7}b&lzZ)ro9)FW7!tB6H35aOl-lKw41h$jp-VkMj3N0;^NsL0ZlRL`5>#6x+ zhsc=3YEh>uF)QXi@JVAI_>V$gpEEkFcU@@omBj6AsdIZ;A5WzJl009|j^2M&)YBV! 
z-xYd$RC7K|QTNiHiS*xw5=DKX<_u!to$B%y_S=T{@@%Ia`^NP2w74$W;AhThr;Sy# zv4FNUXER+yJiT zIQ=~5-E)Ye0S>$A3isl~S`To7N5hv5K7M~H=Un9XJaVV;yMnVHE&MNIFZNbNk`nxk z^1Frsqm`$4ckq9qt7AgNutFc_rm7m)3lJWWeFNc5?s|t~7X(KD|D5#E68Z=lI7h9=QkXGd%`T0`yuZtLj4AS{{)6Az`BQ!$!gkuu|5jXE zi0}A<_#W%IREf`_-N?Jc`_WwUjPC&NOkx1K?p#laymOs9x}`77bE<-4_*^~yEq0&K z#}dOqpO}3ueNH|_@Ejz8$;o43yktqQE?EKuQ-v!>tq{2e)2 zNo!;cR^cRD5S4*4~^W1_8%67YI2VWzxdE2H>ien#8AKL+(O(gz`eScTKwS(mxOs}cPTAcE7 zb3cXgRlFY%7b&q%)tbCYez*bNrRXJK&g1T@vPtk3W9*ZDa}vXsNzP*Ub)(`Qqv^|w zbe^qu8$6rSM;UvstUJhkaU*b<4a;UN{rG%!%Y5%0$V{GomY4fj=ohJXOE1 zY-5fKHnRsU_KVOl*QA3!Pq)$WFV0t4>5%goZaE8K(82kIai!!8a1+lSa1Ib375l;i z+Os(iamDJq z4~S9XTj>{xTlBl&b;sbkR7d*5KIh;jHbXa=IlZ$ieemJ=-N)h&Mjy!7$as80UJGj< zG;jx6qjOM`gIq){9idI@=!aegzB#Oq-m>BI*zo?=_vBz=;T?_rA z@gj9RVWal}8xK5bqgV821+v@S=DfsnC|_cdb-A5M{Y$|s`OA(#gEjZPY4mubB8 zr|FYQxhF&Wy>bFdjGt*8z4}K7N6Zw+()M{)A;q4bJ?JBpm zYpShXVw=5xN|Hh)kYT9rhd<~(8Eo$w(2SRr%~!`h7PA@?H4JYv3o zi~XSTQ@r2J`)i`S*LY+@vCq%^zP-P)G1oWMhpbegv&zI?mbqNw>&p2nbxfT?3=aR# z;eWwd!vC@6Gm|fy(d#moy*66sv0A=!JE~VtL+?o*i<}=ig4?N2)05|(uReOXJ0F!M zcJYxpzbVA{K1^S^MVIcoBq+G@GOK6h&52)#txr6uvPsTDX*gFh2fzoHPtav_9lVWQ zhK|R+Cho-LlDuyzdCNHqFLQ4uW%7S5|HH2vv!;y24rjj;c}S+L+(}LdDSx)BJf3pJ zERP!(h7XP9TsnQ*NZ(d6hSQ-l(U;(xw!!I>JXKPj!T)miru>$;R+sU68oveBlnt}( zMd-w6bNl_+2r?G-N2B$|aq2MifSggtV|_06M@N*q`YPTapE^1GB$tZZ(>L&Ghx#*o zgG^o>{U3>MlpheDgGIU}{i8K*jl_k3y$YZcCg)%Is!kQ`z2IRjP6 z*|2rJlj^FA$@P7ef^F0NZXVN4k~(Klr{F)vdPZy-_x*~;eLwNg_d9u9JaIhnJUw_4 zc-%aRJV`vsJUw}O@$}~D!_$|iA5VXt0X!)@sXS>s=^jmvw#8$E^n+Q6}d`+lI$^cHgbpB4aZ~ zu)cLTba{Jrg8EzX0?62yzL~;fO4%z#p;eqE*fgIyWPcf-aGvbBPJdvWb_PvirbqFN z<~f(=Jf3yaDe@a|~w>`W9EtnW(s(bqeP&q<`fc z?|$HA{kWIEU-S1X{vP13#Dagt-*)~=uC)XFmH6$=@q z0Y3MW;9JNM-aRH;X^k@?^KhYM6Eev+&okJr9EhVDG(8k%>~H6%~&H$2W6Ue3@G z8=sa}BlUteK;81apSfXtRwFqHlzPGKo-|I)^0m zS^ysLd(2@yC3I_TuD?Jn_UrUl8tpr()hqQ29YT-LMUD{G9cH_w5AOblwqD#QeZGuG zeDsOozKj3k`qzx7tinp*1J7zFC=&*ZrfXv)-eel-Lh917p8%4ihq2eM(eYD zEBQKQeW2%_%<$e~w7(YJEB!BXRvm3*y}E;T)~7DaxbjZgy7B^aGGiw;jpVi}HtGH=^Tw9-P(xu;Ba{ 
zoWRDy`6)QfeviYZ$Qn9XEmkJ0OV^#NePidlcdayBIyq`z{apJ0Qwt3h-~=`n4L&Q* z`=jGrApQTb1?Mlp32ZE!&suRdM#p)B^#6wzoO{3tY%H9QS#f55FKXMTO8@`eg7c#9 zsl~v?!g-&LvtHx9vgkO^2j{xKX*lbVSMn=>6WCZdZ!&O-&rew%iSrfs`3!Jw*0MGgb#+(Wvqze40~8G2oG&wS4B$XR`KN`2P-i5i~4@{LVLi;nOs$^mqA zy;VnBbi_uxj&_5`)LFu-S+Dih@H-9swZD(hfg2A~M=Tw9IJpZS^oq`>Ptk#mdi(eC z-q1Pq%7_AOoJHsCE^)F?q3dsHyXc&zJGDGA+NktYBIvrMJFbmc zLlw0cI1N`6y4ISwz8{I}T05?#v=O*ixR#r^R)UK;#X63c2rhg8qN|l>dB48<`|sK7 z0;U)|=GZ(Bygo-cwE2F4S=V0X@%2u1cWc+!l2G?`IOPzbb`6b_NqwoLBbCp?NJ@{<(^|99Xd1QSF(dzs0Ab8KAJ_mfX`X-t6 zjde!w%b`1T`7-Ixn&MQ8fz$db3cs9V;%WdFJYePHW;?Fezy+L!3t0PHH^9X8Hn?nK z{Ei*h=imZP!v(C}#!yUL2O@F(z6+Pg{YFi{0~ZU|z9iiaP=>l9=z89cOY}N$v2cBA z;<_{v*UBzj8t-J?;9?$+jq5KauAjJ6JKrp^3ZFc>qT$@XZEL^ zuCK^fQjU&LK90oIV8`VL7jUs~eP-fHNr>pLU)XV-#XJpMEL@Eyu1h0vJ!r>u1Gs>T zg^T?jy^YJkWt08e?YN!-7jPP`D6;0y#YMY z?A0F__)FuJafT11-td8JG<+cSb|1*J?}(-e-D%QwovgE88=&>wXsHML-s;B@KC7kA zlxD_(xz_5xkTr2n#-jZ8F54nYY`LLseJp@4vFUXD>Dsuu#a}133VPp;^O)oVzLowF zpOGQ2Vz0>fn)3vE8(Pjl(-)F!#Va;R6ud2hx5|bWyW~`O2Uzh=bQ7ZzNpF+jt+ez( znOiNsOYeg{z^}LU!N2f_Nxy1+&=4Kx@4&h4W(&?gg7f3pI0FVwMM>0X-0kYJx7Q1O z-Sl1}x=LT4MWgRS1D7`LZ-eg@N5mLC3eNI*7W(|)RLJEeV+6c?+_zeB-jc-lMB-d1 zbk4EhtW8pjpEYm-Z^!u}IE6?2ncrNN?lHcx`>ek0dh|x{Smj$=6P5K*i>q~?)zLeZ z+!Ixf+~7QCuJ|yok$R1_bKC7*GFBjdOV*Kc*IrwyGIlV!=zgBNc<$kmSh?#8CaOo^ zp>$;Csj128ll)%k?4^Eqc`vo1hcdpG?vJgPzL$0JWV4Obb0=}?nr{^Ri;4Sui!+~% z>}iPZnZ{WvL-*7ROez1Hx<|vsdYHt}5&G~#ef_>ezBkwJ6Y;SwPEZ!Kv)3Wtwvh92 zH@OZ6^Zg>q#kX1nyoMLQaf;C&R$cS-JnWkq)(5Vk-eVudduG88TMCtNEn^CU>*O7> z)H=_hitkkH#8+z4JJtb{S4Cq^=2q=X;jAwnf(U zGok0gD0Kllk1@#_+bg0jSGT&-v~~>zPc?W>)vo!#luy*_YCIT{wOjXgRY%rE?2|TD zr3cs-fY(~^e`x*9+=lN$^VNut$JF>Ru za|QH?o~_sPSVJ$h<;qB%bHnZES;k%ZSCipC_F`S!OV?w-+jY)k2F;~s>N@9CGGy*y zT>Blhb87c?))ymBS)z0Pn61%k*EttTy{68&I)={at&C;ApkDS1t^%LvoO^jrATxK~ zF2r46R8(a)ZA*EKGqEfvR3FO%;HjW%EDsoI?{`HwZ3)Z2}W zgxf85R@!BS(vq4)uN>sPVIUdCSgmF&6CA`a{d;=tmySjp4b2c~Wt z_8qmmuch76PEEJT+T?JSI+QaRqQ{l;p6Z|5L#rIQ%#HjnZB~{it65W$RoCIrsuMZ3 za;2Brd5D+{d-|_0b 
z^3C>g_M%>;{3Uxi`$;cT?y;A%C-eg4>y2`)-=!Z|6Fx`zuk7Wl>()}f%3jX8>`BU1 zdpYZ>$0@I~m$No{gz}%+%UP2=MEPQSIqQlglrOZGGxsl|{7%a4=V^BLaeMZpCwl&w zk>rVXo~8o1kr+oe@wKwYCFcs-SzCEaxPx*lx$okuS9o8#NOIO|XGG*|h@7wSK0nzb z`E-@|!VT`Fg&sL$vo2xs2JX@Jo(nC4w|mTE_j5M=CvlGD_TM9y4&>8`oVt+LIAUSq zIR`|xiISw9hp6*MqZHqbX|CKkX^PLxn{LF%Fo(N{PjKcX%3AxbKI)vgE_E(3Lim%k z_ydWJseD)o4!ITEcdfQpQLn8@HvwDR9kv3ve!#8!wFS-(+^|Tv1i@#8JFBlc#~BIN zNgm#Hzp~)_F>t$`-Ds404*{2Hg?kaWw<6&Jg6}B{zGmQ_iG69n8K>UJxW+mmBZ8<8TVN54JLlw*HNFKWRP^9LE+2gBX1#H@#qbs6uR-rt-@ZZK&1Hav2a^_rbW zrh~`ABRj%8!hWI9S1k2J;gRi(c@g{3SqfwFmLp~#TyM}>%9v~L=K{~!lA0JiHPe7^ zWXv`AGTczPN9-N%59(q|i|(H7I!V3n(Aeb&icI+i{= zpxJxW!~N?kq4_{oLw~LB#4lj#!uuHKicy8W`?%Z8f4(wa>dV}INvHh9FS26>-<5w) z>CAlRl1}lFR9%~@^6r4dc2!)$Ih5Uwm+wZm=Kd^Ity;l-u)M3bNl9lrVV9X@ZdL-p)-sJA?m=&8y}_uQSA=(**Y{+^28rhCdCRyxant#r=+ zL8_->7I(bYV;bs#>wa z;cI-*saDFje+JLH(D%$x&vxR1^Z35fJJeIk_c?rD@lVe6@cq5!_o-=W*Pqc<|8ZhPjee-QEKRL=#--Q$m3-&$RUM^{VSPOTv+lgU;kxg8 zurTPvcXc1%NFOUZwf;UJ?a6&0O}!8LE525I<@eD3pXz%0C(yYNc$tf{n2Yb@e0@=J zVQxusp>GOuDl#z;J}Si@Q8`OL`*56levgw+Qg~1E^8@oQV%J5cJ;jPz* zf8piIJSDh&82xt+vApP;%3<{NJlaJpZ!UG*f18q9$+@~xWWbm+cI3hn;%_nLj2&*< zoN*1jXP+}9R?)905xl+2m@~*jb6_C8KWS47zBHLLE&-1jt0?jp)(2D0(cr!(^&n@~ z`C?&zE#COR^^DU=a2|*6pFqAhd(+fKQ_taiCw@ihNn|WUKBPX8nS5}`JIQ}LyN_~k z9eGs!)alL|r4C$pU&qD#URUGV>F6tGn^KXbA=niIz0cL2rTzq&xCg%9aez4)n7I$K zAIx1nro7#I2WLspUBvo zd@lM?c-`cGv6D3Z!FRlddztJ!Z`euQcz%*$CuzQ8jn{|5>w~;?d~ea?J2bqk3B|9x zRB*%R!rQ{5R(_v@?h`wA9(qE)>1)neflzo3(r_6*Ntu6g=&M@hpxeG7&EoGC+pg}P zatD>jr_emm`(GBCN8*E#_TR@{Jon#LnCr(UD*wCLb3EN#cGUju*XG46YWt_sfm-N5tN1ZBMR*|*WPo%n6! 
zn}64iXa|p#|H8U<9s8y9VXM?*)3xF^sD+ngzjO{d&$`}{wak`E^wlKJyzMUZRRUM} zJ?0qJaqh`H3+`XRxes*`=O7FCtN9@QK7h4YoH`d^w6a)b9pC!2dE_dcowe{zo!ECWa3XdQ=#v#GRC4M{#DXJWF8b7wLi z3qHY>LoS92z7yFHoO6*Yktq{r1^5cVw`GKOhR^3u=B`R)q7*x2O|mjUNaP(*TeVbULU-*;~8kua5EP_ zBe-ok(0ng@z-PgKGNyW4Q}d$9(j$iM_p?VLxWrD^@JE@SfBtRTqTLbM)s}lKZCPiT zpTCJ5-yEgxX!gwM=49@JvGUUnbA39Ka`a0O+e+oGkMNl^_BYGkmH9A6)Y&{hqzm*d+zi}IoNa(sDbQl4fn$9Ff7^1k+Re0BXPPon(&F&gb% zv@(|_^UkGzvqNix`2Cq1<07!e)Gyj9zVX^OnkYChA&t0`RM-KMDH8>PVdkCHElYTKUe(Ahqb#dU3vI* zy~K5l6}dU;O2BW?gY$0jUb$1nJmW>;mmTQ0`k|chnuLB?bfKKPm3%UTutSG<$)C3B zsjj$-9a($C?vwLhgT0&;T_yKCIXNRPW!%ZdzE_HOI{Y_~yav+0S)ZHm1HI(zvBB$i z^?eU`U=L&|=V`dt0*CBw)YsSpyleP>1bVJQ_p5HiKd~_==QUX`OlQ3;YY36`V)MKo zv^>f;71(Z-w7re?;akw+;^b`Kbk6ftah`7?>*jkL53gpwSv%8r3>?Qa->S4<^8clI z?*|WeN|?S?Vgk(e4D#O1yJ+p_O!7*xbt+f~oJ!V0HJnqO-5)s|fIOz)ljn?TN!R&Q zX=ec6`}8Ff)P|a&9d3uxcSdF9L^t>G=ws~U9w>)4&Wa=B!yhDZH&BwYfEeX6Y|I_Z zD?5tg`l$1&M{?d&NqNF}-(r7XI8r6QO-hZkFgO=~aIqi1))+-~WLD4mdM~nPe3wOD zLj!NUhWD(=>be1rokgsh{J_ltM_Qsz!>M5ZVZySHEc9-*Z$}*@b8hn8`wu( zO5YV*%O>h&ldWX}M<4#cS|)ImU15|7t^t zInloXH}usnwe-Kp-b2v;G~=Ex>lVI$5c!h&MD~A+-_x({Mcts~R^zOH*`4QyR!JMRc7aFQB{61FrnO7TjPWu1 z!gU(DFW2i0n)R-<)jNmWS)GfnLY~Tyt1CSxXU`(%oW2)rzfVqdrRc$}`2WT!l5bAY z?uxt{U*OY8g}LjwH}_fmdeXjBWW9m>UdxaX$%AqPU*HjZf%4AG(`4!_xo@tcBb08I zoA=Mz>zDjL`q^{Ei|mjGW`Or2LvQ7xEmcme#$I-Te0A8GVx<4YV_dG%)*Zq3;tRYzsrEf@0o?qDi-`@ zCVp&L4gXx1nuUKV7JQKj&sjlx|1LGg$}AUrS@@{tp^NRYdom`Y$Uobj|JtqG&vxs{ z+nv}!F6^N=_$D6y>482@@N{mIJ<`!T_e@ZHrQ~j6ZK~y96kfqr+97+R`4QDxVn2@w(7qI!l8|msEU`xO) z{A4``^EJL|vuCYZ?gVhI8==>&)w@11qTZ{3sUBjfx0!nXNWH+C<7Tb*@Ai7# z10(7U&^POHE%jzcws(T`OS)EXz1ChiJ6JYQ-6OD3+IyI?%|_j_7ofb+RqtW?+D~6M zNxkP->UBldJBWIfl&*2p`flu?h&H?iF6C?suGXFreb+|?{_9vnf}3gEV6S#Z@v;%Ww_xu*?Rc_gmIz-4AyaLtRv^$sxANj6-sMdG?e z;Lfz*s!5LMuis03ZX2$gArWo77P#i27F_cpaXk;rmUtVkb&r}bffih@NL;r`egATVb&STV9g(8=m{mHF_$H`8R9q8H`s_%=ThmtzwV0UisjX&K%}fcYLCH8UOp!9YM-BdOx|O zQ~r0WXQ;w^vW98+$0ePtd$sq|E6-5dUBqy2I71E0aFPRn=LGx2@AJIi@L`W`Y|D29 
z+qNaB`?3>M|4V0h{53t)-0f$md*`{;ePbL!fBu;szx?IigTQC_y<6ke=4?mj-qDU= zdj{VQaRm1!JA(TX`2W=*p0+-YPX7s|)Bh#U`$}hE_#n@b(d^F~`_TUULE(Mq!%g~L z@?mfaEi!N99vPzg-{jjGX#5L%$?&(>jSlQa&R39QDaC8I7s*Ml*^3$6c_X$W_M_|@ z>*vFmmw!xdMgRJupww}d*x&okAU--r4bb)m?xWle|4ZLUzs6w`3mty`Kf?bFyn9&Q zKVB4Uzq2Ulr~mx77X{ns!#4V`4LaMZ$c0Fsx9_K&^if+By5(-+t=PHSuyg-t*iP3& zcO!PL(0vrT(`@OR+nnlki*nWW*V8=v@>4u*6Z&|*da0kM?e(6X!1WH_{#W41F%I8>wfs$X z_zuC>wA0JUx5Es@zn6#4o|(#yX*P=y5c)NPv@3RK{U)jSO;Wt0z+>6-5_^p}GvaU$oUMvodXC}G zm2p{Y#H3ZQe=0gee30(5;VpPv%Xw6IX4kwbeT(hZhV3T#p_LX#*F3!~at`wXllf<3 z4l{Ke@osy7jWq}4KwIGYqM*3C@mRe{5 zwuX9SjO=sW?&HvMWfWSTwa@~eYqZ>}({hM>&6yusXqgvDOA)jvKAo0YbBunH%RCyJ zmN%hA_%txHC@8v6WTg!m3Lrxg&l5{#vgyxL$&BQt6uf@c-{LnFot+E*@VO)UMfkK5 zp1s?k;op^&dEl|g%0}IfKfwD#1HZ^f#kY{*ncopzle^PZKMZ-$WcbC=nhcMVdg!B= zGMts!CBqkGcJZr4hO6L@=092Zqa>0)e84CTHeMAO1~!%qPk^?-j3_evnuQLV43|<* z;ae73vLk7E0+@B{ZM29C0~a7F&!<06 zCBuU_!y$Mpu$fOM!%Gbs{#_Zq13b}W_!a}d*m=2U3{%^fSK8srz3B9P@WfZ>)BTLc z0rXfvSh~X)tJPTSb4ZDGGYf!Pssamroj| z3JsMHB(0Ht*$d49=n-4|F!c!ic3b?vsjI=TX`|WVayCTfdOvk&>*Z^Pb;*L^lPlu> ziNomOYGl95FVNlhaTuT6qrk0u&?X1Oo9`)!z2@u*eVQE96$NFzH2r1#@G_qnZIk&d zR@-hrP1}Az+l20J?b{LAzC`FQ|COa}*^zC#8kjAMY;7Ypeh;v*^zaAVQKGf&A){^Q zWq;$gO*~E8GNQHZn(TbSwEQjxi5FMtv)PsrDsKq+l@kZouzH-Bir^{Y5&c(wsC%B53sS?cCOjB zdyTgFzIodoKTX^2i`KRSk!>3Z-OWoaZQIs;PL*+61WaMMt!*RDX6=#~vu(j+hP+*9 zv~AAW-+0_^I8ED1qP6XpXGe_N(b?#UpIX}%Ic`&ETluxNwsDSU&$ig(_LkYUMMm3# z-P?AmIpFNmw5=y?^G_%g|5N?dJmM!1KbH7ruHhl}u6`8zv$>qtIQH{Ybvb7;HgHB_ zb6T1@aAul1X>po*q7U)Ela=vXINOm2tgIQ2vUk&bBCfN!Pkd){HqQi}89cM&I}14% zB5*PX`)l%F4lw3>^W%g23gUyF_0FB`?Mc42Me)J*6N$dRKBK5yXyJRi*rB%1NbuN@+a&BuCc1Q=$Rf`sCeYuDDTRBT7 zd*}Fc>&4cRHR1>OwFH(N1!}=?3v(j~RQ6D@f0}vUT9c(pOp_URd}LL8a2UEObDU$G)PKr)*;7A;{Ea$Aov%@6 z;TXNnuKI>{tFH(3tvl}<)W@FuG331eDe8NQ`nHU=*O$_*zBXVoM}33(*wa6TJ~`SN zyHBg7eIxYxc60BD*+y%Bz6;E{yl+q+F#*TWTc4hyz8j@|IYxclKNGFK-%9(2e}npn zDL96X{GU_QS1Rp0+o&%)dVNnw`?4eJtAKAKc$L@#(TPt}r=3TS!J{LjeOX3*8PV&z zPuiCWOlzhpF$L?V2K>&9!YaF%zO6WKP6qUOx~&xcJ0NjkUJW5zAjDcMjQgi=17v>lV@R 
z=GnDqI(`*+EN9p1O@En6`Ekz9$XqS-$=oe-xy;QnN6TC-bGM&d9MdQt&;F#$zq$L| z>V^FM@CQz{;(cX2YbE{sy2Ns157qkJJS*lWj$_Ubu}g<(d#v><>0?E5n+@m=cNn;< zfh(Ng7$@_c^2H^cd*%&M_o9PZYuJ;!I*xN_sVAGK#&wprU()$vp9!A5L*noYOp!I9 z>`T;(uAdHVS5SxSLuBD+l6A1?ytbKfL9f^d=)C>##=eqF^{e$6>Vc3e$h{6~^E`aj zAGv}p+s;5Qo}qrRZLoT<%I!OtJy<yJLJ+HWkFHU>S|2Vix+`hJw zIP90WVEgE};8*$7b11=gAf3PK6MXv)LC=0y@T*;};O@`BePx2LWxXrdwicS3T*21A zP`=m|Y+uB?)*imrD&F1f3O0|ye#(gJ`~tY=$erWA-o<`N58vLafxXfdB&aMHxUwWD z`#Jj^xvKy19zK7~AoU=9?4RoD+&>e1B|HVLU=s87jDE!I^1Gcf_ML;JoL}`9lxSy2 zG}E(ZP{1 zhy^<4>})?2@7o)42HOtt?00t2!tZvM-F9r>Kml>z@5qrBW z8yv5LV-a+`%J;8W=-_udWnnsAG3ekMp~J+n{BCHe;cUtC;JFf7UINd0p0yTQ_}xxf zn3lB$EqtfZ(KWt+UX<$QL;aI3hlaFJ3C;Lw7z1X0oS)A`>O)l$Tzp)Mu*IUK6 zl!fc9x|lgcVl!CRvxb+ob{lqO;23+shh#0R=Qi;3yNxol&SQtB=={KU;$!x+W(j;5 z*ST*$-@Tvc^DkzuS`-&d;rnHb*1*Hf>I)eCW*2VUWu_jsBt_42!ovT(gkmU?Tb zSH|kixXvSg;hR>TU6y+J-9}ls-d$7lvD!9FZGJzlv*R<~zmVt)RPoKtma*b@8)asl zhi}&V`Y_)~Uk9#74qkWr_RWh6%9_?sS=JvVPr?AN|1r1k0XesWO)NeF1_3zlwoSh{$ zJR?u?B@Uv#)%+d!LYnH&cY1QbC;1fRK0wZzq+oM%&JCXcw*Pg|B6VXQt4CZz^0?Ew z-FcrT$S zI=U9QV6A*Luv=ayPuyok_%)P+C&&dW{L#(2S z(Cd^n-R||a`l#z7>XLPP;cMJmXr-N8Y=Y-Fw0{ns>fc-NjETgP2cFE|*>I4z&7`FX z9GN}~j+#gugTTQ}h~4RUZFm^R@4>O(pzn5)z&dmwqX#h_lw8~g$ z{n9G2$lb?5@EnnLzhuMH(1qs-p$B@yc;-c?r&a3PV8fG{8>VNeynn@lXIpf7b^@oo zV8gSn3(p-w5A6=qvpzbWHwCW2h9@O2Oi#Jcv(bX5COV!Cz*YaZ4NrX+p39}4-&pX> zi;icF)c33nPtW`?Jr~J)`Y+tBlIZ=s3^?T(e9~#YCf*x%)^iG62HL+wFQ~o4Ii9@=Yw^w;F;iCw~l%rD+y|T7~Z#*Soov{ z9$6G0tP=XjpLW{%@NFGu?!GrheNOO5-ZZT~@x#QZZzgbCR)2%~@NFGuzP~<3egC|c z_C0CTCw`b1^^KACt@;M_;oCZnjZhGyzQ0mm<`YJJ;)jV*U%IsK@o!KczOCcf71=TB z`;D}3g;5_q&sg=H0Isn98`Oty>o_(@LX7%WNc$c$>KhQdzFoj=S>C-q-LDmeNAYd> zv0K{yH!=rbDtGdppvrp_g-xAuE5vRfYS&_zH&Uj+y?vVWg@g*7i1v||B0&Skd zU!8{kT)wgTw2b|Njq<&@Um!j$*)Py@!iY~xa@?kRd+@zwpFp2K$tSJ|UVPM*;Fft= z=3KYoqZYsQw0_DsndkPP<7=3cHG1&N*kk3=4f-S>pa$RCs|%hO9Xouo0bfeoyatcX zHuo>Hl-vmT5IJ|G{f#{h_-z*a#Ln6AA2Q%eiJRB(ueZQ=M8bb*;wKJHgRil`-;x)B z|1%Rmaq~L;7I=Ro{Cg&T&Y@}W=+1C^Z$#f2wugBy!!{Ft*Pg~{dE^n}oBu11Jizx> 
z9+CJrZM-Fp6+L~o;D=XCKC#BJYUjUXjiC8aGrSJ=7iGK}2J10NR+|m}y29$Oo&9gp zJ<~?FMl<_G(OR2~k^h1l@t|-3ai(=DKkDP5hot%w`LCZN7TKtcd_ySEu z!JG%&YjXN4n}3Z>tI>z78Tw$Op$~Q#`e377AN2n>>2!9dQ|}M-!O8vI^nuoQR(+7G z%ioUE>4O0VeGQyLf!}xRIgLI@FyQ4Js0M!;eQ-kP6#inLP=h~>KG<)-%Q;XD{xtfa z)x?jVUxPo5KKRJQ&pA+?{!{CNCKEsLzZ(2$^ue13yt^$l*2+JA@p-&g6fAJK+iQtG zOL{qH-qq%-sgy5X?pWYw zAH3VUllf{s-}RRFPxXm>pE5$t=6w&|zs3Hp*l}8YaRb#eC~w=wIf##l`GeN>H;D&) zop?ZG0RIrN?IrBzifG^T>ibi?y%d{0Z%q{iSi}$Pc;afc+(MIiz{pTtmDk zus@E{=l(wA#2c@SXMMY)7GBi$k?^Nm&ki3WF7t>h(JT8-+kSw3cB8f)*`e(_$z2rq z(5>*Y@8kon9JuCxvggG9Pa8g)J*+{+Pt^Qaytd!e#u}mRGkA^tn>O-=jD`lG^aS3ABerIgX}#u4`(lk{ihc8pP08}$mzws#S}oAxB%zBR`;Pl|B$%g?sWd&;|b5v zE#Q)Gu}#z<_X}jW-0gFT{m|mK+2?D9#|~fb3Lcs23O)`mG?VMC<@yrczk@$&H~WyS z>;szqoeq4%eph^Om+%)k*n|%>T)-$BN2rIvb7YJwSk3#w3E-M)!9{(o?3vkdy)8VL zXTb%GGI6AO7(7QZT)|s;ziu43##nGsUn_fLc3i^GN{%w_81;sn%E6=foygU)DaRG| zJMGYH9As}beG{f1Tnf4P4^!WfWX1|STh6D>j1v7^FEq8Xmlm#%KKQlJb(V#0-YbpJ z&G$!+IfDhfFFy|)$rie)rwqmBh7E)M@H6k9PVZ@LU~RL@@F!aJo)Lr7 zP47v*%dR|IvbK@Ck_LI-<9kaCPS}>&E_jW#jqsfCm#Oc>cHNF`tKV>=;Mqn`vm1?8~nGI;PXc&g)e0u+;*`qz&sj&x7wIT z+Zb=RV_-6K=xYJ|XQdA$uJK)t32iz?Y*Gtn?OHi& zx2IUqWI2`mP<}0jd}#YI;U`M9eMRW40;$J7AEIv>KMjqOoQ~38_+Sj|_^^ej|>h zn_O7WCPuRd62N1|vlKC(Yxg|SZU^O01ViKAV_Z*1%Te;sNIzI*^#3vzd%$Ckg_V~7 zrCjWaMSlp5`>HdPE;>zeu>JCHo~cyq=mWt!%su2-tAU4LfuK(cB4#WTD#u_ zzkd_u+wA4|=Kn{=9~qTSK7;&N2(~Fr7P*k}G4^uugp8wn zq`jP+AEPNBZZ9W)M=s@A_HuG@WK%xGUQS+(Gbm5Bmy;tSo$@~Ra`IjDqdd`G&N<>_ z%3bzy&h^Gq-q~A|D;XEPKRII?`UG8JFXy~#kn)4}a?Us&qP*Q+&Y8n~lz(9_=ltK7 zl<%^auj4Ep<)7HgIhXVq4-Q@+(+&KZntl)r8- z=lsGSDBo-^C-(nU%3spU>$QFpekLC7Wy(F2-;ts9Y1cc}EMDF{!#jIEtZ}u)zC+~o z^Q<67yl-kva2`$W ze3P^1lAkPx9DMAX*C&2a!g@EgF6q4Klkykd{n|8qx$|?k51!3@KLb4O^O7|CT5vVu z8^Av>R^StX=Z@J0?44*Zx!5&{4rP+$TNNKNYX@>24)ESDxo4qe8Fke<6<@`}#lGCF zQ+-L)Gux3^IhhxZ}By;Qkd8$@`oE{H9^b*oD27;M4f4vwl1^`0g`` z+`qTt81qU4xx|C6-rm8L_ji2gP&P|Gx_RUzzKz%eKkJaEvXz~cuAy}WS<1`)pDP`i z{pcIZH%IZa%y{U(-p?J7TsOi|7@Ypl+!dwwUi$u8M^d}Q#^=Tt2Df*HHf8p5*H!b^ 
zO@1}EZ}EyY=GOAJ6m=obJnkI2jo60cpOd%Fpi$}3BDza!N`XSgO{BM zZ5mOd1oNr$7VxhF_lSpxv2!TFTjP~r^U2WWoqRW*)mH!dj*zU+uo_h-eag1a7kBWUXu@bBe!xns-Zbw$;arJmcU z$Ly!_U}*DZsqb;lJ5o;zKB5sdaq4B3dcJ}tufVLaz-)_CZ?(X@dm^;?X@Oa9fpNvF zk62)Sb3C;9mjd&G1%@0I8!Rx7LeFCY^NIzgCSHBV0&`nOX!Fkn=5-6qws>`)1?KW& zq0J8fa~5UIT3o-}w=h_8iRG18#>(u6%!8rLcLKi<`#7bKGInuq#rJ#0^Sahcf|4I? zXclYPB>33H9U-n-;#c8?k%cQt`?%}UJ9jnS#W-bTx<06wJ#>xYNUSSo?0$F=>oDv~ zDN8)@&@3r`|6^<}`9HBdm=s?gL?+eQj10Gd%E&T5@laQHA5=aOE5+DhnCKJ?xfe?je5~u_s zT4JnUZ4;o?2@(4SwG|PW5N#8Jy$+%%woTA_n+bADEm6|;UIVl?7_~$wdEV!r=gD*C?6ddUYp=cbT5GSp_TIwhvE%EkRi*tIp*GrR zW2!c`PjN1~l?JC@EtsthokzQ$VxoYb_mG_d8~PXjaWot&RpqtvX+puxS& ziKI_$;N!m2_KfuBSZFS>v7dlzD}6TbE!1z{q~8Y8hUhz2sPCeIc~1Y`8U2j8{{~>> zml1zFwkcEkEkDgagL;Loz7d|5UT9kYZ8=Y?U5mB1-!)QgZbWbF8Uc@Ki4}{X!!D82 zoZ}drr`)E`Q-m)rb~f@W6Nf*0H?VKSpxhn-zcUXC%56=T*%{DzIrpng(5yvUj;=k> z{l%cXz8ZO5ioAXhpO7I3Mb5`HC@;RCO`kFLq<#I!>+$9H_P>O@PNpyJX1s~4-aJ`# z^jH#gS?&Iw>i3Ka%4#FO$m%R)wLK)Ooyh8pkgS$HbHmE&^~!rc|97KD9{ZC?PE&Uu zyc1hJD5GPM(T^gdW027flZ=ir$>_8*f#w60ixC+OOkHk*Lrh3Mj||D@EBrzE92+K| z1@6V_J%^S?`;=LY3vOh`5xdEP`e z|4{OnWOGbNHXC{V1AI119+Pa23CU(7&-andb&|&?f34H;Y$7nH%a4b{c{78~PuT+`XB zS-Y2UyuP})leGKZXI_8l_5Pm8*ZcABc=M3C-IvDskj34cTbA*uV&wg zpF2=L1E|8A|sKFQnw)+yO50{b3T59eR1F{c+3G0@rj;B zCIPcu#z9m_4&U&mzBb#DXB{^ngO>tZ#!03wgU8j`kinVMQyXWm%R~m}A%oMX$48!C zXs`%5!P(aFnObyv1~NFm!r6~cbpWQw;Lj~{@t5c_*LtqZ{eem5zCQ2bGB<+#KBq^-bE8NJ~mGtqMHTn2@_Uh-!+w`-+Q=8n#+21+{>@i#j@pZhvh zK)*44Z6W%#LBDrH^lO8DZO|!I8{Ox^u9l~VryDwzUqPFoottMl?-|hV9_V=wwA~MF z+|W+eWgC$9^6cbU4jop^($ACgm-Ae~`$FjBhF)%HHTPI^NyL zKMUk^Kfs~*vDvz!&odDwPf@5cWouw~ws*-_9k?&Jc02C^qlo1o7* zZD-*1`ySYo8tFRHHd&SU$e+1-f$(dG?aaIb8RhQTiEG^b^$D&c>yf!4Q}ZIzDi1>U zL{(FaJ@D3h$o4&FKWN)CLhZ8JJjFr%5S$Ooc%I4ljabfHe3G0crNlKwc6E~(=PL9k ze4S`h>o-ZBW7V}=&-sq-e3mvAT(^KeO>G)i!2JNU;Z@NS$gJc1m*Z1-gZHNOE`O=u zpQ=ilzUz{?;V$2L<@eFXEZXCuE!)4#U9~b6;WL@Ti47(C@HqNM^imsoX*~MkC}qD5 zjoL!=Sz(Ft_C_gqEJdG_3Vph<$%Q`fbdWx0{?7q9=UwnR!T&t)`UJdQ3*n`)_V-!{ 
zul?Y)--K7Y39klh6Wc`WVfsCDx#o4QEp1z*+3K8Y9c|kKftl(<^{uLisF7s_QC>@- zr&!M3S$9zR*DZ=@SvT3@FB?%ccj@2MnLo#<&EHN?pS0~=&{DQH-YfZRUbVzR|Gmq% zUfM40cU)h?X}mM!3ZD%NLWRpp@(s)Wmw-l}B|l>55{ ze#U%@)9Px`eL38vq-y6s;Er76uxT5T;iHOW@Nfa9^OmYOh21p98Q_f6;qIES``3N$@oHndCNf?{-dO$; zDCuU;Zz6acW1J3-Lupg0b0%#Td7|Twt^AgZNxhA!&cBD~A>SL1a333OEM!c{*`oLi zN=}5@Dzqs!(MIk7Fxo71ICUnlMf?%wy5+oe$~nhS2l|ln*HzPx$m84K(#K~P{q5tf zF(*9zLb0>1?h%*2y)%GLR!f?AUtHz#UysjcBR-!p#_#3GgZ+%>9gN3XemCIrapCh3 zIk6o%k@ovQ^9ks%KLlTkP$m0A@LvzXuR9oo|JpF{-?$L`{|+=qfd8e*_)Rwj+aPl@ z-+Jz2HMPM(AG{-YnDXoo&;C#z z15Oro{Zin(Xo9mo()*eT4so>28wHLj&;C#z15V$kfo7k;`GpD2-bin+2~PXRfoAUV z9lJl2C!N07AIf9E+5SnOc?)@%b8*h~0{@`Sl{-wi-<$FA9p+B1R|Cy=4w)}$Am2*z zsW$`7)#MB8i|RQ2t?{gpCbL$W!kXy>?Ba>o#gnj$qrA6ZTiA#j7u(`x>W)_5D;C=# z1KZ-{i3R#S7Iy55j6K(IpGS0G8*(}0Kj#eC819Kf+Ze|lIM>EVjmI7t!p7Kk0UP5m z^931)t}$wtVP8C>+ZQRieX$k$;%VK!NDi|v#-C?j{4?_EV)n(hi`W<2!t4v>3fdRH z)n%FZ-IaF<{WEA^B=_Zy3AZm&LiWXY-M&a3!oDzUiwWqBpCbpw{%TAJ(k4$!?(3qT zbeqd6HpTcl#Z$Mruql$nrT`DY^s}c^aQ2W5^fMqjCq4%oonA`8s3mYmB+CGWKp~4BkeZ!mU2Wq*vw* z?_y)W3+>FhNZNZ(p6F)P^mlkB9XTR(T?QX9U(S{EjMG8=_ib#*Hhi?H8s{T^Hh9j) zCh!U3C+FylX`0LXJHYy9#(`OHFFP108BO`1K7_Z1(1%5R7t)90P5Ln99DOK!E&8zG zV4!)C3BKsVaDD6e=wFBL>JBvD0{ms;41Ji~cOiY4A$aKeaOgau55FXNbX^z9GlV|m ztn*>>h(5eZ^60uQ1jll|J`_DZTprPf>5@m+bs;zf=j+2y(FMch5q&sP;OM%}X#4s4 zuv6d+lSlO78OfvTI)gsv>%%uqc|;%j!gQwS!#?t$4|iTjAO8GZd|+<{_4si4eB=|G z>wVy9D$1)N|4xpa(0}4)jq!F_Py;Q6V8`B&@kE#4`b$dSSHdvy2~@4T?|l{$Q@cX?R4oZ+~v z!0N3IOJAzPw|buoOTS%*Z}sjAOTS5nAK^U|mVSc{Kf-I}UQsii3w8Jr-YH?}^K|$T z-g#l^b9MNU-sNHGIXe7EZ*^GubRB-A_qnk2EFFHNcVAe#oc*`VGQxW(EIn0+Kf-I} z3|lk)<8}BWyi>x`N9*uMc;|(s$LjE-yvxJVqjdOD-s-S)O@|-leJ(8hvy{D-|&G=0q1){KC}y!7o98R-MSBLT%F=+l%J*Bb0e_lMtFbm4lzK; z@t_SizHjN2a2qZ;WLu8YZOic?+p+<@X85kewsf}JOmevjxDd7aM31zK3rTTKbjerpAUgcR|uCvTad1br(x&H9ft-k z_kxSL4eFF(=hB|E&xcnhYii<06Fawj(9Uf){AiCuhoBukNUJ^ElPk1h@0H%~_@{LI zG9p#Zd=q}xh2e)UF=U5>AAY`b{f|TUq3dG!4MD#>A^K&6=+_zQOZs+}VdL6ZyO*(I z*tf0NwY0Brry$n0%=ZuYT`f-lZ039^$8WlS9j!@0Pz75uY 
z=D@jq>oL)8Y4Yc+AKLvKziW8?UDzhle}#-!8Mi7lZZ+bMCYk#0&tW*oJnC(D-{8M@ z!oRbwV2sgxWW(p`U*dn5AL#2>bAEenpHv+U5E~sctlutZzmM+d@qJwh=fh`z+rJFI^}RszAH-e^!Pkf%3!fj<{rCR> z{=0$Z4^8mXN1hMA=z{*fi_<6V{XnyBe~0kT=U&-x{EI^VzVrQw7l-e`wzZh(zm&V$ z!{KMKH{oCI-#-SL$D80+*@uAtxBmhBKZoWp`n+p=-!^1sxUVv+FVLJXI1H1AIb!n- z(vQRB>BARSevv%w%ssw)kv!X(=RALrJaxwc&FvS-Q*k2Dd{FY}^T|-#!{(DkheG{3 zOdk5TdDKPVIQ|}Jc3vb;A9Lje7s=D!8)&}cB6+qS4*E`qYgZk8@tnZX=fI)1hs}X2 z{uF55XUdZI2rdg^nS-hsx6q-j7`*&vs<)j*H}}LvFr)kvtWD zK(<~aPZ2z1H_=hYIgE~3?SW?em;_WbuZ1`0@N&DGHK3qO0`C7?$_#*kz$k+Ka zyZ$qFNyJ}ecEadb2IM7U-$yk}U`bVbp_m^tSaV^Ys@k_0V z@SU-0#8MHr#QKc##CXLwl`%aSR~Jlkc*xD8Y(V=Im+vIMPfm`JJ(g_I8KIY>)+j{5eYsDGm!F3AD*?%jRm=Ey*?5Lm(y;i>qySy8Cwj~bl z_!H4kmBbrilSdOz;asWt$6#;n2VaSEmRJ@yF<%nXlg7M1UTks72z~OYPtsyY6FAaF z=eq?hV^%x+3uVs+5qkl3(I+&a9aa&F4Y;0{r#)K7Z7gbRWa{F;s(XleOdD zgtc!D?bGR%ALEU_#N{Klh1l}ZeRgc+R_OzwWjg&tyX2dFvGU zbr`AVuk7n*F1Q0YJDy+Q?_*xpLw&vAa~*Z>AfDy>_y|ioEG<8Mq+s_yR;S(=xJgz1 zZu-XLk?ySB3jfhacg60Cc;#J}Z#j~wjdV!-OC5DCq|7>eWK)=X>hUkYTS?t9?#uOZ z-zQH6d=u+lxqF=!*O#fqIwS^0_83T6iS-bAi7!mj7?<#N0lb~>GxiMG)yHqkeG-k- z5xy?8fwI$LiNhrR%0VpFB5;N_9n z$&5>f#6XLWWCu^3&!-pZ{<$vbl1|@Dd?P65hWeH@%(qKJ-|+3|u|4pG?5}#vb_ua< znoEzb%K&dVBSn0Ije$V6ErvKCxp(w^a8oK*>o{>U_io&PTwN1MoT}!%3146% z{te-;ZtmmI>tMfD8omc3wkLHAcl6?a=!#IY3PZH5;fV~&6*<~^a6!w1mj~CZRkYgW znXJ62ig-rGyJg&ZVyZ?rebuTq&!SA(v&#D<<$m`&)(z8uk)ZDXcAUx;9C}zslzvG= zFTNBX*s^ql%5lfJa@UbxcoteOVP8pZChaC>DR*Hgo>t_bPz|I3`G zZ*E^YJ{tLEjx(Zf^mS2PmZWhW$rR;{wy4db`#h{qskHUQcVz=}G_kI6oE1u4@-3CV zwWXJQiqkWvcWVA0mtbI@YZ?_MbaiR9f`68*% z!NBF~S3v8~SlILTK#2>vVE+CU^Y@PeCI5%-3O-kyJNL=nz?Lk9y(RC;={?N*WxU%? 
zE3f+N1DkrTBEDsq@jNh=8M|(LE}8I426sgCAeR|qD;ls_WXx|N{$F@;S#I#TD)d|* zdj1`o#+J0cxQy85ztLZA;F!n7L(pIP(1opHwADQBzfNBnP5f~;_$d>AJS{Xg>@oO%)c-brAe%45AEJ}Y{r?_g)1X<^i9pFq z^#A>%LVXSoe2YGB3Y;C_fePOLopqvI*8LWJaz*cA=P66P5@(_63fCPU9M&R~%= zkp;q6_O7_x)4&#fa$&!@zwese#`%TP@9t=oYp$!6xbr;P*eUph1Uxy z&B;Kv&{<^kMAq7N)8Cz(%_-yi*q*?aE9qPPUB*A}Sr_SRp_TOQ-)2Bhbb`>SLF5Jf zl=`rm-A^4NN6Ri9)D5C1-IH})xtYFw%%I+5w1hmh4H{re5l6QH}e{Ox6|N>q7Y9g{QlC;9nwa|{>6A=m5C?5B|H%v z`+7fYmGKrF`@F|`Ap3+i5#r_x-Q&#QTgRp>5&uL7sSu_*{d|J;wqiFTx-9 zj-=hfBk)88Jn`>mLp%b{+{yd1ybl%6VZ?BJ$~@_Ue8upY>yBcWMGqlSWYFA^ZU5%l3HPWs|aL{9?8N>1{<5_blc8cK1 zQ|zD3_z1+F7Q7q5Rr1N+x>xuel$C>Xif>E{j6FJXQophH%cflV9vI5$<6Xw$7{;QT zF=QhRoA)u*6_i`AI(2!Ut@kl~`e`e3FBxBRgSNema~x&vcr>a&pN|{!NO`Zu2AE9$ zi*0ZI5A0JN=KWuWZV>s<$-1RR{7E-Hkx#J`yRG!Mf88czf?t=@*m7cnZ^N!L=1suU zZPSjpz?SfRQKI|XKRdf+v}crl=Fwkh&u@9RO)c;f^V%$ak>}5x-7@AI%DW6%Bry{a z%99}dF7tTVQ`3yCpBkZlh7D07d>VmnNNcPue%N!lw{=C+Dv_7$3m`^fOw%u9Jn$R; z#^!h9wEkB5TkPD!y!)s(X)1dmblW=EKe87L-82?DzNOyXJ69>sT%px^)`Ktcc#0z} zjjPgPwCk;JT?*s-r+fY@f0*?e6*fFQ0J9^$+uZ z$FHVd|LU(Uz5dlXw(F;|C*akozux+4+OM}}QtnlYYL>DE9(BYh=YyViO!zTn{-&21 z5eV!W3RXAy=4hM+TAD5H%fl^0a9o72BvWRQhXX@E1?Gbv) zQ-%H_mDXPB(T+UpDLD1)l8BPuFkg>QmbxkYKN92GbXWE|-*Oe%Ucmny6V~}QX6O0b zQy=oJn6}n;SN3V&!}>SNky;reoGI|#33)!rS2OjCdcF~AQU zoH)-Xc`uz%py$n)n(vdm&S@^{nB%h~T74rXSbVmrnlE~q@)Q2s#;Tb>-~!?<2|=P*=ALB zrrsZ{$1``Y`a+IJ)z4X`GG?g=&s>lC(H#97zm?Ke3Aj^iz1d53%2Mz0NaHeb4bdSYK3Kfr{#T7P|1Pds*f;p})R;>P7b5HC-0t z`gAGrJyS;QX}nW8+TFibFaA|!Z1}5E&9%I5;Qdm~x#3#I)VxtujVFNdd`#;fUx5F= zm#(~bsxQo%2QNsU-ASL$o3C~+KV5d=dYMN~zts31rmGATUP8t1s z7rZ%TzH9e4m80l+H6{A_R&7-mJ^;Zrzdv=>l!smYrndKTr*(HJIGy(8n=rG7i`1gA{=YfX07Cc&*h z#nmm+;_8+wd(*@G?}%Mg_a5{y+AQ*6GGo&U?S=jY5$Z@;P5uLAY4aZ_ga67@gkIm` z)AJ5|ADiZ5Yh3%Azy@x+q#5PbMR2YtWvXcBo6w<T{ZK26N;Af_--ISTW_U+oU zfn5f@%(N0Zwc1>#h)pb!GLHZUT5Vc?GO$afsg891t?p(0Q~4jNTt4$a2u@w~m<6VBG<-M1N zyqKojZb?lqpnFqKk33QiPsn=0rKCS`DRvt?fzvD~uP)cd4W*Y(rOkXhZM*)tfM zn85y+N_?axN_6z@%e_^=9fuvy{3E+YY@y5re%YsK$FFRQcVSnup0nOF$Fsg_j-0oo 
z=Xv*9ztqJU9)277Y93O)MV9~rJG2a4Ah>!aYq{TH{o~Arr)dd{SdbPt7=b0OyF zF|sdK_EM%gW_2?Ka&*zu$NEgPJGKM&*b-kB_%S-pBB6*pc*)Q|vLR zd-xbt)p0xi3^)E@;m4UC4VzW=ep{N-GqQC%Qu_KBeBM}{+`k{&x@?5Xem_BZe@~rH z(S}>V-w*y*voAG>zw8GQ{KuVx{|poUdf5>ES6fw!ypQ9(n=;8~ZaGjmk8_6NAKcVK z-=5|_SF^R}@$BS`j^MdJ$xY8Phs&$DdiE>1)_2>W9cKzx(BCEX9e&6whT?yLU%XZgJDV4aly{CdK?8JH8q2u^ zA@~MN9UkLJo{jiJtlzvA8IT(MeP8gm$jh=APr^ag0=t;s%6i%KNR?=e-9x$@m2cE5 zeu>%H-qVbsS5KWi@4P(=Z8#2}X4v*_wLM~ari~cQVEaY(r8;-LdGDs{;ES3BIa9?` z+<2+-)#87{FYbL}vcF93oBf%6xkpYce%_cSD0( z%GmA9*?8*jq}w?|+f5xZS6vFv>EAd5OjGOiHSIseFb5?c>nI(pqae#!BgW2>GBpor ztiuxP3G77RuogIQ)`PKbrqkgY+&>S$v_Ok$Xd!8g&7D@E(HC+#gEPmwe9qP0y$PP; zo#c6dJO)jX@w+lzszYd>_Qt)N?C6#azS?4EM#C!B4l2^|3EMf_DephE%01w(L06H9 zJ7Pqp2IVC8z%D6eZLWYdB3VQBMXF}j8Fr@Kv~RB5QQ1RZW<9EQI|QFd?Ce|j&D~DA zW4db5)2gcHlznaA+;&O3HIz1tL?z@P*PYCtvx#xml9d zVdQ&sPT9(RbBjnzU2E~Bo{8|Ku8rbp=NZE@&gbx}mejTJKE*z0SrcK6&hLoy6uU0j z{!FT}>1QBF`&Nj(CwOPi@h*jSSt0tW-_M2ScIQ_~yVOYQp5wiR_sP76%kge_-c~iz zYd`aW&F}+BTBfFE*_PgrTZmo_%AdhCb&<<8QOG6nA9_aOTd_6WG*SEmW17lX7t`^Y z@I_rWPZxQkRHfwYW<5>vH!d5QTOGpQ0eeF+b-e$b5{kB0wX1=4bj!Lys9oFic72I<%?H1Ew5z~ptQ$z1 zj?;!V+VuNSn~w1AqD`NTOzuB*r$+ws+ZC9n2F`pr2LFg0-Mkn6LeFQ=#@e%`HMPvE zY96xJ_&C2q^kr$D_1(hIx0;8dYhELLAv}IF?UR09R@7$8>+6o zeAnCBai1R3AivdriY`^n4f4AzTlOpKHrDhJ@R_8Y(fq_^J!U&_LvHOur8V_ugMB0C z!HUm9XrBS^CPmgKAMOv1F?7UZ(r4+dBfZi`Q|Kc(6U68j;m6+~D`cE0V%4P{Th9%- z8L?{CKg%D#qfUcAa^a8LLj3W|F#ec41b_TEgoDsZ_+x7rfBZ7UADq4NSlfE~Fj7tL zA}@WuGo51M@i9 zqjna5?dS(xk6W3iBfGJY#Eu=^^aqRHFF)HxJMWh|z%@0V^I@0+rvhJOhhbZa?6}Z5 zk)K6f+b_0T3pA`@jA75W{Ep{zdtl3jVgxy3=&^f{6T$=KAsOKwLPp3pw@F4^51vBH znaGG)@W9QK#}3MUDvSsATAm2W2z^hh?zf&|;sG~szZ1p-g11>lj1S3(UEnHwu^Aa5 zvVuK!x{Tum@ZM-fc z1g6M{S3gHxtzmVw8L&b!BD}5^zJFdBf$cjWBTmr%wzqG{Eq|!AX59s3gz*0%>;~~i zd>;P))n`E&(Mim_AtTzNgEHcu;g46T)8LOB_~Vuke>@$=9|wovk7q(Sh>Q^axIc_P zo(}Pc=#0i;WW}__)cuv$k;>X>SMc#I*hU1$=JS7>?m{CmTipNZ}Swt2sUGGC-Wpl zJi2*q2dvyibh_Yc!?u($>}H)>=8VRi&h}P*uK2dBiJUo$FG`ye^x3L1gac12C+nGBaw{v${bfKJ6>~QoVa~*Q#;v@DH+uCUUU5`7` 
zg6j~&#Z?Gy$Mc>pzUiKytdceNG}St{YdJm~)&|nR_j||BZt0pLzJOHc!~E`cu=dGV z?8VL_Ua-B8Gsont3RxG8g~sKK<=(ufH#KeC{i&QcA?Y$7v||srRsYLwzRMY%lCP0@ zI&(H}C{G8p;qH0LV~0sRu)vlyef=&JL$?Q>8*+`EwUzL-)Hd<4X`}G@t0QBy^;_YA zT@oX!^AG+j@wNP(SOgcp>(6TXS$p@=Kho~W;3zQ#=#3V6mwkc$6Zj6JIde^^7Dx$%zel!@ly2#u3*{(;uJ`ebK&bPg_W|_W1ED}jx;;I;ArfG4N2;% z6)Q4lGe17E`JoXvF2y%nW3lNme1?z7n5+KYghMC1H`IK8zNQkxX(4CDk751Vm^U3~ zKHQjq9z%aufx9coQ~W%8qZ^RFqDwvaZN7#NB4CXmwlWr5IF9+pNPMYQ?~k|S`u@HC zYF}rR+Hr8Cw&TOGmK}dfweB$1V|%gJ`{1j_>WTfG)I*;&H-UE>^BtkxXyzGTg&y5j z&!)rZ&mGTc9dVa66x%-e;8Wi`&VbP@-W^rMSF5-czL&4@z3|)|p9kJ*wEl2+qw1e! zC-^518_Q>%xcgD!Hyf*O?A8AHD6ul38kO@jwT{+(HK1O0kEbDYIK`dRlC`>bE>XI$6)2fStc z{)OqcoT(VeSPN7~beyghUrIdjwh`X$5gy%t$bEud>BsH#VzmTkwKj_U1Z&#k-0V%v^Pa3 zN$dt~t*jGygRNTgp7P0=LY(`j*MD+~_``!S0wUiGxgoSp->JNt&_iY#c7mtyPU6G) zyZ<(oSg8=Lehi(&M<{gqsn8v|O8e5GtE34Y&7u#;vtRre@FzZ?7NM6xE3+=74ri*; zD$?P9nI{PUea#m1-Amt^H*>FG7y8FkjN&}Ba&MP+NR}OQ4fmnO-C%pnpj#ewS zL^iDYc8K@T{c^tu=T0c+JHVCpuH-2;i_FDzeuLLuf(NDTf2Gen$VS^V);t&^Uz!|@ zp*N>JJSCWR>ds%{Qy6Ohkj9zEoYhJ_^&=vVNIzCXn?~r1-fX#=K3WZ5-Ahb}oRj^M zWnA4B%eeN*I{kxp6+DdZ)Ubctb( zUf@qB#!%pqm$i@7_l$fE`%-t~2W!e6$2jzm1xAlYw7+2)~9=-m63SNjYhc)U}rw$KY5_<^0hL;MjF8jv3<%xU+zB8CXwL z6V&L5^s`Po@|kf&hWihVRhthI=M|1?_n8naKV(0ff$R7XuAQU2Bf-@uXV6sYb%U=< z<-I33W7Ai5vZgbDH~!eu-_khm&B9nb-zTf%sKtBNVQY@os<+Di4$`vkIyNYquqSn! 
zG2CZ*>dperkb_r3e$#I5`pg54++lrlV*S=JTGiHL#6z7#R*MYIV&0^H-Dsf>H9Loe z^<6c<7Qd6=EOQcfv_AL9b~Dz+9%bA#9h7upS>4FYLeganxq;z!1kZ2p0d^+)PmU2o z=L4>O*XT%3@zvxVLtdvnC81QaR?0XZkKGDRx^F2?zQs;RD6I;8Q&q*s@H;#}IkELK z<0mE%r(G%MrsS%I;$x(5<^L9|+(R_BFXgH&Ti2kcPlDrwwPgopsn)lpoSYR@8z_4` zU`grwfiT%R@txVoikbGvOMx{9ZC9AzD3CEbBY;g}zs0A^nhn4%GX>`qWnG??d*);vtCaCk5x+k9+sut))F*FmjeU>v$7L^FYSUQO4sRib z)6e|w_H~Yw)07h(Xn!#2nY29_?M~X(Gs!hO1Kd-=eF|;K0CyX>r+$;YDA=AE^1B@V zxa#R-Z0I66ADV@rl0|0&IjJhSPi(d-&WK6Huf3S(H)$91#}j7I-2SS@8?|`&*OXg~D|)DRM3;dqABA zUwmfGa2kBEaP8P@)6hGtiFjq**fyW@qqQ%`TfUZbF%t`YfM#UD;bL@>KR( zcM2SQxJ47Vvu=;7+=ITeu@BjXK1)TPEk!m}NleDchn)l#68fP$S-1D92d}^}V zY>QE=)D^4;vcEcwGs{wm@91=@l7mwu{!2LzGKR4w-qIdf+|v1+=53e;t@$=91N*ep z)zUdd^LCMbhh%?>)UX*`cEAR-)rSqMa4!WzLfFuH^eG=(ow}GbzWsq@-e6 zOv-Z7Wi4OyrN9;VA4B_fEwl~T|KR;F{~z&x&>HO&+QtE2zNbU~w}AZ#^|VvZYuK*l zyiTj^?}qn;-sp5`R|93TrV<~BEY6@W+C=`54!ujLqlwGg>h40P9Twc}u3TbLz4Bdb zlh^t781K@Lg3n#QQA_Hfjm%Y_rLPZLW5Lsc-XGNzN8MKf|6_h1Chh-7-#&@0f~~q; zvpFj?i?b+3Wp@DYNC^K=fFu0ET)jm2M{KY!0HXtZ^gQ6RRQh8o@);ka;MGMQ){ph? zSxJL!B&}m9_~qf(ki1jL^C9&}`7CVpZ%Tgv$4$GZM&l=-KjnM+w8)e?;(^m!)sj2N z^FcOys7Pmz4*DWrZAd2mxN_0Xnq;xp`e=ju4QHKJb^Gbb+VV+hwz5eo zW0_vIbdIV_w!{=oqYZ}qupvL>&W&%I&P%mfo%^?}ENd|M0+RfgyDt zO;pW|A)G#@eS&AfXZSPegYMC4=9Tb_@cnVVJw94B|1gwS@>x!C?>PM7z6AU03GQhX z{{d~u)U4hWX9J_MxF2Vc-BIV#Qs1K=9!swwes`Yc{dpEKe9RTQvxq++fBHAZ>*?6N zx(wOQclvmyd>30-_|rx{b*t-);C$mq)w~8gXwUQt`p5mo)$N+*I-|xd)!TGqoNB&@ z?~P)sX_lf=VkRHG8hcPh7unSvKf2Bu?Z`M&cAy>_O#m*=k&@b2wW<|5FTJ%~r;Uw# zHu6Y)wJRsAYGqvJy&F+Uz-&paw6SV|zrG5anK7{(+SaX&xUpTSoORq^Qo+3?GNy?o zE2#@a%}QUr;yjo_x7Qv0Ie|RYBKH#noNBYk)K=!;4cz7UmrK=Vi4Q8n&)HZ24cMPs zSL8Y)bPyUx^M5*z7?GP?XD(mtI&=LJ*O^i)cjvDAMKpK8H;LcLHmYorebil(PW@!- zZ8cVhBYM=`la5`3A1q3p>6;Rfd7O2pQ>m)uDQG2gw^2MDPrd@p9FFcASbw>7#p$lg zTxX&;|Fl@ymQFHgV$Y0W-WOdVv{?w>Nm=oON}W=-xv%b%xihl$1KJ~gD`}V9A@?W! 
zIi~3k`ZKBNeV*$+8MLqIzg^P*VuyEvgXo=}Yh3<%#*^k+I!VPWouvEJ^F${uK~H@d zeRTtR>w5IpV(jnhxWm2(y|xJbws7S?t|?rJ?_8g!^;%^HBD?-f)by;`s$_Ax{l@xyHT}9+)&${|IB5UoHmi3#{Vjd{CTk$uiK*UBU+;kr zWo((@iSJ70G0~fTs>dZh2cLQ9OX+*5&%<|i&_j2-L(+uyYe=hG`)G6@?@d>^W<{?~ zys^}$RTf^wIKG_cl|28TDg#C2pU&97yPVLp z`nL=Hy*5*HF#B;Q1Y_&1m#NJXtDMGrcLnzjqnD%CHby(N3C!% z7JlxsCOdBo;oO>}c6C7~Nk0uOT)dy6Eke7ALL1t-^`TwS+bqdNdCdLcBb~0VQeJ4# zny8m=IS$Vcd}r)j!*`Jhp3!Pol{Hvabc2kmm-#00K$jV;`!P>Rl=r3Q+VKHC*em$viSN(pOq4a;VERaS zOr^OeX}0o7K4@VX_24AYL&~;nlE@>mXJxF4PO@Z_GiFugbund=uAH##dhvtZFtO?S z>-bNOR@vP*xLWL|e_EV^d|79;S8S7YrOSvz)@732A$FOJ4d(WnMJ{>lWs}k}%5=G8 zXY4vxkGZk*iIJ5url!al&z4U%)pOpp!d~nnZ3%6iLmZ#jHMVyeR;@!XJWCzj+G-uI zo;z2(Xd6?X+;dI$r<&+l?am)b8zv)f3TQ)Tdf6n;qss>B!@p7#{a!;c_f$GE)|UPJ zACE4b6hj%olQA|Vj)L2t@dI7~F42s6DceJvzWUd|OsC%N=!&inxnD!Of09c>$LE$! z+KMjBT&xoCZ8#eb;bTo(IVplzHI;Vvq(=`0O74S>-9`A*(Ps_xfzUK&)I*bMjt6E6 zO^x@|#cEX-`@mJ&ib+>4UOMS;{?bX$frmLy_tC&i;fKIw_&+!|?-^n%<>Y`Dg7>Oyo$1$Bp-Mc;@>MAi9QDx9S82llDKs{eSoY< z$kgV?Bg-qMklx09oy^N;wsFs9s-5*V&Ke1>TO~D}W$XxVsy70=4F0QypHuN&y&!gt z&_Q?zS~1?TqtCvg>!~heb`N+&GpC<}J>#IxyO8A@mlPE(N`F0B#(j+NgBDYa>GH zPYV7+v1Lf(d<=Y)_7$Dj3;GEV)ea6De><`IS8A1EyO5h63FX!n7PvSX>1ouMD zTY(3nyOH_$uk^MWbQBs2ZbHXpw3qXdvhC;ud1rrwEOmuCjR4cyGM9X zhrcj=e-J+WD6(1drvEZ~_bSoEMgNt*YGr zv)bgmOPLdppLt8=72NYJHlO6(b2e($-8QRZA9+PzOTLe($0_3pUYGgI@@XSeM0b2d zepx4zHPOqN_sI9G%SWbs@Fi6#`O2y1AbH}LQ;1FNU_S8)_VSy+HRe3O<_tyidaw~6 zgWa>a1RiyBzpMS7#$p?KKXZw#D06MufpqAkVTYw+2dIrpC&_w=vF0m2?V`wRx5cmhU+7xq$O-mHwM5oS2`{=zZ>7U=+F3%2kkc7CqFHg`Jb)-r^T|*MEvIBFHeV#<&*6< z8f)a4n#MS?JC%0XB*TCH2C=JVnz+G{7<8x1-^EvsU0Kotewkm3;I1=u&in$q(Eqq{ z&8mTJHcgv>Fa4Y~@eBn$?`SN}Tw-z1&Xx@Jh1ga*bQ~n_jb|6+$UIx}8Q&%S@iPl@ zJ{B1n8t241oH=@WK~4{82R<5XxBi{^iT?ew1vy=Ow^4`vDeP&+r5*brPjnw`u)7kPsV64zg#a){fR*f0F=BKB(l#MU^LQ3BhE(V{Ic$EtoI#9CK+pd+3;AZO;s+_^Q1b3Zf2gt zw^?Mjwej#h@DzRc8<6Op@&2}JJRgF$D#@%o}{lLBn=DRVtFMIHAjQORUlrX=bNm&C$m(`i9nPu zumIoVjqnvX^x!)^oI?!bb5UMtUtr2upDkZ`dk;l<`)Dir(w{L#dH)UGzfN2CBKJSP 
zDD!FeqncONTI2A!{KK2$O;S4(dhyS$@M34CQU=-teoH;oTFYT{UN8PmX+!tT%75A+ zb*DDfR=8(NpXK5I@W}XO-Y;pjvbTe^)eTk~`^+cw&7Y=~v|Gj(mF~&vJDjDJ^cT7O zGNyC!Mf65#et5&*OI>}`<-=ZZ({AxC^wU1yG%dRZILxh`NB9pM9g!P&KPqD*4a%wS7EA3uvM{kR<3dHX`-`n5wF(Xj~K=+KgjN;OBt z>z8OHi@;s#EV8q^c$3>{9W zjhBfGJbG=ew8i}i{Dpt38(t9q!E}q7aie?(uB^{SgR6t@c|X%CV@aQG89$>)e@>jS zkf$4*r0likGMrWHAci1sn}%QP2V33T*Bh_JJLl@<5@%eaKgZ6<)t@Oda(GIcq%A$r zIMs`7g$+^8*&654HgbTrC!lGNzR^4WY{Pbko@ryW5-BfqHpb!+c!SCLu8fYEbw9w4 zN~A9Mbywfb#6Ndr)rsxeeWlAU?>6oYY9l_imv?)5Fh+GDYt?Vyo9exaJ*b+V?j;R> zyNd6l&yqNIS?DLyoHkN}kCKX}TPDqrZ%)zsCf*UgY2-ZmbK17gI?%RY zyP|d+r;X3kwjke(Igf8dek|o4zzTRq+HfKKWAQZ#{?dMP8#=(Nk+_Nh9%C#SJSKc^ z@L0X@mrX0#$QU|@zoOtThbwn8{PlwLt|Is3(?zh9;(Ed*CDUR3d~{qci-UH zk+uivVBhf#qkl&X)xTq$wVWZ8(UD#EghP2JP;Vx_!d|_9opnheJf!UgE{{up3_~x@ z_&Be>1)iC&@XN;cLVp|mFXLDI5o5R2-}Y0Ps}2MEBkZ}03dh4$v!+UmL;U5v=m+%Cu4DAaUgEWmAg`nkyLI|z z)Ge7bE+;czVt?WpzvG<=7W$pK<@Zev_yL?U@r8&iHPU6Q3vTkQ4gAM5&yhL=hEK0! z>^XIK)wrDfq3%luc`j9!lK zoD%Jmxjf@Np}Ssd5m^&SyDx-G?2CiA*x1`<`{=<Z}GQ@cIzE z{t8~4!{F?!@XYn3vDWbscm?_JF+;u?`1}>0?ML+U4*FSqb~%;_GmepWY;;+GAVbWZa#6@ENN%pB=!%!RG}Yh&^nH7_@jk>wvK6ain2zTXZ$GEUk! z_r%t9wYP^mlQSt#z)xc5JoXYyBqmH~I4%hHHPW|!pt7Z`%%M+UZ=SM_%azzYfi1Qg z`_Vo(jpKh6q_OvvTXNW)ZqRc#d#QB%Gth}f-_4`${g(`m;auv`c?o%8@X1j8;cW@l z(ebNWa@=nWz}sDI@CR|rMjaB%+?mB5Ioh?J{j<_ektzRmgmWU1DZ;Bd?U=(nkWTAX|-F& zMD))RKkLGtvsW03)q5- ze{?=hGDf6-Mb_BSt1@?(7pNC0y2+2@v^H$c+23Y?qBA&e8PMhIgs5L znkNY?u^VKpadu9HhRxlBErh@HE#}j2p`YKvPR$h_TJ#r(x8ewaYF3i@%8HD!jhqx6SbWiR!N zwTzn~>j2&C6Tq)sf<9c8tUaBa7ptac3BR#k1|B7aR(nyjMI|J&Rw}S&0_zm@iLD~< z3SUqLbvfZv{I5k7wC@G}Z9m(PT>G=SWO%Xi_@SuWp1()s3XWOi|0qS~&Z$kkuWu}V z?e!N{MXz-}a~*qO7oA!5coDq02p>E=Sy{Nvks`d?C7>eJMV%&Weu;8bjHRJ z`Nr9ya#owH>l1G>%9+UCEWydb6F(s_A@;ftdjq=6`vh4bcAWH$ta(V=WQ^f^aGroJ zS-+`I7Ma!?wKW-^kgkJILC4!PhXcP*i^yK7-)BjgkvWmFPiU1@(CHZUON??)UsSH( zT$7|`_uwy*HQhRky{LzKE~GB01HY>?ll5kiI|92eNBOT7_(!kJ?R|ei?iek(!nS8? 
zve?D8NY;gU+U+XduGJ>jNW8wqwxXwAwKTvpGJm(lvDW!pXj+}$(tC7-H`=QFx<5Ed z>G3Y~O?)1{)^7Ykz4(JF@F8$!_O7l&k?aR%KOM5u@JqG+K+P216+h7c|AJEnXKa6g zws*tZC$Ge=v8FVA1|N*4&+}*_fiNXCl*?NH&P#H0E$juTw5g~M^pqQ5COfa^jtuA9%t^>56F1b6YV9pbmt-7WZY_Ika=QdtI`k4QU0 z^*YzN9*>r~+2b5ucOAY5zct0_&Kj&g(^P-i= zuh72s(VY1)MEja)-z)!v_Tdv59A8!E*L|V(H4W3gcKWb8ez1MjraBrPQZv8B-Yqjv zj*WHAk~I(E!IxuPv$_}PK7r;kcF`WsFCjZ7M#dD^$n zr^koXPfWmU1Geth$F|O3j2W<4Ox@BF#pf8(j^?neZ+i*Bg^72vE|3YmnL>^tJjnd}^P2J2R4Vp%n+S+=z znrZa=&2vZPOvN7|Wu)JwFJ7KADhFLZAfIkNy}>4Om_`mZ>j{=cR!V@wMj77?TJU#4;XGX68c`GF9vcJmY(id;8n+zZY6 z;EUQ3YDp&hK9Vj`K6Go{3V353ywUU9puQ8jNn0{aa`I&}e&0O5k8IQ@`bf&jdYRDW zizXa6$ag>{3cYl>$ehQJi&Az#E|M;C(dgs2NajX-FADLn;3?}gA{T2-y6*cX997x* zIEvgbXd-gafM3TraY!ASqb|G-2mNHO<33XzonIxcB*dq(4lnJJwi|Wm>+s-h(A`Hp zhp};DE<|^9L=X4T8vXo!#;6?TGKu4W@g07z<+t%o#(oVkY}vAA0YCJyr|kcRuE{mE zx9q0#`SL=tcsO0N9sbI|$6<`eG*ca};`8gsfTqIRVjCIl@5VM{jndg|Q`sVS3>kYd zTdt3~ZTN7Ex>=hYkb7mCeZa1~N zA@}@ti@cRGvR-89DzR4#yhJvX(C@lzAQp2#=E4Jp%$0HjGMDs!#TMm^-2q-L%RV2^ ze}xCy;nBvuf#&~0rwQI$)TkWMC4%pks8O@T&Q4@KTH1z+gf`7-<-Uq;r8?v}Rh zCvG8?Ic_0*CF?1NFA4J1cx}AHZi4e>7#!mGbh}pI$Q*vx$#ePp<*+fZkM~~22sWI| zx$FKM_Wd7W@1qz8#uyVHl#IKwA<{UPOkc-sgTKY^Z_`>=e~bRn{rHUaV47?7mQb4L z1|w}-D6J5gZKTOsKq2>1%DaA^(lEZz*rfCQj#B>(z-W^@6o7HRz#AF-{V>045`w>f z#P9B5V86lqNB7S`FME1$T?(18TIBO^<#r?tUG9#HlzTLO=yH1e@i2I<95rOQ7Gn+H zb^)L5<-|Cj-@evdIb%)yv>BHNURv!Re%}AMXkJV!L^h2F5(=PlkC}_b_keO3umcV*dJwOpHu|y=GW3(Rt=!L|yn&x^hijB-d5<`awoglE zx6M@9vKO$5c*)Nwv!6M827Xr~2CZ$fDp?U=Z{QmD{l?zFZv4*@|Md}f7x?jS%HDs8 zKa)7Hb@=OFV2@XY-J!>U#o*hLIItK=BYs8T%ih{){MUzKSn`NNI;z8pZAvE&txU!` z>qrt8mFHt0GGo+9{!-%5PH-lWjlH}&o}`KYn>`UF!oNnDXj5F2%mLO|$IcL5T4S9! zqfLJ%&XDuj*5I@MHP1F;bEF-1;7JTvH+EPrZIl==&Ndr}&p8cF>xj{Lfjwq-MUOrw zMkj`}npD^9tMIMsa0D;ro4U*lwZ$CcRZa}ZHCl=@Nqcgu?CDrzO`0KNRqB&^dZO5S z0A04hN9%~`tAp1%v1hu73D1T{Bwlx&Wn4uq{d)i2r;?wPxS;)4&5otLrK~NO^J6FK z`Ae(%g1q^_=>`X}$**Njffxjm7X00Y;gTXK*aB_a=vG&9^r6WliJMm@HIvN56jw-bvZ zZ8n#A##E*TTaQ{7vr+#>^4b3!nE7u!Q+OW==QW`+HNX{` zud_~A)dO9LnM!_;a%-#;W?W+_%embOT_T{%24ig@Yv}wf! 
z@cpVOUpM&a@+d@WGmVN(`7+4&l?%WtH05)X@45@*`=Tjd4fzT#kT2Vm&kapxUm%~; zl&_F{S6m=pnkk=+d{ZxwFUgdz8~TYZ9;)wRO!+d%C+pm1x^dsa0N)hSzcM#9e?QHh z8e^Qiz=+uM`##_2 z`Th0F^W>SCy+7CWS!;dPXMNV?MP&BJX=4vE+YkBuUhU-mdhY14ukKre&aDR7>^;W1 zr?$R$e*^wc>`PQ*5dbFsi7Whwnv*y9giI!noe73L+IwmVB0~ifvO8=^F4EP zNPa4I8Eilj%Zy^Kr;VXwcnt0PsS~fSc;g!LW2#>o>*Mh}qhkvELG|}w7qBKJX0iJi(K04zC>L%7%gl+cY#OBnv@TbU*f0s?9Y};jv4eo3o4V(shh}{M52;SoB ztZ9edOJ0g1hs5Qj`SewSd^6-m*N?m;d6F|~-+b|IY>1EFeFZYk<;XghA@f{{>@yqR zaPmqx9kwqxd&T<4v1R(djjP(qz5fB$-j1u9lJdXCRlSAym49H4$clSEO2>xew0s<2 z#cxOQpJGc&=IX*<0$I#IbI@#fl(}s~mn(Zwd>dQd1Ri#rIdJqyqY)l^FcxE6IBIg& zegWn1rD%!r4tRC+RFc1$IxDg3v55cSAvX{>TqQsLG|FP{8`)0#*^tqYBeO3`e^ICG zQe*X8X?C1{V5^saMeS8oiL1Yy5tMB>XNE3VXK)UVo5$Tk!;N*A}<3Gb5?>n@i zI`GA+9d-G|NsK`;RNu1ec_M;Cd|gd@T{DRfdn!-JN9oACD`W9+X2>pW%!K}9*O{z4 z@!jrrBHNO81wWfC<5cXL3}lMy`i3@aav1gS?W!ui%g5jK6xbe8et)6lPhEVIDTguPZazS3Ed?QaZia5D$J7YsIoqw)8T@vfEM^C=&~u1odX z_+86y>8zFC$w2op3tNbNlqC)uer&Uu%PerXH2l%XOZ#nKFYxAcoRP0r8?gE|_xi6s zb_KRw#HqOHiUGwJM<%u7_M8J>=mJ-nYoy?<(cq00`GNfIpet}=YbWL2V2u=`oweHf zU%cz$SpZrbi7%$w@6C?U813K_b!r{rYw#E1IDB5LBi8vs;qwr8a{*vTAJ`Pf@M?e5 z#)HH2X_s?S3ABj_f?rd@`fs-T{B#VY9rPE^31tyC$n!n3p@(vwU8Wr)$MZjCgXq9j z7K=&eYHqUq@3c2bf2r6yvL9@QSF2B-IclnV0(~Uo-;?B&6|LShvf}i)p zvl_7_X$9XK4`5Tm=Ol2d-W)d;S=Fz6JMsaBM(sP9o%>I0P`qB*kYH~Bu2}GvIjxnUEhh$?s59?GAHp$!EG;dkuTLM)^GZ? 
zfp7^t+M8k(?Hp|sO5`JX$(fc zRlSoi%B*;rbr0eTSA&n7+EzSO_=O+5Z&*z(PU+aPo0_gU6MW+Jb~sWGKg?Rh>#MGt zGix(@r;FIrr^9eHO zb^K1mtHqy!b+!%bC)f`^a^~QVX-vC;It3pq_At23b8H5uX>QL0+aZUee)sSi#@Fs^ zpIU=#@Rao%#!S=uLEBgp`GD4zNCkipO*uc47aPyL{EgGUC2+-aBZpcGifph~?rxjsF7v zOZi`37RjX_&5Kwn5o;{_B=Uk}?AQ7KJ?p1&r}YkPD8zT~j=#m?`|8fAf$u@$7+CN< ztz#{)jIW~?xCeo!>W-xD1Y$a#X-#{8Sv+Q^^mc>rzqaE=MOqiFUA&EB(V-2*C9v0S zC!e0$8O6%KIrN-k?@{z_A714`<{N`{Y8?0rm^q$nEge) z>D=2A=fOwL+L^9E!%^a|a<(hXU@d~H4eU+f-+wb(e9&7^IPxnqJdS7b!^wb$;Nu*5 znA}N%Q_ag}I0IPMP!8TwsQ&IJUzvPs*PpdMH|A5D;YSXPwJSK$w)m{@!!kX#zin;A zWc=mG8*{=((OEJtTTV`lDX-Y+npfKy@KtbbV?U~43^qM!4#Rk95pMvyqZZ*LJONoh zP$Hk|c>MX#%f%#_p^3WyFa1hJQ~W^}YqE-%OHuB0L^EMW0?jbDpd+=YY*hYq`B7_3 zKgN%{GX;F-U85fuES)g6@nO?vnR)iSWj~>EOIcs7d5}2YDjPkXJ^efUekc!HP;m1{ z*uMv%fdTRe%ye3P&FI~OFPkm+5B4GJRu#buX8}|3PsP*u4(nsMjBV1P=0K~lSV6J9 zV-mEczU~4RzPmk@woMGSCwk_>9%Kq*svQmf5${&(#(r9obz4l=LWC5CJS?6zwepW3flJhE7Q3BR-+wXBP9y)4O?CRso*Ok9_@`vPtr2bCyaM1nIGM7=p*p{jd_07Az8cW7uPqAR9^=IwVwC>Rr zb`E2k9w!28!ChlWq_MMkE`N6EyFV`%(9m-u_6}Z`0iI@puUXLBh?cngtK81Y?JIdn zxlp{!ua#UDZO#eBlJSUJ+0nMrvA5NE@zz%74E{IH*x@|8zXv%v3Jv#6GzL=s=fnmV z=Q-ti@GZ}8>A8rpe*w9bJAq#8f8o7bdmcerO}EML_}Usap@vG|Ij6UJ-P*?-YyfN_94#2YOQD-J_ER>G%v2=c7WLmW9p zJDvH{y^hrB;@j{9=+N@ zeHh+U<}ix56D|rOqgcSgh0Lou9@DbpLW)00*C3f(>mj%%^ILpi4LP9ImqlC{)z|w* z-gA#v5rlsv@~zWzlW1P|{Po0!49?c#(It#uvHgJAHl1r@xvw)KllGrEXGkKPXv^P0 zTihW;($VdxU&g&I&NI(+*0spHtubWgqdy5WUDM3|m&Lr4*RmO&=`oWUpyw(NcJu-E z?#;BH&G}gJiE<>KLtZi`^_Z!xFP>E|A1gN({S{m zwAjnuKCplo2i?S1K<;hD$^9cQ+yKUn0XY{4< zJj~s{=BPFL3D1J8lZUvTUTDRJ%gpY4Ab7$c9|$TQ*DQa*2?e!L&8(}};F*dTZcjy3QI&HY*CDEtuY z)z;Ujqq1)&l-++X^We-$1j}jC*^3T4_!my1+v%7~ov-n`oKLl-cWUc$e(O`uX7gQr z$A4=KU*_3QhRa^echSp5(3NyFM~K&}dDR@NF4lf~r}F7K)B9GU6OeAw<1vck@qP@> zEb+X^r|0;nV;+h8)f(Q1hzZm|&bp$gBfsb-%Eb3jdk-KMO(}g#PBFAUArIBs`;)5= zxK;gjbWD!pd^HeUg4`uJ2U-YDM`sEy1fkoDs2`U_kCN}O6aN;qeI{8n$o>`NT&R8M zAUQ!*4!*FY6WlQ*lbC5!jaFxi_P!s`mfrio*W{(C#Jls>)$x2j<5Kkvg4s9Y5%-Zs%RcuRA^Mdgnm4m@`is*ZOx{w--Txd9EYWMx@CeH*8AYO4*PxGWT#zbQQ>{Da)x<03U6L+ 
z-4j;X?@w4}mu)`x-muE9p4=E-RG7pVW`ut=;gWFMxnBlOv9So~g3;&wD`#eWp7Ap1 zHn$ym&iHo+bFfkZ{}505BI~jTf7RKkU2Q=lrMvde#=o!TGsNdvrf>BX=A_ko(EIN+ zMn;Iu{BbAa)pv_?alkOTZ$aKM@{PTJcb*loKT|qie>b)BbtC1<5IM>EjJ3-*Y8>i^ z95CIU0^{&*{AAUq3mHfK)*oKBuN=ER-+A2|_oS7F78kTedJx^f^h~ z3ynaxeim{%b})Ba;p5y5Db~FYJ$DpXZN@KI`+UL999v)B%Q<0+Xc&JU$yq%sIRh-z zSs*;vjH)|~x$xFb^6~9oV3eSn>LB)O$)CUK z2>%G(?nkG|(ued>>hpZY1@E={{{{UUe=)+kTh`oL z|34Vd*`FFuR>W~~JXw(@jpv9F&g5?6E{zd5eIMM^{mN9WF}N`uoRK{11@`ef6x|~o zzq<_Fy8;}%99+B%oV=7f%GqIX)IKl%fcv~5C+FSUiP!zjF_cNAY#MdOg0Jb^ZxMgz zhgSHl4X*Gzo0311AM|=~*`iP3s`B1n%UGk0HUDlcH$ucXf3UVLSgf_5i5<;+)}yh` zTP)r$`x^XB_C8H4D&X($qh2d<%@hylX=IY22iQN5QxDNrbVSp-gHEG`#cx@&JS(I! z@w}qt}zp>|&<$s!IU*$J8O6Vi9{7>=wN6=0UXB5RTZ(|JEu3^3Av2IthwpWD*Z%3|o#&iLxtZkw^ zB6=>EJGw<{^(W4tO(TpV#{81;p4(JssLEZ}W_x9|SaBiOhr4{xmA+E~3ShwJ6X`{21{uakXu6|3JrrbRMptld<5 z3|nYR_?;*p3|2qG+&_I6J2X4>WL(4#Z5??mH*nj^3H^>#W8fKL1mcr3{mvv~I(Y&I z3QC%;xe7afgr9BP*OuG~Z+i$Hf?nKTVDrA%U2^`}#`^8(Uyy5-e3Z2*cN+X{2*3AI z@L?hCJPRCW-8`zLXQI(!HgSeQ59={f?VKC-JlbRLbT9n}Y)WcID<`zEU2@Tx>R&xv zU;GZ;fUWz|T3BbpQ<1ME2ko@=L0|8e&TcdCWbW8+o;v3aR{ze<6QwatmEKf+vVRJW zhrz36?#2`2Q)VO)M}xUbPt?Tu&_a&!z&Ev4E6_>mj63eH=!C)R^~il6lDC#|L|n|_ zLEu)yKR9B`mbz;uH%H`U=uXeIZgE}B>F4bX?jiqZ_Zzbb)!MITZiPJ4`RuE}zKQtv zO|+w2s@e-1xMNT(-P6z3iTIvQyHDAAk@U#zv~fFaKE=8v;?U{y7j0Cr=QK;MC8w+4 z@S0mZHY4)V`zJQs0UQcxLvwDXJ>>C$CZ4@CT;@LdQQ6LYQ9aMPkF&R@871II z)g91<4|@)4v(X|wjPOppU$`4zH|1W{d09^xUe=d;4m+oW=$tdofM{K`X_%Rz^T1h^ zV~wxL;m+wJhs*ClAHV%!qeXVI*Ex-%g|vM-%`apR|2&%CjeJFB737Oynm+^PkR>IymyOe!*094-MJq&*k=RBQ`5FJ5Rc##O)w|Lndn<5&d@ld-u?KQH;GHKR-W zdB0u14SvUY!HcX@4RaFhZ04*f_!S%)-!jXa*3t?5--NCQfPr{Q8*TR@SBkg6Kapc% zeC;T*Pdq=>^~~dpI?4=WYt}fXr#FV}%smm^b#flF>GmV`+gDl7A?{XIF#cx7Z&rIMS>oYU-ufy+= zds>9Yn}+*TJM*z8Xk;&F9BT~Ji_gH!qc9xTF5Pg{fCug_Lq-G zpOToDclf==9xqeM_{H~x+tM|E_dWRDgfsI!-EWuXtR1F-ozFOae-efs?A@J@A8}BY1JB)WK zlNq@I+`9ulEuB*{vOuZP%GoE~-wJ=sPwFqyT+1~dczAi*6$5j4-}i-a=xd24SpMWuld}QlOghv|BrLUjZAUxj9d@b}4o@wMDgGOTSGa~Udiu2arQEq6Z 
zgBb3dKkw!a8QK4U(~unK4>Pt-=B60h&D_mJJ?I7XIXh4?hyJ0zx-Mj#1H{hp8jiz? zfnNm-=J3D%vCHA_R}7qUhPf|C59DR;wZn671t;0VI$Y(eujzF~me4L@JI_+McVgbV zn75ua@oYIf;xEv)V0rxRImiuVm%W`0s{v$d z?Z=MTzC(Mb&3>xN6hBwOYdE$v*a#vUMuE&E#uT4qPV%q{L(AVab~$UeamdPma_@Gu45uFTTT7|H}%U#K_|3z5MPEAGdZ*gdtB~1s*=g8 zhP>~-o;m~0Bz#n4$EDmaPUM|&k*EJG8AN9p<=D<3Ut5;nQ(sbi75rUxHi_>p;MsRM zTkF~BcbxGY+MjnHF4=E0{)ohQd!i)$3eLw#*qAtpCF+2G+WW6xa-jFp(st!S4Fd<= z!3L2Vg6CkD%vhe|nflg#djD(4+vFqbAunc-JHR&l$AeRW!SVggKF;6TKcLsDDDp)* zbC*r=k2!1fJJ9{{zIj0mUdhO#-^*!z-nN4AN}9@yPE#gm*$mod=vL$<07`*#_WHt2ii=$ zkT=n3zr{ZMw#VrBDlmU_tYCf?cRR5>h#v!Y?tzs*c%Tb@-h)mg>6H^36xSV^+IEjK zxBDLKe!SRw?1aC0XUAZ>3mOt^S8|4K1GX8!Hs8jR+{hy84^sag@+{Bb***46PgR8bVvO}zq8~AtpY_!#W6nm^Z^zuh41}I`09-^Ic@EPx^o(cY<@m>t0|O$N$7}A7I>)aiWZEC%Gi-I@nIf z>u7EZY})bowN_d;gE@=dnjAA<-HEQ;t~2XYoqyI3IYBn;gy!rz!!-B5jjfA1lGFd+ z$T;G$nT7*+%XZX^y!yuz8^{F$-_`o2Lr=5d-KAmC#!AJcVP}rtp48 z-9_BroH`fB?VL|PV`b9MG5U$^9pYbE%**M&o;JLJUtNO?|HO{E>BZ&8$LhYcBQPSc zb(X91lJMj;*Q`ouU9xJbv%P6n(~RY_md{u*YsHKQzpOj;7!2ZjvHT=I&qwl94%FD( z#B+sTvhkFi^&a4>eQ8){2_KHxQN9VhC3C#`cjOfKtK~!{z&Ef9UOC^ENwWRl;hpx# zPWWA`*@is@dJ6c-Th45ME`F#ey4UO3>22<1AJ*NNeP=~E16@?2j?tvOGxaGLe#hhbj4n|*Yip5b4>}z!hn!98w!k;SEA#q$8CS{& z`~OkCT=BXcEp2Zad$(+v5e`3fN!T<4Tkk`rBS&lGzE)SuFR@+Myp$Jov-6Uz|0(-D z+6=G!*M8)SzRl3r@w=}M4_DtBeaCIc?RxDQc0K$1UlYDZ z6TW|#@crD(Q|139;ron)@7E`Mza!!M`h@S#Bz*5l_&%8MJwGdVZ;rs6rzDE7TIJ!<)U`K4KXx1*D<H&w$xH`9|ECrSn93K9sRhaHj9sPw$Cw14GR`-x2r!PoQDxWMmFEdrb;^jkD!O?J3-ySQA>mVwt!6 z8*+orY3!*c=NiL5f${cUU<~{WA2{L*=^nI0XKNp8nKKYrSIck9`OSZuFi?X|bcWyK zOIcD1j;*@N7#k%Q0R8~VZH6C=egCu8xMy68bOExZw#exiU=LPa;scdti*a__S8hlF ze&{Ruz#Z2ockJ7x{rbe;Uw%(?@nYbq{NrQhO`$G)WUX=ixE2TY4;qhfs|h}>zKmyb zTXg1@TsQ-HEvP#`;)ZB{>sqY+Z7wjV02bxIWDc+?z4B7tiy}g?X-Lhh8?oS374f z{$yWdEXpUglQEmAjd|=PYmt@KI?kSbTzLme&zpUGVgcXD-{>$Fml|k(+W$CSE~ItK{Td{PUGxx(^%KC^(w`m+uZ5!1gk} z-!Rgcr@XehGZ2iTd?yxjN3S#Ak3FUvyP1Ym;uR~8>+7s>^a7(}Xd-rUj$HS<n-awwU^2VgfIkaVh zv*+AcKN!JJM!JqK<&k$182LvTV+(Gq8}u(rO1I0o=I137qa1zL+WO^J3`EFZT)xaq 
zuP-oK%E9pnI@JZ_+7J&b01q%^$etq~LduqVk}3<*0>^9B-?;gC>Tfl?_)KkqS9V(q zhufNEEwtMz$XSqgaAu&xK^|(=-2vRFUzMM~Aa4Yp2gx%~js2rrF?;x4Kptg{MeA@7 zc(U&79Sic3)-KGONSh12H|M#2JoIusxtsC13!Mvomoq=#i~i7g%c}?bW^=AI?#gqL z1A7b47v_8;?>y=+EWSCfp!gekKH@&>-a9{kVcsrmFq1fki>F*#JU_3AXGz4l5-(rP zbB$#ZZQZQ;d`B^sRzN#$?1ogIakg9r|IEVf=ZlQjv35ybfpJ zB5y4FhsLO}onUM$$a$->zl44h$Bt~C5v^pME+)P#?F8{@DS*~@V6!sCT z&o#W>Wxz`7-IQZgaoNzY9Ggw;k@Kk=Z%_W@efS)z&jS+!9Z}+e<1^KuxZu_1xcc%Z zf1+4ub!BOra*vH%eZx%ba>yCi!Jg6KX5U9Q+fl}TBRh~67_aft(6$4N<=8}c1Z{tt zXEiSl*?u94gN`G_`eX3UWAtS)4x3(~Ywj_#_5K;nnfIOKz7IN&=Xo2O&wZ4%R={&3H%v*GkFuSCrR^Z9f9R6)_gX3 z)q|ox)7&ph2u5Y*<#y43?5fR_}YX1IsnaRe_4Z%Vi2DKeb48+%3M#| z!Xe?!$Ic?j2Iv~h2z+sU3I253kpXu&#`V=jdKI{9?pi0tu-!>q3H>f`PO!^uvB%|#Ja3mnCf_l%Eeb#EMQ>OJTnB060P~bD zS|ze?esX^iwp)Jg^9Fj+(czEPu?ZYp1D(CkxLoj>IL+EQ9PkD5ybTY5m&y-Q{>m0~ z@y5p! zAInzk&R>C-ONVz5JX4N>0`$IuO9-8$@PDo5%lMBmKj{#SvyAP}(3Zw7JDUT%Q{QJ} zPes1ufy_=LZCdU0VoUcOl5?|>mBHD1`r64jJ?4b^WX{l~$X%)@JwhgH9MidYBAUxx z_-{QzAF(nM>@q3B2iB(bsiAEezvPn4^can*oZ4|xCJygG_8`$}C;HB{v^Bt7GzZ>y z90jku_?P4pKY9r;GoBpU=FM(m-;dq@6`*H`=emFL<)IBRJC*DRaoTKHNKaPtcs!4_ z3t2Y{J(4T(SL}UzkdI{VCR`t=P71$7ogU<#xc>Z2em89ecYu}p(;aaZ{G8l>e&NtK zf{sYejfQ9%2Z1THS~vr~WZ!cz2Hk-?Og(hie&M+0lL_tL#CY0~I|O6ys2c*%#EcYp zzI4Fu{-V?0ts*b{Cgh%Ec*QIK@DzWIvZA&4dU46NKL3|P*f=sa)+ahK_KxJ59Uklp zd+-mC4W3{@JoNRQkFfT`W#esppXUb81t-C%9iHpuK3OnD4z=wLglB&PH-rPNn`32# zZ?(+PgB|rE=CK_eebY!|+CkPuZGDwBNW>-4?xXME7Ym;ekCIP80lzl`w;tABV_e8s zTK|pqY5N|0L`5Tt!KVJdGTfJFQRC2>J^K5hs%PLcDi?p=&TsoYW6(UrFEy9#!{tO* z7V@6pTE||6jGnRMotO=g&5u|M>-Rdx60@}Hq1YRkI_I*WDRg^+HeYnISDYl7Mrhp?lv`^&5TX3JUN$09bJK6lA>OvyTs-_f zllSrG!a2RWg?HyEUdwa;+#l$A41QM1JvZ{w zKreR?I-~oPCj!3|i#4k5G7d`?OR29bcH>7`QaUM=f*gTQ3Fml^{}4Q3HF^&>@4L}^ zgeY?(_A1pKJJYcL>CR*=^iFza@_$6?h$)9wqnLfJkiOW{#z(q< zoj$$1V+{{?YTqD-E%ejSiwvOi${Vcpjp$+)nd7@ZcBfQ)LcSt5ZTp>p62%;l+^qIR z|N5=?81d&(;=h%fRk!*8AEMpx7qcf;Qr=OfQhS7r2G{I1@a5?aLkUnn)^jj3N$e28+JD5o-Q^m{ct zzJd3TElVB0$GpJ3b=mmJG4&0_JDBI15u0+~rTmU>7!9ib9P-E*&Va-!BHyjiu+@AZ 
z_qW7Zo`N5{;L=9Uo+xWr%igEhh|nkckIOmJU&fjKQqJ_VIn&SLOkaxMXDl`ivQ%L+ zcRl#Y7K-)*$er5Py2cw-zZq{#yNWqPoh6)!$GfqGZN_dgx^JOn-LX_} z$MyN>r)~&8V(_Gi_Gd1K_JFN{PN;8v_&Uwp;{}mrzPaaAzV_?!b9@jSm2Gh`vTl4#-_RaG{tMNYAJ+}eoQetP`gE@T zHa!AC%lVbEVOcn_+ZF zCi1`=tv_ta?PM(+NAKNtz=ItSK5Kt^iF>fKjKkso-n4NhHbN_yulNHzBST{mP3SW| z)?aZSldNO}xEP;JDu1N8ghZ=6Y8KG4GhG;E+Qc=TWS;m)L{y)mjYC$OZnQ z-xB^gS4^vBeBJny#romQ4ZQW9z0qI77>fD+3FWTgyT9q?1LXxS<3?Jp2nw?_t~#9F7d7?xY*3OOnEbiDGRP-M=l0N zZ@zE}A9Bd4I)?Z1*DPagoBtAQ_Z7UJ8-qcPe>~@+a^Ss@IbBbiKc(%9ot~)!V`Ss0 zbs6W^_x~8Cam|x)+w-i^C-W3d{nW6gH#?yRVs%=kQItd6LE)%uLEhJ1McY}#asMjg zJVp!$_5U~a$9(jW!buPCF@aCBXbOHclXWkJro4%}fd0M*Y%J>GuU6qPtomfi#pPGd59{Ojh5?0iduhd# zeUbLB=TkA?MsYs0pcy|jjbBfs+8kA%3~zF|Mup~bmoO>SSdzTUAA&xru0$7JjZQRj zUU#m8d5ofsHPFOX(_Jz8TSm)^wTs%{FL)vMAZOIwU7q&c{N4kPdC^P>4NCj+dm;0E?A>MSUep+kF(JJxH^+qDcaDyHz}lucQbPGF zx@Hutcy>nG3hZ(5hcZ?ypJA?8G2;VgTEqmuchFWVcrFbm^)q#^ zVN8S1+cq7yuQ1XU(wAa3sGKe9W4CdT^YZtw^-ed^-DyF7J7zS?gln9H~7R$qOG`Iz72YlZvJ%J ztLVtf2CpXWwLx9tdsewly#KP3B)5*(l@{h{2F)K0RCezWB42cmfA)*mmQ3e< zp7z9}W{$&Gk62yOPbHhT{96TkIdw|P&c1Y?k8{FY{O-KO>?pf3wX))pluBnX0dDWkG{aZaUmc2cE(v!{6V;_LUdF(>ZBXT_B5y5+7YVLB z+qN5+RUED^_VF9OP@?w%=s+^(YVw0X&GFK3=C(I7Xd0@b}NF zvy1*E2iJ3snMD7qp-IK95MC#K{w&&7E1@d$` z_NC62+J5w*BRelKxho_{jrPT9`rYdM>5R4`Hx^stjInJ;#uuxvR_9-4m{)dQ(&KEp z-#1{cXOd{o6ezeFLW%Lp8TYzc+%z=p7b)N zo-y?C1$>#s6X;{|vP++GgEI#&a)f)_otFroeYY9hBD^wC7Rv&Y@nnJ{Y4sg zl@WMK*|HJoL1ac3@vG&FDqtk+qF@$?esq#PLNd^&Tfzx_h^vUDC3e-i;nEA2wJ15Lo9kox+Z%4Z*Uf9yjY z2c7N;H)rg+^KAQuWcL0J_TFjdQf89N9cnbw`z`=(yTFzDWSs-teX_~bc*aoX{=>`G z-HQEfxj8CSpFAq`ekuNE&QXz`?=@~sJ;ylw{&M8d@6Fu0htK(pWs^C|mZ>K%t!rmp zo|H`e3!XRf*~l3aIYawn2jiHgyJ*)7)$L*Epb`I^&AgLb$$r|=%4b~GJ!wn&oNP(Y zV*eV+K4!6>or(`Azijr9mt5F}81rTyj~@O3J~SgDVf^N}^FOXQWfn3Fu_ZV!UY%wk z$H=DHx6D(y3cS;EWL@sTOuwIJ&6YQ@`{rK2{}y+YZ*#Bp4(s)I>|)Ew zg%X6HcnSg+&SxDZTMM4(jhLb9le@G7jnF zyJ<^n<8!7?)i`>uGdlhUd@SQx7&z$JbxxQ2&pdk!f1S_Ttr`A&_@EKJV&FfH;a_z= zHYMPAGdf;tep*Y$Yq$4*h)#4BysnOzu&$;4e)%$}&&+~-xow=Ot!4KU!HFo>iJSSv{k-J+JM#Dm 
zeCJO2-xM#$+;@n46|s{*|V{h)`+s;|Gv_0#Vl=Tx8r zn*@LO4rd7Y)$Dh~?FrK(1+0G%y`90>7GNLN$+`ryn(P>CMgYD9|BBxqZtli@jIvrU z;k0c0em;0&Ln>`>4v$DTHjQ=KZm$!*!k<~E%+IV-1^9x_Cv6q}$VXVmBI)af?FSR> z_~Lf>u6t4|tr@A6Gfm{O)aG2RmF@;omgQ7VVy*nFmFuRQO7^h^D{U3eCsfv^tEpcTt1YW&vWdri-N`nMFwl2^^q=BpNaS@-?2E(ze*kk z(PGAhvj*04rV$;wE_4itf0gMuG-cx|w6x0MLO(M5L0=|v0(8_299KOoUG-T5k{=B0 z(XRrgl5=WUi#GAcYiA#CI?Fg7@9zX~6^-3NU&5K@Jq}-tb}}MyzxmJeSFA=g;4dN3 z5$k24pCO(nxaR7!h^=dx>D|_~#$I&E>qL_}`(Fyq%tVhHf^YUF=Z2Eq<3iR#Z1bsG z&VL{6rl3Q59-i;PS8n}Sqeb$9o~_|rEt^&B9V0)iTipJw+QsdA&{K{6Qp!&ssGHKh z6?yMA_EzcN_JHTozv=mV@KKF*Qhc2AfG_)Im_B5q%AFUuVeDN5O~h?gPd{J&Z@zEb zqxqZ93ak@+SmQ$4e#uubzQyaZLS|QWyJ$oAsAN{YU4-e4}QAOL~4Z$=DuW?=KJIS0?!N5NjSj4ZkuI@GE=<{0g6jUr&ER zAB-uVc(8H&YE8heTfna!;MX*3`2E*rA8(yy4jz5=tO4B@>O6iEWq!guH13jxY53e^ zMso0@cY(utpQd>*&S~fqnkX}m@+!k!wH=>R_wv^P8)(bE2Tw;=5$7$!FTwBojB~8r zo@@ZJBNdeIq5m4%)cDz32YOj+;o4DhB4|&D?*+d4V1KRlf_iWNRQf)m@gcBruV1Kf zZU1Cw`>WIy-%?+vuYcU1Pw$7Ax8g^9o*$$Oda~&xztmTZuDQs}aZ45&f{wOGj=*+Z z-^E8F;G6|riN0G$8{77;Fd80OV6?cQ58cO%<38q`kM7+U^Govh@7Rq$5qA*iW?FO? 
z1HZTJ*1gLb*R6)%UkeSLeiwlrL3a^p9y6zYJM^@LJ{Ljbb@UU3*LII(A9JDO6`jH3 zbhcSzx+}f{jqEAt&V9ig<=={bR9q*%^}OeEC(=f5vXQn!e3kufD{|2Vtc~Q}MT}uF zd_eMRX!-r4xi}Q`%ui^a~-jffpWh;DQv13$d2X`he)_l!3jJ@d1 z*R3%#LyH+Ju_Ib`a2}Y*I2WO>UX8x`R!4GZ>nv;t_}opuw?2%1+L;pRM5dOWTC#L2 zGWAwIB~z25&6cSvhV|52nAam(1I~cjXC4HXPU7A0o)#rW^JwOlxR>2Dj9;t3`IyY{ z*}d$?;Jonf^KdEI=DkhW4HMrK{u`h_d;e!1e&*&flkr!O_Q zm=s^5jIp1T8#tR7)0%Hk8>gR}V|>1;Ba?n4vs%)l|C2tX!%OS~-t&ayP30O1q90G6 zGHc)n{Ge96XNqIsI^bi`eOYtk>4G2Y5cA z@({MVkMmB?9zZtQn)buoHTJu_$_J2*9!EBcm5)7t0omw&WFzqn?g{Lilxu*K?#s5C zDHWTL4e?3nK;Kujn6c?uHT$#cyccp;froX=tJ`I_qi4HR=Kkt-jXgAldsu#thW`YS zfh_7hdfv|5Sh*Cxi+APhtGe?Mzf$?ltQ&V_>Ep(;J>>@8bxz>=i^;D@4ljS z;LKA2&Ipg*1y|}hvpfbqJPNKX=F9`{Ua|(8lPzW__kQr>R?a-!VJz8duF37@*<;|y zBHH5)gK{CB-JiRLGtU~%H0aq1byslEnc{b&H&H&#)wxE8Ut^+QKmEGt_YnQ+40S*K zhUj-L{Z`X&J#DRKJd5b}O8Tv)U-c2;+5PleLBG}Xt1(9yPl$f&>9-HN7W60Bvb+7# zaS0D}Ho?cBQuh*)vvnr(^Ii8KZgBSyx}M1e&(mMBJx1a@=B^^Y*i7iizQ-Q1inI{K zZ=h@i_Qk81ho0A=Q(py7*ojfOGOo|R*x?5!PN z>0-`vnR^9vsxwJB=MkMZuY^t)0fSm#PywB4uhqLNp;N8%T;A!~B;X+%d!PL-uW}Nw zmp)<+< z5x&jnV6i1L{hm3pm9}j#{gTyue`vX;cEYchSj&7rweLHR&wo(&gf`reeLUe-jp-t@1#NTNsrS1+# zdVM#0p>%8O&&M{Ga(Y%r{Xafi7`ok&S<#GtUcCNP)kj{&SCHHj#Cf5OGo2|F-ze%f zjvpmv;+bL`iT)38zbu(ech&Hn4F{Z(*NL$PU-&t{dnX2btFdRO=U;H1!}?p9_!M!s zXH7||>|@+6XxPs=1>0n|_>UL65_po}7&a5KNm)a8Uucgm4#?|D-$9|ODa^y)p z9*=EQ@Twi#Wq>`Z#fOd-9#oi&ZC(Mo=!y~Myt&|OW3AD!`oh)Xr}aJX%~LW2`7c-t zt$*Cl_bcozK756J=+#|2eIdy!`Y|`1&=(f(b?U>f--1)0ND5iZZ zua)Yn{3Fa;vD9Vz@I11RVWx!UVxMtUE3&5c`%>v=N89?@_m^WwfsWQdmbCaTSyDC+ zUF4HFKpT?zWh*A!ew1+t2YuL+AU9Pd@AQVskz=GguEj>J+Vs_FpQ;fKu-^JExLSe_ z{Yvf%^Ulxy&H11(z8>*4O4K{JSR2)o+~9J#LdnR8@W2Z<>->P8nDO*2WUN8(uvT_X zjL!`WCF4rR!#%(TLuW1IFyZs*Ivy`@6JBRRr_Hn>oeXD>G~KU=U)I9wlF`vf|2&81 z?90S0h{b1$>mGDw5}eP(Po(qn-2?4{TbhH;(b!DYA7uXO_anw9-?bfl+OiQktLA%0 zq;KNPl10v^hsJW32|Y_6@omOZ0Y2_TClN#@lrF*oo`OMKACV1C#dQ+0ZxiqHFvsW# zY*bF3sWKw+1@>dt6--XH^&0rxMCwo4cO~0rh*i6%hh$f%_MZdB+u`{xWDa~Ai$*W*n>1!|@5I!_gSVxjM;?nlIUW7{ zr}K%#o 
z+HnR-w7)iX#cUSJu%X38p*hdM+IoeqIC3oaOJ+-OcQ4+p!0ms`qbH*m3o? z9r$5@JuNfx5`4IaXSaYmttuzIz4NR6TCXnb_TxEFj*$bUzq%55UxQujym0Ttt67Ju z7QcJZR~Ell4__V5kD{^QhZfOX@x9=V(Bpq|EH<3%O^TU;Eh+LRaqoC8d&s%$M>7ks zRc2o9DJkPQxXqhc${pW@&AAm$S7&S9Wu#-d)C?;Fx0i}Ml}a=6GX;A zKZ0`v96kt-9)vd_uP#B>sY1qWus>bU7r!gbqzcKdc3sBsHF!uHFe>}|(6o=i`CG>l zONh2a4@Y=^6g*k~&d_#Z&e}N-WN-B#vX|BezU-gLm};E(iQ;G0dpok=RmSm2=rrmJ zt{6zhMt0_G&hk&K&OKyumP0<1+*12OVW?pFje`yoA5QsD;v1)WQRwc?^f5lg6L}s! zJaeY(*Nv)2k^46xFUU@M)7S9F#CC%7&01@3ev9?mqV}GJj+Ue3E;^uc0t3H#%CXmY?c0OVZw4rr4*W7B20r1Ifj|0d@DY3m)n1I> zq!TB~1&_=^PQ`zXG5!`C(!iJ)-*~H0d~!m>b2jqi{|r^7C&_L&#eWyD!9#RF@lE9~ zq4w0TcwU^3^)uGD>HiR8{3UJHxMOYhGe)(kyF|6S{B>kZXKcL&J{X$z5bFR8E>u6= z*q#PonA~mRj~wqq?Hl+f>JFt1SzY<^Hknqv%Fo3%APVkTT_xSCkPYWA4Gu1Nt#&Y8 z*Io?|EkPpdc{Txls74`oiB{=Xi8O_>l1&yM~On@i`Kl z53S_xm}kj)Hrw`Dfe+&6&OCHBi@#^?>^oWhI9~gl85J(hBQwFtN$>_Q=SZ!Y_PLs` z1qX#|!YRQ^{2JeKYLG?N-CmSS%>&m4OX@fOO_9~|6dbgYCnebNQulb(-1 zy4pZ~oW~s^`{w*S->^*Y(;1z+UDcmK{W;V>KtH{-u?T+e(U_nUwber#Y7biBe$uS; z6&Nk(ehb_1hc5tUwBCvzqOs30`=?{$QjyGDV)Ll8=aHXNW6#6GJQgrN!O#b6G#?B9 zd**ZDndUP$KA*4s-_K_YeDmadXfJU-eE-aR1b5B7=PBs;IR|mIVmK>2{)x_u6~vYO zhOw>pk!yVi;W<8Yf_s9QT(Jp#dGF)+In&<(@Z~<_jOgRoZBWKVnW!__-@7H#*T?UJ z$iI^~lPxkWH@r4YzUeP8XTeAK6Gd*Q#XkX`xQ;Nmc%ti+P5#03t9-q}H`)Mi)8J7Z zU;mGx!qsK?n^Mn(&3?RIg#-H$Z1WFNp7Tn5AN-9w>8cs@t?_nWf{a8RjrYin2J&@^ z{~&g>nq$vR$U0BO>h{6!4vF90sPiOqc?v%k+SYgv+#cu^!@sw8w3Zzt?weEBP!h)wjD?QZieN1 z8Yq>0MLNFG6a4YJ;{oL9wBNBl%-wJWI{v`eYQD;R`K|bbMu2`Yn>tMDdO4dp!2J=F zKeF#ypUy*@u}|!&qWnVql$p7b4`zB{bee+Z6b3KHh`0lgKxwg+RIAyXf zfqBXmK0eZOu`#fPF?3$d82GI*>s;B*JhZp!eJ^ACgni`3#@{A4JI7a;=mAdpWy8O| zMSFvl$f}g-LEgPx>&frs*c12g{(#f0*mnfo0^?M@Ezqz0#`n|jfcl$jY+LRaf2vL1 z-AJ1s%72jeHMIG!w5i|o6U$v37??-7Pv}GAR5{`kXV|})i|ya*F$UhJOy?ZzGC13+ zJ@KYRjKMoT(9t~GC_F$rUrLv(JuZUJQOEO?kAk0N@*~5}B?UZ>;v?M$?jNDuIZmrw zdLHri+Nts#%)&;<<==>W;I(ybF~2&!zezB>ki4Yy(OM`!s&SEx(7ushkE}ww->m!h z=*7{0jJ6i#kx!vPF=Q;{6_?sUcP4v_PGUQ-H&l;&_j}c&$%k`xu^t-}1o0g+|9XXo9!^9rRgskL<7L 
z2Ym7Wr|1!$6)1)t9v6=qR}qC5tG;+u>w}KHn;$`TBX)#WI)@@-+vex4Z5Li0KsU+R z&t7BEl%BDNZGW73i3YUxY0$xu3&bDC`#+)2*EydCvzt1C?pUmcdzs_#SR$q1q0K)p zGPeB;f5RX+DE$n2mHKYR4X>J)A^Rs>XyjLP8+C)u4BN*~@c0;84&7PR@*Lc%$2Y#J zmo<+9Kj}{nu#V!(@+pp*m{;&F+{^tz$ zUk46P06ywdyiUAU<4~W2&Y$J#{zP+^FVr9Y5!>^ISSyvY*MM>ByY4XK<>n2qSEoO= zUfM@=9vH4SFQMMND037~QyHzrx?{kXd9LO2!F2Ta;Ffp?zW4*;$DGXzqrYZvz5tm? z`>UDdt_Jp9%)QH3muIY6oR^Oui{Vb(6>8 z6LJpHzG(4<=up+`@PJE^rS$uT=+HF18^v?U1{OMeJx6A&5D)nh0VXjATNXZ)ZOg*wa5%G%VE=IXAA~N5CGI!Y4HY&s zPVp7V7`?z;>*sYsPm-1KiSTUZz6*Ft?vBa(xyZP=k!{F42cZ$wQCpIqwQkb2{E=7@ zdZ*ud{u008g%`d+JK{Ui&*$T#+RJ>5QgbkRcMQk-pkoVqYi0}{WEk#7{B_U3gQz1} z$38b4clauhgCrYCUfTWi&@{adbG|_L+~Hk3VQe+9I`}8$lJ;CQbHs!tnqN}`ckkq} z(|75HezD^OFi7{8>FmS3;kIuLZT~)Nr9S_g@h0NhJz2)Ijlfg(x$I(;siwa3wh<|^2`fcH%H zHsx8A-?H{Ot)-Q@DK~nz(J^JX&jauh)^uL<2bsQ~fO~?i_@DX~?=rB7(Vbv9HuT)v zBg4mZ*mVW5_-^!ZVGnpOnI*mt4#KD8@2EXM?Q3q9X;!F@rr|!+c0RF7iYTXb(>#Bs zb0Fnqix^@)s@Dtby~O-$=KQ0v4YCL4QRns1*pbtg&0nz>(%pA2YY>n7qjOL?HvR)g zwQlM4P4E-(PSKEPRejz~JCX?{3k&Ck`@;D;Gub_f_ho|1uQPpH7@PWz^Ipa0E`gT? zM0<>JCVTuOzH1L06yITPJ?vfLGjDyYxHR}J(iiV5Y7WPEvpp9*AKoJefVbc!UKWIg zzQnl2&x-X44o=r6e5_EP^goqPmG6Z&XdlpC97S)`+i1^mtk&Du$XOEIk-&N7nVjgGHrq*zX@oPP7zQepZ4|GHw;P*0YAHZ+5{S^0? 
zA=+)jmZ;HT^#xZnZ|rqWlP{pH50|}POt+pHDeGda)K{O#E=xLpgl)yM)$Phx?B#Ag z_^hY>>pvQrwwkdB$GaG(aL|Skcy)w*r38N4jI3eXIB>`0<{m|Boea$p2d!@sdiH0j z=Y`&yxi68825UV}W7nJ#+tWT}pu*LN8WkAWlHM^?@J0y-=+ z#odEGX$N!uBL2kko%k2$DEDFKXnzwv(xY9Ldk^=SGcQMP_$23q^YP!KJo?avFJf2K z$(U5OlU&WWy2rc!LRs03soZGZ`+=ciKS)n{_XVlrrN?-e@-9A=J9i3vL+{;FQpfMV z4E{s8ApHzb<~{T!@1fu8Lw77cUE+0T$91Vs087P+9=y$gPTPrYJBd4l5!m>jg+Cm* z1d03bb@|$6i@$M~S*g3svm*H24;TjLZq9fgJ4XfYV*XFY=B7Z}1cF;UnO~M?ko`8U5-3WFh-Z z1$;i?tSI@VpI8Z9oO@#OUV6j_Ep?{G@))X&;lY17Cle z)*GGK^?Gif#dsHGFX|r&pSAa5^y1uuA%oerI9_BQy_4*FC4GtqO0S{%t0=ScKV|z# zYypjz{Z@6=zvB8JSNIi+eJACXu?IFo$CjCC$1GKjn2F4PDYl3oGml2rVGHXic!(E1 zLwUij%$ei27@y(*=+034k!=S+9DrEa@%|iQ7>r}yWwfbr|CsW6_usUkvBST&39gyM z#&``pj}Dj97$g%a4z9`{;*21i?nN#H*ZK}HU+(w(XNoaCc-yRSJkGfKkPIRCZFg{P zq>rnKQ!(`1r~T6vW1M@`n18wf+()7xK{nlnd?eL8xX@VYL^rvHezx+z#vD~qziTr29o&5n17mb8As=-l?^p$s-J@u;%rus^ zmCkjuk1j=iu-mhE{wD1NS>MF_XcPJkvX|*T`ZDGfkPYVN+(+NHLwg`O+_ct=<@4^N zMa#N(R@peL<9;Wn!&z{=<&BsPuEBlj?H|PKq2b@#Vs^K(-Oh-7gT8l=t0Uf?-o@Me z-}1I*Qwwd1E?0mP$O*{ro`~+zZ+BXuLFf2Lw(KeS|HK{7-;ou` zcm|s@=n&lE?2{GY3=(n`l=>u#edJh`%l>?!aCimf?m#9ul5GqW3f|mPDlVAr;Wkpo zqMsT*6|?`MP8;Th#7;}w@o3tJL7_<3>+m6Wf1#MbV38x2>2fR?dK-u!SD5 z7~R}Y>DSh?*t!a%;fAN0bJzcBLvA&Cxq4*7Yn>VRByg|ytA}#4iER`7%XjRtYJAQ0 z!SPEBw9UGmE={FwgKM>4)l+JoChbKXIA zdX6?U{*SJk?fb+Xuw&Z&j5+r+kB|AC!9KB#Gp}pBc(KR7fxFk<*Mp476{=-S$X$iy zjHep@t?>wE8c%@t8cQazM_&IAa#Eo)YC|sYn1qb7VVf06WgRovN2^%FC_LFpi^aQB zY$vUcWw#Ss$1di>eQqIn=UB%admV$X-_@>ppTqu9ZDiZ)=w%PrI?fq}QQ|uGP7H+m zCe8NE0r!z>b?(E~(CCod-RmTW5zj@d#6Pohxs`FY^73B%?Vu;1J!PB4JK?3Z^v-ts zb5yKs6L_VvZrbvGq12~-geAzYEopZcFOIJEr@;EQ~OEy+L6x3U~Ri-PtkEhcT^ zbGP?&Uqq^#L&}1Z3BX!x1Efiu!a&b+%cAqqXdTx- zosT#KV_GT3C)(GX*76Z7Iv>#t<%@oenKheqlqu@7vaoklBhYw^i{fn>rztLMipDJ3 za~I!tZLBO=rQ-_&*6e54zf}gHJgh zx&wvv2A)AUn=TmVBqn3O1$WmrX+_4|*{bTd(GECUbpv$NKjE?t;c{GbfJ+i^Y4%54 zbZX%;T!sh9F*nFGy6*!%WZxy6Xziyi&Kwc|C&Gos6aVK`%Smsbw5@Qb@Vf{jyaKS! 
zw=SKGFiy&G(*hhlAgU4HtZU0a;;{YY{Z%mY7=+D z0W_EjdJV*wgz*>O_mV!zod?^m3VJqd0mPeOflL?TDLO|od)g?h>0v$=p-wa(q<+NJ zgIm-317`+NKX`EQpYaB-TqE2Gx2@A!eOJ_Wtx!V$BAn=4-U%{1*Hb^0;b{guG5_ck z09=4?t{T4MlAnFp1^al>dVM**&{sfj!d1PDhr0d=M;brAfX2IkYt4R6c)^GFw=(#$ zGHUksy5D|aeimca7`8CXC5cx$!$x-DAjj7Pmw3kmG*yAFQ4GF(3cI1apKUzC^{-6S zS+4iUzabw7exP-IRVQ(lf(Mjz{O=~am{XpUxRQG)XrE&^9In$NInjhnJ-jp>`G9LzD2;J*`LoF`ib z$zaesTjQd6syfzCRt&+|1pT4C$ShwCI;$qeute~a2zhCL8rU~3szc8;o9@1t_Oz;K zuHNT7))`vB-i86aGv-#;yG+B{NIKsc_UR|k!*$WGORE+%NkZMup`P+H9-Z1>omm?b@|=UNTnc;Ty1l&Xluio$nbOc&V-1`kkqTU}PXC(4W!Js1 zzh4vJ{DowkyU-Wwka5n+UN6|XX?+!gGaBY2egn>B=mDQw8BeVb^p>$nW&y2F`Skc1 zzB@^u)=u!4z}CHQYY#BO{@Na}nbR7fAnYs8N4hbP^#a^e2dHWCF9oy?=k*w%iT$|M zVEY#!tvA*}Q@!2{;m&lRb!BEaSZmrh z5Pr4+bk2Ya%CVT7rSWRRN_|Vk@Al@%Q>KW&DrNKLpfmZSi7sK^*mKsiM}j}d-c)Jo zT2X>CF{s}L;5!_)-Kx*c(9e!4f$tQ)sk&uU?Nv6%_X3(#u^^w7 z9)P0Q$q?sGgu(9q9{A(HozA5Wm?z?#g`)Qg4QN9HmYzwRMJDopbX7M z@Y&;>3Po)}YXzJ!;RaqL{!_!*48-^FO{MaPw?IEl(1~Xr_Q!Y|tdpX(mkRJ*zE;$R z-4-$)eAt(0jr9*|mn~8iajpaH;fwiAC#FSE>uKO)9?olU#oqF>u)j3<5eR46H{Pr{ zAVR=;Mc=30OvXgT5m)>w|WetZ_6+T(1_TI^snKcV1PHZs}f*Re`0VJgBoDU zVJTy-Kz#?c0Dm3WA9|rKL@%PDQ-kmJ33z94?gL|vG$_Xh`U&wl?K4bkM``YfF*Z9? 
zmT#rTcYD~D*gd=Ep&ZzkvbRxtVsHFyz%K=O^Z*VSXfyW)!L1=9BLuWw3S%18htAcA zKplualrI%9H2)zl?Xho+c5%gd6>kCK9o7WUeicTpIO_%YzNmq(9M@}zXL;2Dys7-% z-WBf*(HnFSVvb04^TP8nJB}tx!MCJm$?Hvi+bhUJWtC!1C=ai|{6M~UGuaH`D`)p^ zzJmIw!Orsz`P3Muiy5BDKSA~<>I3FjS50xy=ey`&?AecSYJ=f8@90Svtc8GFlJgCf zMc>ddg;75)LHFGWUn1d4b+c*u-GPU*DM*HrJj1%6>}=WD6vRV>2Yiv)Y+2wVDr*tS zqVr37V{I78^m0|^fC%egIw5T?=pTl;pErCWF0eIdpgbzSN((ZZbP&iOSKJ}r`{*Ej z71j?>9$FX2_9xKZ5Iz~K;q-sg0eu%^r}~tI8mR;e@>Fg2Tz&8deEj%ksUvW}dK#)* z`QC;4IMYeXs#(ywAc8v#{hf4O9=>0pGf<4cOH_AiOIjyKJkRTbJ|gQgW@tm| zFUNs5wIAj=I+ehq0&rg$yB?^59k>j8+`=zoL9&jm8|sja{f*@5V239u{A%IIXHzm4H(N6>zN#HYFH@Kc)687Kj*x4*7p`@6aU_9n+W zAMcNlhtmTzVK3+4+(enA>m;c$d3_O2JXNaL0eTbGs)J`qW)N@DnH8&mj{)|XD1a`T zsAy1z@egy-mf1@YPW$sOX7A0i({QKt5U(&!x+pSTNM?ce3FllrMfi|l_b7sGM2)Az z?hV-i#t5?20(?R|qCw|&V83ai7xiJHnGxhl?$@eL85kE_i#b|R+%=j*qdek?M$|sQ zxfr}lJ_)SX^Ph<@Y75Ml@`#_pSiOB1tG5?-Cq1|ybXy?M2jYm`$3eCQh?>X$?}Iya(x3v`=Jv-M*)FV_D79(s+r5BL^s z0{;)_LHQ^=9R4je_&X_%*80nR3`fu&@cF~nsNP2n=Yr_L|7oPyls)g_0O-~SW*GHHdd)@-c9q9M|?kJ1KOL{K@ZN z7e})H*%(KZdEM|h_YH+T($&Pz81HgeRPS(nLo$oj^+1-%&nDRhxl8>FZJQm&@}VBu z40Au6O)?t!NltS8TOI2I^zvVIgm3H_)_i~tO>sWSH${!?&p1Pf+SL$yYQM&LB<0|< znl$);d4uaKuI<716*4+BCU{Q)i{R9R(H{MX;FG){I^Sw6Un|ip+XXllm7tAea4zDV z+U7FOFe3V#Lp%S))~6+C$Q-~r!%hLPJC>t=Gtoampz&9aV?>YN` z{CpDqY!kpG8TATnob(K5n$)2K!P^Vm$>+q;r)Bn5!VUT@edC3BQFb|KRRI}%0O0|} zRk?JQ0<|f-ehN4ee`EotAE*n_oXX=DadhHf8nEkdh)(Hs%9>YQ7M)pw^HESv2EqtF z&F3nB!^1~axwD$m`0RSX?68d@=Y{O2d3)z9>WlTUmt{w*24#39?bAhdp2WpCt z7w{F5OjUz_mE-~G^!oQ z#v$2RHC*2aM_Ebts&c8_h$l$?6W{RJvr1aB^bt^+yH&YFpPGGT1j3Kq!Fea39nozs zc$N5t>PkGbkRrKuTT3Vpm3I2gjt$Y6XZLYc*&n-ORXyW!Z2_@;u!>~e%teTWA- zLf;^}NITSt&c`CZ30GH+PJD#B;*6|B#inH>bBIuTwjXu?@KRBaYHIK%U{7X;!xF z$DP_1x`w}ztS^it*>>S~Uo6Etm5+J7Kf&9pz}P3jPC?})A)maQrYJ{?aFgM*0{|Zpyb*f-Jm`Nl(_14w$yhnPEfG&`Sc}?;qP- zv%Y~Zi4Q6in`f8)8dH5v)EHiKR_`{)_{i$okgr}c-TDsJfzh~W_Sulte{bNO&VQvk zQdz1fUyhfYw@T5^h_|S1i+w$_i1yTfP9iVWgTAA@;&;d$|2XAYPc)?NW588wcy5jyjL+y*(SV0SM~R1 
zc-dLdBQU=$qxWNsVm2RpIaOg^F%^Ab45NZR23?a;WNm^+8KnpY?Fl`|ePmHg2EHbpur0z&5XM8DRb_SN$?8k^Y(<^35WXH^Q)P9e@v1Y* zago)V;VESYU|%a`luz{{Iv6sBWiPN+TLWRR{pC>_3ZpaM#_*KGi4HvQmAe7sHwNL# z*u#;J=Q&slWXLG6bgRPm-VIP^P4May)CqDD>q|29i_zYRj1v1DHNm8MGfc*C9pGfh zQz@H;bOMaM8K67-7ntW6Le0VcIZ7G&nn*+G-|_IR81BSRh+}263s1F->L9@U;$jWj zRnu+a;Vh!tbI^@1;~^u^?z0~+>jlDb)^RrNojDKmnhCn4fMzA&uPkTAuNe1*sNW7e zr{L~QI#tzzfuLaoA9LE5O#eMQ8oFM8S$=Y{;KSsbJAS%c*8^@vpd|x8T`2hBnf9^! zq{ju}9>nm{1l_O)`v>NuCF+=oK4{*6!SXy_pO^geE0483w(>acs*EZ-k2No62Jo1q z8)qr*=gdWljN-)|JjGNkgpu!u%E4SQx9ErDW)#90#ua}IyXUkj-YGBEAl)GTlE3$3 z(_w!L)YlVreXppDKBSyI2X(wobp#FO;`t1od!xQjQQt8<1+v*xsG^mDP0v>Y9zZ1~JNg%#ALrd}eZT zrHE0_euihlZyV~W2;5#7U0it@bzRS3zXF~@ssh5Ou2cs65xFM30gD0=c9i%7d*G`f z4)&rOlo!4}wyyGbcf4aSglroH?cNcv1#jmyNKM54_sXrov(OP|p$+gp7IMrVGI`hB z>uu6fheeEsjM7fcYM&YaJNGNd7CMg}_0nnc1LrMIa$RJEzhFF{oUE|w=49Tgg$;PC zW=v98rJd}TYN()C76@K8W{%xB!ziY9_=$b#an?xe6xT&MzXEO=Babcel;QrgA#YV= zvUaL_(ty-4;CsqvgnSX$W9kFozeG8<$QLoyJ@w{f)m2v}Yp*g+R!B{DR9z*-T6wo5 zzf>1St1NAj_A1KT26=526jIwDtvS-ZG*Mke<=Q5@rM^sZPYq7yrBeU=fH-+s+6uhX z`3fqj?kMXCPqEBBNj3GAX~s%-MlXuRq&OZWRxh4{uoBH zY)5Bab{U?jZ$HKT364#~cU%@otMASzSsdjVmPNE;{7O_aR)$XPl}fVXC8Lmf=8{|X zE0Edz^2x;~J1J(LSIpcO%(yO^rsRc2BzrdIQAeDH`16TsfrL4FGwxGr+h zW40_^w%v?noJria0Oq9`c1L$cCdM3)~Y5WIz8iSsR3=efz%yw^^ zyz&H-zK_~Q-j?{@j_vzy4UXe;>ZBX)KRj-_GJFR!<)QWr)5^{ixSR77v#H&^X-$L- z7os=u3DKL8(R&@zkjy5U8u5~D7%642GOGGr2lN1Zf=lI-e8RaoivW8OmCeU^L~ZpF zb0EU^Ipph8X22feQED@aljF^$4_&K@F{#%f*S#|x`|sVtxa}=scCH0&J7sDnIorU0 zpNT#5Kl{1Ce|K^#`A%tkBmIDMgXb8>Ut9K zn-D)8{4c9JtJlc&7}NvnNKISwTNt%A0#2DY+vq9YdD_+pYa_-0&K z64SySI)MRSO`eZ$uYf$zX$<&|ZCdjIi)c|*m6(`wdW{ryVE)r9HZ1M!q$*?Al`C*n z40ZjH_3?&J;0Kq>m*_Hf?TG91%#*GJ7k!((z#B{z3Z)l%hCaex@V!F)vgQ>ql}swr zk0;?Z&hi zTQHicri$84o2_6{7A{#nbIv?Pb1(m4eflv2YGDhKvRBPOqB&jE9`xsmKPZH?t_HF< zo;=iEc!~h=V3ek-u`HsIR>KCG8tQ7QD#}WV3J6qEQi2UnTU(pY=exSPHfCIEUhX}; z7{?BFmR-B!bjIdo#qVB}G9MM1Pb^NsgO;8FwwGX<;2aR8R;^lTwbH8ZQef0H8fa-Z zQT_9!!?a|qdNKw&Iy#IBn-Widx|*OSdQ7Oo9)NRXWwm@UQ7rxo!Q8o 
zWS%nk^h7OMZK>KpwI^yy>gMYG)Z^6K^ZN0Gyd+*ZFHLc);z`AZO4k%$D|S$NsPs#T zubid)R@q3!QDuV43YC17yDDm`)~bC~LsaLe=BT=Br138Bw(%GRH-!X+G=&@mUBv>0 zTM8c)#wi&qx+qRkOjkOnBvhWMd{MbjRbBCg!f}N)U>Uwmm#*LeJq!CDIblpAmd1{M zzRYFOtNwhMJuWSK_p$sNTh+h+RASu%)80EziNR)2zb~!k{P7cl1X3n0YWxI7EPE7B zh!ikGhmYye)6;EGUw=P(_U-S_L<%FtqDYa{Do7F;9V3j5HTiqc?~q{OACmnJtWNkF z2xAiK#2*wD8y6idiIEC}`JyPPFf>LaP2dNG34ehVcWetSC0t zlrM>?ez7sNW?}+mRgQ>~#7A);LL@Pf0@Q-48W$xeAtM!{DiK0qG=l^|LPjdFij@XS z;-pNhC@NGe#9NF|5XnR#JX#PFD`eOrZ5UB#lq5!I#S)NW1;OEQ03{rc^pT(@!KDVb zWq<9M@s9RPv?N{_6CK2e1ooX6K};n5NyFl>XDY!CVuFQYsel<9ClX2tWV|3Iiiwc~ zN~DY^Dnw!hK9R&EFk(qms8yIy91V|#kS~lHCyJ3ou|ymvh!F__K`)b7evl+eDiB3c zgVb#!#z!z-6d4!Ek3!MH7``NgNF)`;#PUV4{18c83^Ryo&lm8C9feXb3_nH?&ySP@ z3n@vkC^iVG1fjxUYYvI=f*>hh5-k-;P$J+_`*L|>!5|5IU`FjPjTZ`|__2}@De5NV zk3*4^g9BnsG$@Qn0IruYf;0?_5Dea9sSq3|Q}S()j|iHD|?qwkLgj}zd;(HCl>RggeVqm>LYR|;&Oao&A72F zhbXG^@3xc`$`6T=L{ct7h5aR33C9N!Fv`J!L6<~3@>u~yz`0_6!zN0b1fj`;sBx(o zNkmiwCzQAXWKaJ|AwDca*TMkJLkugH#8V5hc!r9`foWt^0yoA*TQfalVh|ey>Plsd zDvB050eM+Ws8GsAP(6_oqC+Z>#(}xTf>8VtWe^e-EQld#7#$~N^OBtOjs@GtCPW5G z#G)XOB`U&_9~&Wxjz&35b%J0~j4()w-dHEHA4n|V#|T5?!~(Wug>@2m5;9?sfK(BP zSJVPep?C@uN$IiP^iok!1p3e)Q+ZPrxQY^NhzW3K7%O+FG*t2r6^WLJ#b)OG5CKpM zX6tXpi9YlH3nHS|)TE*Z*E1msCF8huA=3E?g%Q+TT#@|vFtn>QCPCKE*wz#h^;tp7 zR;RjIWB@@RmM7RYg=`nb{9}sH7)f08Kc1qSjRL>@>-v<`w#z0bLFr9f?#R<_jVvanKWxFgh$DRuqIFnTRnP=;mYY#AkYlVrh5^=JK)P%nTsh z%{7=mhyv>W@^KoX#|a8MzrJ+8^&c)J;~`U}NQR=H^J^uL3w?HMh3*3B2^#*^j}nAZ zYo?loLAYRn6eH8$r3S~sB;#4OMn{SgL<;{XU|889XM#HE|AFQ#C)8663$kAv1_%so zF&Kd{mSglrZP}jnH?+FaPz*IHK@x|S5pX&kG_o4H6NF)`lvo9=Sgl^U$Z3IIf%X%{ zhG7f{jY9iE!~}~mh%+YEwir507+V_~z6oPxWrbf8YYQ4q7?U_B6B|43NB+VzQo@{N{N8E_5EcuKjmzN)jK^sFU!QqF|69VWNBO(IWCuHDvpyd4Sd=u-AAq?M{ zyCdApBo6GwJ#=87|DouHj5k!{7^)iEefVa9aiQiIz3X-bwppr~3|)lf5DZ>Hl9-sd zXlbnhUp}(4{aB{oaGf?t5*mfkH26gnz6S{gy(R#tTd|6@DXV+1qbMofq>w_6 zSkruMaL<8$-oE_+SEH}DJ1IvsgBU2DWWgr|4kp7f+47h9wk{1#b!iYL959s7Q3x7HdEoTSB&CQ^G2Mw5hU zg`6ZDrfITtBv)g@FcYkfhsK39zHK 
zkPhRJl~oOltJD1s0|_Z5G#o1{ns%|k*dO*y4iKHGHvdv4F=e7@=kHAHTaD)73j#45 zbGknSaV(=zyFxT^jEUuH1)a{yie`Pk!Le^YixLmD@B_u*97gu|E zKJE?Ua7;Wq=%R0OIssP)8hOluh34|Qv1%9#D~Mom>XsZ92cf1KAOl5EY9dw>FNVtv zSQvq-r>ix2r+NYvB8mb1xcRIlJ1EA|fF%_tpmKk@xG63_S_1uurbvW&?sHtVoinbTt&ZSZ-{p!4*-VL|Z?W9&sTdqVdkweuF##g+?aQN@Cei zegFkysvlL&dNNKSdeSAc28?iKdlMh%VjLq@;>KhUHabQU1oH|vl2fX#d{GG7xgd_} zA%X;k8R$TkH6T%U;((Jzz@k`dF446fP``1BgsFh0t&l<(%><-lRZm`{gz-?;(ZS?n zdhP1j$m=H-MF{0}<_CiF<@AcGtr3v5MOKU?2A#rP0NspZPjD~UM2NrS97_6a}k@lIH|;x`bdZ% zPAoOI6oZ*wjNVM6tlgVg-|?+>F9mD?uRY_A&_q458>@ zjNwQlw~LZx#M#e#c+YxDU{w-KaM8utaf-F3qW+|e2*E!Ie98DprV@J4v&8U{Sw_ zflOd4_uB{y1Zxc?wM=Tx{@y-4eFyp11Cs4p)zym%wIqrD+x{WTj5&iCHHBp@Dma$R zFZDo+7Q~W)8a)9T4%H5waGXfO8jfHeWW|wuS|w8t(L(hAi$SiQ+KKSGNkfX>DJCb6;;CZ+Y#l$Og>C|L)w?DXe^`d&pB%2Xj1H`)Ply&^N|wCXf3{rKhZ=DwT1_^at1-Fd%=Oe7?D#D&Bp5a&n5KhCd-T8{ zjqx3$9raJndR&JVg#phD3X(+GSj!Y6TqNJXfU%K;S7!6Zxq)Koc$W<>A|im zpQ4Cx0;+q>o&=2%f(Ji1< zsUF7V%S`vq{Rj4hqC>nQhXh$u?G!MxvFb8HILf?UDD32olhSp~$7Y={$>Uq`Z98|g z$E2?--x0s{L&EHjgTt?3=N7}t6@!^Nc{(ssk!cjueH*JzUHKjV0aWxqfP$(kB1NGl zESvgaVIul>wEhIcu6`KoG}gzrM^An&wr8CTW+8Bqk%13tnwTc}bgv&2I}_#*qDMY7 z-%z;8z!m}wX>e%p&Frk~96EO8+jpuT3g*%M+yaq}^ZT!>^j+>CPSVhX*PO(BbyTTw_Fjeo$w{a#Nkfzu{zx6<_-@s>9i_7_Z zgz()xddoYdSw}l7vWSzKXy1t*2@xwRnWL>n=Kkb*>+ex3bD$`cOq(K^f0D~ezPTFt zKm~6%EBj7-GjHhlqPR%&8X(xR;KQq(p6@o$r$!>a85mWH>C@kel6(JtXGcyee{u-^ zuky1_!{3>Ks$YM%fi>$`PuCpGdUjcvLo$oTrr$)x6q(xoPZwuWVcU{ZWh5aXBpo=Z z7zGW6HKbQJqqB`U*O07Zvlga3$ki!p#;$M=lB=0T=s%WKtYL@adU_Pl&@J~ga+*}2 zkc44uT%eRSlGdl4)T%LS>?I2vA0r!azpWK!0A2aER3l$DsMa)TEDgP^=e8OQ*^?nX zG2?|FmxeHCpsWKMx@A1|Pg30EX;^b3>-!?68b{DT&OiyYTkgVaQ5H}a3{fB2Bl!cM zp~@05fHHuf$o$AMD3LUMkmFNR`=)_IIG&tKtfLIs>9VroOyt}fr))=pD43sGk&}jX zHpAdeRxQY0sH7orVwN3ZgJML{(pYnb%Ze$}Zx1$t&{EN2_=TxhX&vtYv<0?9Q(RD7 zOblzHz-)!a*m_v=@2U(e$Rre~N~UCtiHCXi>NcJQKX)&6!i z@9%67uw-OoGog>iV)TJ(D`D#-2p&g_2;GkJi4z_E841`qSesl;aMh-pKz7b9hxI>V zh6?>gOU_%jr*IY;Lz0JKWg@%cAQp=& zFzkwhSSAG5u^7-~SLT23#3h5Bk#(?fqqA(Fl}&oVO;QZuv`9u~;;!WiU^U1NtokTZ 
z*H|wSr+EUOAS^!#Vxj{X%FINDFzj&4U=YV&Ck8_}{yO5X6GK`C!zmYR4LxBq11r-+ zCxP`<2bf@u4QGa{PG>+WFZ}G%&i#?6`8&+V=$J;a!!?*`eOu9Ser2d{#MMS#Brk-6Vo{SiRlL( zH`eXOz{Q0Scv0zzd*~S%jm2GZBGhOz51G|oHsz*;RYVO8F24<$0=d0V{5J>J@8r~o zQgYwZgaCRtnK8v+P_Dt)(TpWJ^(1n945``ZGBF{yl(P#E7&r|Xb|{FI`}E(&3wL}z%C9N;`Ov9Q_?z&E>hPErK#nJj;_LDgNMjN3pCQkL}PxVs|!#79_@^Ba4YK!PNqpYAHvB zO$2;N%7CXd7~+WEd?sJpAm=v?$aHj!$iJufdrH>OQ-4CrKw!bT#J22jH4p{^MkLl& z{bp+o2K2^)4_U*bk!rMat%l{Zjfr^H0L~}pChPHFsvYdCm%YZ16?xuSCYSRYt5(Sx z7)5c+#xVvJ>&vP62;fTo{Z45p1$6{2J(q%euY(y9^yj#Tp|o=RfDc+qpqmcJUaOpjNFRD_F-aOMD1JC=l|+ z4IJ4~6~=_>!@`4LBAF#i0*OLGgE6sj9FHGcdokVY#xp@4PB4{uI)!lJ^-FGv=@tDgMw_=EMtt25lkB`U~@4{ zVLkZk%HU_q+P`l8hO&68df?+*up}zNp`QHsCW?(8T~9nd!(ihl)f4YSz;V6S|8xE2 z@g?=t56kY8DF6LIf6mW+o4_zz<2nARTYr2{!sh=_4}2j3$F(v2&-{mPw%F@?J^9f! z+3Tk)zHa-g%F0(9$HA`~uO^FEuO}YBlc<1(^~9sTY`ktg@hFdt*RLmDQ5N62o_Hl$ zylFl0%CdN?dgAd-BU^q)Sv;yz>l!P28@uGs^dBdS?@~{G`Bz46^~5)omEWtLczk)2 zL`4j(Cq7J8zF$4@99J?-n3w!XmO*wKT>G`&H}Yi5BUvXkUWk`jKQ-=X2Zr-bGYn)F zLkbmc?1ecJdr^9FLC_2FnE=KC0!q)Avf-Ee&W)D;ilWS!pAHj-4ref-=@5$;j8E(x z6BzbRA2P6){A&j8a|Q~cPa4=uZrPN5*MP8>%WJnI;ly6m5AuLN-`~{9FHc3^e952x z=^kCDA5%WMnH(m)>er@LGZ%9|@o22YMh-8>6r@sK0zwM6zVr$@T% zoxX6RYqjhh{gS;hW$%#Ff4od&Gc6 z<>koZ-2sc%F{1Y{=sAo({`%GE%N_$gI@-y~lg*l~=w1HY5zk12?}WHZ9*#Xes?$(E zl!rUicy_GuOyiKBmm3+c@$@^rt!<5G6fB2hi-N14QF!$;75onu@kR27N`=r8_3-IA zp#?FYqq;eL@!ga-P;0>4Fy&R-3a1(BT`n3yASf;I4gC>+)coUQ50?f}t_F#2xZz`F z*`0l<@q=-Oy~IgeIwRS;V*&TR@Hls$bb`CfqFEg1IbRkp=gV%lYp0W^Z*i-3xR>l* zUVd0%?fCyGAI3lSD!TulCWa(UI<#Xwd9pLWT2f4eE49*f+J9j(Bh4+=+A75bKKKP=h{Wxih%EMLCkE@O!7p;0eqt}u5XVx5VbZK3Kwi3%%R@o`i-Vd{Ojl4gr zV%)$1-P0q_*ls;%s<`WH;;^HiHH*&lO51XKK+px1P;+yg=^Fb@9gSLA|A@5oY1+`y z&p%c}SfCL;&ied`snacE7Og8iy1q|{{?_Q!J|{K|5TDg6U39&z%8EPFJ8P9kXNGGx zJHAm*P3+LRZ@iaj-l1Kc_BJr+(mVTJKi>N`f})a=va$*;`UD$ae6yGQ3*EZk zKWo;x8vGAe!~fx`@4N8-ZFr;qj~6~~|9^Tl{(pQm`7ghk{+C~W+DqZzu>T$k=>B{C zS5u(bzt_JM_?H6zwG?Rn@AWSQ{-wZwEd^Tqd;Lp+e<|=U1^%VLzZCeF0{?Xs(EIoL zmjeG%;9m;-OM!nW@Gk}a>nI>!J@Na_!=rq?5v>KGbvv}CL;jo;I>Nu7cTaa0XD3Jd 
zP91G+tgS5DTbP@fwlisK+@_5Y-_Wpit5yaEE%o*F^jfrN-W;}7U0od=I9i)Dfwvp( zcKGb!-^a=TEFPfs1hmM2797yZ16qVYixHIR!WQ&OigYQk7mvMw3oh4a?v4d7gfYVy z$=>UR%h$8W?_6rGUX-P7xP1Le-SFylE3$IraOCS%>ZX^kQ>hy+U!P)D2M)W)vc@_~ zSu}fFW}_N!DbRX%aDe&>y;G5@3(CWzJ8uelwP9)STDw^fv^M5HOlbY;cG0ZD8_V1G z-KTzd((Wm$R{1Z}S{+>&ti8Y0C;qN*udxR{KXyBkGWphh{j%qeA_wH%DnC2@=Cr}- z;-<&8Mg&i&2>kixyl{^1NS;oiSS2XPxl!ephHCT9@uRwq))w`B?iO%6W^CyC>(3Rt z^}40h=jZf>HzM*hHdSuT>nu*+<9_FS!2{QdL))K;@7cwTeA4r3!#fX$IbYwl-KMae z6Z3dL(tx|?nhCBQ%`49}GCs9;@QS4euQu9rIDc)vf|=SZeQes=lm6x>j*P!|+5a=Sa2X;@~vi$e@z)0TB^_0ZDl*emrUQDA3#LzV7EqH|Glb-ui2(V2{t00eMbR z=Fo|#G}d0M_fjPyYFx`(zlz%5oIQNPeeFxVAB9Su9H^*xe`LPLk^RkY z&)*eLy5Y{9DT}YKJ9XloZ4bN?_6ux)lma4laY_DF+OI=%N*(&Gph+QXwSGD^T^k(&j@Hmad zkqTKV>Z@JswaTW4s>U=O!YC*AKDtzALjKd7_PZxKw%pfX)%+Wa8Xmd*V*mSxlZ>7` z(7biy<_!0zw?2N~`zS1G+I_>z&5leR(eA+4;`m*X_+I;4lnhs!6&>EF>OxnQkdd0a z##L*D3q(1A-)~k$2zp)>Hyi&{BlF#jhIjp^DfK(CSFwwrUFeo?%>u6Z^b&bLh>z;t zJN$ai!{K)dLo}bbe&~A7!7t}f*6Fncj}x!%>Gi5A&&J@>;lp!I9K2w&c(32)4Y_TV z?XMkNtaA6V`H;u{M?(vZttW)+&F?KZrO+~L)ROiR3x^|`x$EXPIN9=v^6+`@6|6`A zxY@SUOAfTRJLzZDUg@+rxMz9%au;;!6y~;;JVuZnxHI4Zpm;ZOYPTx*2o%o_n?k;1BJjXKG+&)CIrdc>hra2X)1n8}7Kwo3&hPSz7<}yN?@ZT{?TCviQy7 ziv&?Z{V^o(^I-H&!`yUNYjc}v7kVaf24NhazShXgxb z>$f2%jrTNTP79+;TRH~(JY=!6xb2R2*IaU~rZveB?@^zzt*zJj*Sdb!%sr?46&5di z`Zd9&!+x)}_R^N>tvXvYQTlYqug~6jUIU{a3lnVLPYL+-x#Z>16F+}VO1pO3IrHMB zO8Ys}3RKfpj`u&byNhtk);{Uex@u&lw>e*#r#|aqj<)Zp_hYog<(`*a3M1!gosfDo z8U9(p*zNW4Xz}qRxA=R#?%FR`DY2S=I*-|ulG5^!{v5ZlBMOFxrI*Hg6c&nyxAwKt z9W%wg!D_3PrjHDm?oF8V1AeyCG8YEsq+a*STh#1y`J*A_?@uk+a^n5%Wrbf~9a7EN zI?rH##ADml7Vj;lzfFj^u-z;ArljSl@75Nc#l`lILo!uQCkFYK`nM5&_IdGnkHW?i zSuI1;Qf%5~W|-&aoPJ)B@%X3HrO$UJ{w%$EC?YHA=kPr%x~pgGNpwuvwyUjy5hoI1MPIDO3) zt-TMI>EcXX{};n;eSfS747_{KFX2ja-(v5=l*fCGL>v7guWE#3@Tv-8kI8ty+7bOXKGWTb6C|I|cCbHrzcVkZMW>XbLu~inUTX6?;-=jrUgM4nUqrOsvLMUkm+gDg^CPU<_4Nrf zo@kocrp<~HzV7F?MrOI=EeszYZ0{NH!+fkpce8SXu~sL??X_Ha?u+$~^$s15&087$ z?d8z1Im24VuITtM#=vB8e0$#k<6=HGm-bwBJ8t~L*;9P?^+{}|)^M^>;MGYRrAY~k 
zTf2|Hkf}7`=Y=y9cUd12&+IWJ@^Pi3vX;Q8T$1}h)e7JBj~a3MRkLI zi}nxiyXjH7Px{JJo;yFN^!nP*wde7+$vt}5U3QBd+Q`+S#M`~q+*FVLVITcN6*>;; zekg8;`knoQ=N|j+SCH*A@TtzE{F_6h{1G$Wh78+1bK}rXX=S4n|1uvr zyr*PzWQQGNl3I2y-aqzR(Yvd=pB8S4eKvA$w~~m$UoY)K^Is@8obcSScf0qE_|M-B zUc3H{pgiF1^QRq3Z_axAYL5N3vcn@quRr8B{dxAr^&e}+%YS95463r!)~{?i;oe{F zr`A@)<&6C9^@{nNIPJuzcFrju8V~XOxWrApd_((-Un-Z*`*yj?_iMnY@r6Si4&Kw6 z{o{U@XWbth`ZD%$YR29tH;leKdeO(>;k^bCci)F(-Pw2V{jI5ctZpl947}FyO6K+O z=#rbG^xNLh{x$lv>5|o_rdZrRB;rUH;*qZZG3F&PM?bzrx&25 z&Y%0(So6G8wZ|oo$QhUQZyvj9bwJ^Y>$06`@joILTD!Mjq-XYO@#^lImu>S8UjFHI zo293dik5sodnn`Tjwx%GHFaFQE}?QwTS;DeQ|0&-y$fttO}_qlrA_DA^Hj(5nLE0; z;e1i*)dg?XB+Ys7%{?{QTWNNl@tIk(f>%yEsx>sZWLN8?>-Qf{^}n<@C1~@2=}s-0 z&ukcTdq(E9r&-Gn^0!`53dydB+PH1f__CZnP0V*T*)G{(a%RW&7E?cN=}^{j^SD8A z8{J&?Z#rfEJ@d1N)4J@RlQyhbeq{Z%*i(5g^;B|?Y;oN;`*QOB1`jUp>AJho-oQrQ zy9WtVcZIdNTQJ5;_n^vGzkJ)(X$NxWJU%q7$l&n((Zi3v?Ua6m`oA#rP=4w;_3U>$ z^O_7<9=O2n!()esVtAWJsZ=LJyK?OJ?e6D}O?^6L&1tZPD23ul*Zc)?RBd z)3o%ezWajM_1pPpcAf7vL9P2vyUY>i=C{ZT?W%E&nYl;g*uO)AF>jVX{V;6$%;hhf zug{zz%q?aSb=hjXZ(%Fjo8Q*W4!=51r_t66+ly5bkE8^gRJjqIIeLF{gOXwSS>@fH==Pod z^2GI`q$wh;4i_gzc*UPw->Bimvtheq&Cep+%Rgf;bLtdxvJ{~(?3*Y6-GDdy+X#7H1FTy8a z%H1#XuLr&?+xK|P!1?MY9A4#3DD-5)4jCP~_S*KztObLEYy=f|+9<6JUwL@*Vby!b zc}MM|a(DQttLd)n7t^ZB{K}@rGbZkr7N2c&xlnu2)+5_3JR9pd8jstQ6c=*lo;Ghn zz^|OEF{{qUUTE`iw})EsPDQX16Fl=U~ihzdBvh-{eM|ouQ=e}?#JWO zwN1v3nj7D&xYHGD`>bu=a~k(7aZ%(?omMa|t>{_PuOmnN9QMjPz5Ae#Mk>kT9)j+- zvah6`xtirL#nCPC&Zu@LqxQ}oH|fs13#TM^S`E|Ky{`CqYn{(i6SnrbE4tWpk5>G) zk?T$z=yGNXFX>m->K7|?+iKcp+D8pAn7^f2$cz(X?{~L+EV1-HaC6CrsY>cgOY(P5 z^lrGJwe{|nUfpkaSPhH%>*Uu5uQsQJ4lvBxb!ozt2GS*h10f$(lpj<7-(?=)9N>3s z!nFyRy^d{8`VzgM=v?L(*9F=S`FoD{iMAOVsNHZ~i(S1N+g^+3z5Q@{_o;>71xIX- zUSHlnIeB?efZdU#QBP0t^#{InHFdWQSo*02v$aiN=KDo`j@|HhxUm1{FCQX}7xcGS zyxFGtvhTPJCzk^08_Q~T+B zE{;7Ny!&IE&-F{29LA>Vwr;#n`Gl=fUfF;y!nBW5Zs(_Y%*Z!dXP@S_x}>c5!Qc~9 zOFUbz^Z)8_T-44-V7)Au*R#eYCc%KYt|oM0wQrz`c*JE6l&r zuuZoqZQXe}qn~=Vj!&MrXjjSZHY;Y#{q(HeMfVGyUk5e|>G9M>CBd%e=I+VjGm3tH 
zEt~r6#@9m~w+{Ym+En{9i$^t+8cWA{f7ZIAKGBU+tw9|4B#ftoBBuEmqPIs^O<%LWkVS>GdLbrs=)hpEEO*ZoOQ8+hadp z7=E`^o_73jhny z&@V<4TMY`i*s1df{SCsx>^aKMCw$nn;X>i*wfB`@ScY3pZ+vjD$zYAdo)NC>@73NyIkF)OLPV{IxLkK3j81K^y*ZUXt!j`)t2QeThHCtB4^Vn zlPOPv1Fw8l8uQqq?b|5D9@mZ}JYTNWKP7uq|4v?S6a3?x70d1BSuAO^=4VK@s@UNd;aiBQRpjp?lSW5sd;afY~0oDozl0tXM-8QGTHCVxdDw#LyT9gJNe;k_{3u;;vY9%dnj6< z;dFb?;E$^m?kIM@KIh!R4Xyiij&`3u&$U;&+2WRdGm017HnzLj`fJkk_kMnbZGRg1 ztRMQRv8d>TjGkXmp26jG>ul4Jj+WZCsAKWk`3KI%Hm+wy$T=)XF2vy^15sOlJKbk#mF#UMP_BI=jjg2U@8 zjyG(h8T@$4u?yQ@drA`b&AaQncVW!x_ah#@oA@Zf*|gdA1!G^TkF>JfVtpW6;pYp% z!OKOH3|k-Czgui(t8-LZc~56o>&a)|+`gGN-fvt;+w?1gjiyITpWN^Cw^!!^+U6+(9plBO%Qo7)dDqxL zug?kB8!vX<@^=lImGRwgUh=4<>)xL~^}IURNKa|nV&5^hW`fY>4)dfE%`M$Kj;T*dw{#}QOS07u1gf|?w<5c3)_JZw4iaHo{Z!+g< zU}4+CU&l<1%3jiC^v>C<=Y4j!s|r1}+_GyU_qCC~Ui#Q}_IBB{_syG%@%|5g2tPK; z%o#JelUZd>*t6u8J(`)GYw>9I!MP(WMb{QC8FqV~?VQ=38Q)tSb87FV`F=(7%HF;E z*gkiBvfr(2+N6pHGXx{m?q`3R(9d#6R+E={dW&=qUAXeNN&fJm`y*5qHyWWhKYP}g zo=TM$Pp&gxt|nb|yyCjo8oNX%_1ycf1|-fugZXf(+*bjb`pSD=Ex1@7{nlsu)Ye^}$7kCtzV2W#v1po6 z1J8zr+om1Rd1%sBtCP+`Wsg1kU)?rVe7b)6+V8z{Ej~Lp*?4*7UF|b($Ig4G?AqmS zOv$w^PTIlSefCxC+ceYX@C~Dr)8d}T&r)!Iz36r9=oM>jzv#O!bwIDJz-Z_ER)_LJEY*%Qo--q9pRZl&@mD+3%&VHJ-SbN6^jEJiq*K78M?|AZLx{61&eTz%_h5N7tHx#4_4!*54#t0d_~ z-Q3pe{=J9Ay-swx_W8WWxXIt1?>+xxv95o~#I{9m8zif*=^;J5tm6&-+y~E=%-r5n z|A1yv&Ynr_I&NLkeCCE{yEA>`_&PllX3QS_dcX6aHI4U1INeeW@QGPDVUvgT*c&g- z@>VvD)p(otPFp!;c*~eJw@hxo46t7B_Ony(Xm1ziJEIWwLg6_T`<*>kg{Fx&EnY=>6%#P9KX58+%dzO4XOcntgs<*m381o7I8u2RZdr zJh|9t=u*mUlgr3F{kdHjSuYoD8uj9ozgFbPE&#-ZPibFXkYxf2Y=b13$1Mz zPSN$Wf7U2Rb%;{ox+BuGAsUG#AD0WYju}TKgzV`5c!S=EEaen8w@x;_?H5u1U&~}) znyhN#{!Dej{7S9fDZyGrUNhAtXY|xhYvi?jakG6(U&C_-$I=EHgpcyo-8tmCuJS6o zCUbPRH{lnyw=I~IXWMw!VEeV*&)IkSblv6ScVCwgowhsPI%wxOW!zee(K{K7@}Or{ z6W>m@x^5KQR;PVs+aoXajBQ0TjaO|N95}AxT;Rjb?S*~?dBTeL?c#2e?ZnG=Uysmp z@r~H~`B}nZi^&OHO4d%<7sgC6*r*p@m@_jzHZVB)$D7LN!Tia7m#07T3-xCFOV6(L zch{UbY9>!_l-1?R;p;tuhpR0(=Vdl{uvhZRJl}>o?R~f0v-1qRvEB1rYhSls3$MGq 
z9r^ae8uQo_4lfp-&X3SK-DFGiOLuq7zBE}7e(}@$@{2=_hZc^nJX3hX-sJJ9{DQ~d zr0eh6P3d+web19?&AYi>JN)6y=XQgJexAFcpj@%JNqN?TZa>A>*Z(}z%B|9S@srAD zV`58B&UjlY>aF#@{>bW8AP zx14qUPjWQVyJe*`Tc2fpU#mb%abdy6tFedLyS+U$bzXU1kD1|l@A}N%BRJiB&qeix zY5lirrBw#JT^9E)cG;6Q;Tig7Q7EU=Ij8Y-&gl#dgiyxJ z8OuBrLdcXTb0K5Okc1>d6fz|G?zQ&bmGC_O?|a_&`mXD}o*n16*IM@)_geS5*S)4w zcJqbOhbbd{ol{nA&77yx-DY0yW5&{$^0P}HX$GbGt-X-?bBbU~Wm3ZyiF=QY-_NIR zoT##PWtyDQ%F#u&>kqreuGd`Bkeg&K$Q}GV_2};C9!FKTE9G6?zcw#8EcVR%SG8wc z^qdcIn;#y!XOAgwdm}2^LWPgl-vt@6m5g{Z2O%0(qmtbmoGybs={jpRp)hM zUv)97eRbfO(z}7YweJ>fvuVys$ZVE9=KOux$cNtzzFhe5>_^ataYN2FUMXNSMvF7c zK9|{)1@?P*`)-=^?Sz1!`}sZ>?)Topc(PIL?2};?JxbK&Q%g=23$BhB+i*2wY3+q6 zb7L=j^jLc_@q*IDGF7XwduHq%t9*RC{e0_Ed!tV!Zf}43y15R`@hr|a^JEL>1v>53 z3Vg+FnHKtnHSM}VyiZrt8lRm{WE z44d0^qxI(e?W%=4$CXdQ_@IPK>y*>Y zpG?29_+-JMTh@(cAA5W)T(R}@$^yFySIoZb_|e%lE-S`+yVLmaiy6yLDDJa3KmX%x zwXQuA-&#D{H|gWJ=h+K#8$Z;4J~VH5l47rE{qGry-cLFbuzh3abG4ryDqm+L{#dww zma2&7<9XC?)2{U1U%OT)jmp%UT9$XF_XK4JPPdplZf{jv3u0|Cc^+XK(iC?=Q$) zeLdjH$j3b|WcOK<#hKA(R+SASBYksz+NL#OA1~S#3@$rU`iblFW~|^!ZMtn^?YBX% zrX9M}!%1z5lUCuTWA+DoMIWktTA%k}iD}=3XZ=sPU+%Q;s6$SV-T8a#Ds=jE*Q{SS z{ZYXjmsqzSA-PreU5n)J-cIU2rf$HTgmv%jH@w?A%wKPL&^wp!-BRB@m34DSz5=s?NE87;{RQz`u?sAmO`NR)U+k$p?fXe ztW4|GyMA6Xele?Z+o-ORr@@+%mp+s4KCASf>Nmr5oyNSPe$RS|hG^O)TTUFXtSsl= z&n>+@@5CI9I+C;7O$^H@l&)^nH#u8&SO089kUM}@I z-AjDKyn1-2wR^@6De|gF81vMSFK`bT-rdI>hCr&RQGENA+cXasx;&N$#^wa%z2Bs1 zQFXND^Eo3&)rIA2{dvWv-i%UXO~)C14_%pao>e<4wmvv9(e(22n28F8-P7`q=%^er zK6HPz%;1J`Nv=DrU0n{opa1n;??&CD8x1pVm5iS7cC_)pjOWYds6R-$R^xV6&3*QK z>AtXpK6SmzHPp3T;*T6I-NPLA+1+FE9520%hX?g&NNk8WA*&RTyWqg*FS(o%>M81Z z54w04e==(-6x)wk=WFVm8uZcHYGi`ZaD$uv;Tbd5Cv;6@ub+8)cBh>qm5VCw6nZUN z6n$@B!mgOfPO?k7&FD8OzJ7%C_3e*i@0^%2*yE0eQnTFjrFsh(M;2HQDV>+H6dcX)+>{D49Tlb`3 z;OSknhs;aT7$9HkJuhW|P#QjQ@Y&b1%VW#5<+@eQwRk)x?BMvrmFp|#8Yg|*xciRo z~8&rMPT_SbjK=jB(YE}7?i`oPrTD=cqBD(w3n^k_<)n&qWkP4{1p9KEn%nzmTQ zA+2zWT%IteTb{V3@AXHe!&oO*x<=iPoa3dm>r|?EzWfXKYcsMN*F2UZ)Igb%&6ZB>fp)C-fp&z$-? 
z>PKPiiAk?2)A~ipl(ZZ?eC?>hzLB>6gORNbk8}6yf#o&} z8RLUnlxw12Z?p(p>FdDvyQlMhSN5JuXAb3NUHd3=xzqDanla7;x0sKvo2;XmvU!#0 z!i&MxAtT}yB6hf(zBK%@^Vsf={SL79{}^Rz>Y^CZ>#+KkzIk=U&DTynhD~6oQ8n(^vn?QcF z*9JE;U&BaNZTgpxS?SS*NjEn=>XIqD)!@4BdZV5rf(ye+>gM}S85714Z$D6lJAe4; z)^ICub;2vZe$Td_(*1a6Zy)uT7sr>3{Bii9M@5L+taJIso0IA-($?;`e#>>uI~2aB z*v_W(f~(!j{cHQY8#Pev^;DM$iO$!Yf4Es5yn3VlRQ<{74*@6o)=iGNe=SwQyge;R zVZBbb<0@~Cf$uYU%Z){#M)0uH$&v&t>>;XyXu_f!iE?T$vf%DX> zkqvVm&si4w@$68Wr=rB;hfF5iI>r@T8Ncb=n{nY=`t(*fDXV4FeAarXYp8{8H$G?E zm=&C1I{oMNK9;+2^!uBSldlJrY|#H$?X9ZWu(o{A$@8%vcF()$6C`Oa4p=dKo=i;& zLpkHnb))=b&_M%gw%^{;=l3w9LcV-Uv=PW z-PWZz^R@7iGCyGY{ko-*=xo{NSvrXY0P+8+b-p-cRd-p}Bj-l*`<~iHlRJf_v=Y z*(XaZ?2?|O^js6>@Ln{ z+ZXSzzPbBO^<>Arafh`)>c4es%L^rkBcnp6?$+p>6Tmr9N+g1PlS&_!Zb9-fs&rW3t*PiV$b=uoE6T*)^U(#jv z>K%4e_fKd%3JtL~?B=Ih)Mw%H9V7P2+FW_0_2|hDr)m5~eZAASJ@=j6n;YnpGUe1w zciFOtc)xM|)m9!#GWjrfkdZU*vRaGqqqMA-xpOUh-pb&uJx{$@?Q{J@&5yds%v&Wr z*^i%wO(s??=x2FM=n^BbSgAU0R_=Dg9j|J%9?ja^J$CRmMp>$dN42k;H+0~fuE5A-mUJ{_tJ3Z$UKs5>qpxC?^@53RON%GlItb*aZ#a20^_t&T--z({I!DfA_fR@_PS^cGQvasz zn~x8-C{=hB98VM|8Odwp=&ru}Y}U@(soJjcyBrQO0v$>dJ$juQX1Xinb8Kzy`5#~F zS8UB6`DMm{m^gGeNC!Ww6-NBdn_>>swZ(eAG5IHSwfAi zvEIs?YcjlZ)b4K{?NoTa_*-g4;Olo^tu}AFeZcleq^IQ5<+Bs~D=vCUV3)f(4rY@%gRkYJ<5nsm0cuSAil-9Je^NzoCIdAo@&eE>pY5ASgq{&xAU z^; zc|CXh5KVEe!Ss@yT;{aC<%X@q0RCDgs?%G{B z!>+jbSCvfqJl;&Z!Ki^ZaB=v6=KBLIa_Z+w-(2&)*Lgg684U%viUOeyE}_=B^PL$CBwZPs;3 zh#GOpmmidEI@pgl%Wr3qjKS{w0hb>Rm}GIh0n!vQVyd;eteZwMBH+wrL1uq}fDeI0)=!=6xRHn%G{gg45+jqRU*|B%xhMuQ) zZo4H|d;N%L>qfckyY&V0y)Lc0XS*wFs{gz{n~EToyPhsx}Q4W)ra*e z{kC(ZKcCjT$u^$8=%d<*6Qg}jR7`rHxcv31hN=L=hj-#@HFCX1?zg%TGRV#G>ddG> z&8ylIy$@N+MubcZ+&(9KrP@;Y2}e|x>2J#U>grT^JNNmN?5ZWtYJ(a5SJurOn)_kO z=Zj6@P7|H)me_pRxTag#KHcc!-)#rI^|N<4t@wJ*Y}38HviqiQD#~14yS;Y8<*Ai3 z^Sca)Ygw*6DoOEH=1wh6X{q*wGS_|Y-W#b6?cC3Ledk$QyCiE?_Rx=6{n>A#>s(-w}U<6 zv%4!>n9H%GLFc@8Os*Rq;+b=-XGK|&pdk3wg56^>iem$B`Gh8HySOklwXpxnPj}OP zM8vM$-?fr!I=uR=V-Vf{UyF`hHU6p6n01dz2d+KeXLiu>vqzr{(sD34vDamAih1+P 
zhZjnNIl33}c(cL^##+ydk?HffanG45Oft2^m|=jC&Kyu9!P?v+}=Ib>OVh zTf6YERWEsS-!Cf2ZeI4|n#%m7hVjEZdzx-NXKOI!$@J@KKPTvW8tsqFbV{0_d-Cbh z?8L`EHYttH${E#rW&I|J+iBT`$1_Kl+g`S|&FOw<^P`e3VVw_YIexwEnjLj%nL^)( z36CT1%=}UC>SFIMCtshK{_OjN!=0yu&-$5{RKY76n9-PGazMXd)>a3fmxb<2pUoa0 zofrGTdQ{M4lNiknd*kwh)QNs8nDhGa4_=FVaii!vch$Oc(`NO#pKvQEd&t`$8`1j- zcIRS^gvE796ZgMluR4F{@ylL*`9~_(^j7HF9yTh$#omqo- ze)n}&4FcOyWl0rfP2TI=GkhlB^IRA-;PVA7OP7?x;?F*p^S%%7 zljzNw8MBVLV8r1_XA{QL;jdQ5%D?njx@)I+mhH3CUlI>WQ)7(EHrG|BE%cNG51Kym zM7nB0H@BNHlUG{YP@Zm+T)%UfJj*icM)#~E0p&|HyARF%xF)Y^$+Wu#*T-Dj7jXNQ zlIEv9g~jPZ2;IGEvu8N>(Bg*7>iI_HW1p$&iJi2+PttvPzk5XL;av0kDVgPC`0Iyl z^t^iJpl+=7o`9(ymfmCL?^*fe)qN)o_ z4(_Z!rLkh~=2Mg8`x_4pslR*C>nEez7}w~ym5=S*KN)wad1z-PRu4^eOz;}-c3-LH z?y!Rnu_~Y2C*{30kP6g;s zd7NUg)$2e~WUyyYW93U0i>dx*LbSExES>uc1~iLy#tx8ti8A}T?fmD0O)>>X9Eyvd znG`5Thum9aJ`5Zew3IN#s8#-tX4AVZz0@ z_um5FXgpW#mlEDvQN6+^d*q{HBh}3t&rKXZzesLJZJ}#^EZ?gCpaUkRb}Q@{vo%+5 zd7jN+9gF>{y4S;adfzTb9uM-|y7*4yefNk%X~$Pogti<{UG&BIurj|ndVbOH9L)_~ zujNR8K91??H2C!Rol!UQGP-6eg?HXH?p(lDaH8=X#TWF*ZS&4_jTO!mxeW7Y%(mX9?wZ4y;M}c!mqaKF2!ldGi=A z&fD1ErT$`c@Se(sSPxY~(JD=CmdNLLZt{d-tYI4;RrZp(6REsng4qgb`fU|weP^99 zGae4ar4zd)njmW~V-U$2gs^;n>rhIgwLE znYI2Z*N-LeYX;eJf?76kALw7#NcpfOZFSvtQO}cewjRjob3HRGaQ@qwGxm%yJ+tVp z;>^Q^_cc%R&-H#ZadlJ9#Efm+XC4o-)%;6T#a*m3&A;cnY+p7~{j0@obBikvHLgU5 zt(vd#O>V&+#qT|QG*nNOE4{7GJZmSX6}muL$Fv&haLfAg&_v@IOV0TC$8To1WSH(K zQ7%m%>u+Ba=sJ95yyEBB_Zxes+MG+*ntk`n!p=FHloshv%soBx3#%_LryyD1;mN+E zYj$3$GdVW$n<%TvA*kh*I7Z8KiN)G@o0WZflrJ#8R&!LzSL=<#)NCJu7yWv2_LNV< zZtq!WH%h&i^}Fvw<}JTILidnGU~T^e<>o8PyR%R0?ef{DmDqK#z-7u>hpQ(x=7hSG zF3EhX(6rF=&G$W5f2t0cHQD0YHi1iFY((@dor+D*BMtYcNjz@Jvic~pMsJ-ebFc86 zvY(1tRb@*?-s>|rFPuN?U!G)=GO&2Ae%bnm!sI5qy3;=<*m4^><#coU`ogu&ab~bl z#G%ZZS<&?mK0bOEG-uD|qamwQC)KW>vvK_MIZ4UoyCyvBHM8!W>$`oo2kTGsZXB|>B&u$NEoWx< zWxot7)78swG`{kmI=0hA^PgpkZ@=YeDnDC)Ztj`^6YE}O(fxnLc9VhAm%P(H=`_VT zzvttToM+O|XIqBp3l|<)6mOIlm#x2LpZ-q2$fApHu3sD#=H8Qg<=v(eizim8aJx77 zNZ#8WavOH&{-zHB9(I#FT=$kHzg3GK;y)I6PFe<9*48C7l;(uIUxLVRGg!o06@| 
zQ--?SW}3^K+4-*e$E=v8zGs`}+OBzH_wcEyv@2U6Ha`2^ZW#X&?#;t|6Rn8rUQ|y)O9v=OV-%oab#$ougd2F zBk!>lhgV+O*w_&D^L;<#%%wa zXRh8DKf<}-nQ3COR_Txj#8Iyug;MSB(`~jFSn3qW4X+54sV!KeA2s=iR%%d<{o1<6j?&y63IgYu)@UtKx01 z_)bCcP1o?782pQtRWAN*6ha@5MRWuKWtn%VulugO}j z>7>wdKw7!BV8CRlQ+Qt7)v>v6gr1>gHQd*%Exw|o1C|QQ`p6#a(QU6;;J5x)>*H@; zKY!Jx_e8(C)#G$^O~>RV?>w^Dvti4+JwL3&eAgyEFYLd^PwMvg<}(MAyzrot3d)bt zhuO^St813h{ju(g?-R#`t#~JEGj&#^jXcr6WXO}XK2<-qcVGGOgmR>MzErf}_PbAv zO?&U|`MaTp_U+IvmD(Q?YV$D8UN_-V{6@|V87U?h1)i`~9ZjV)+q!;^D?R3|y3*Yv2?WKb$m7a-H zPQ9#GweVduR8u4UbxEDy8Y|mN!Gr28VjKeveyWwdy~UrMAo+Ri>#@BnpI?aEq}wy) z$Qied3++GGmom%uZO`zp_$(9ToIhf)NtaQB8YHim5_;^t{Ueq~NS)?dnRxjWO<_Gb zbKb}Gz?Ky*iW53*Qa*A(GikTZ&v#d@ebZTIJ<#jTtIV``y)s?5-q&MN(iKy=&g}-BsvQp%+@F=7cUb;)@@3_# zVx!t6pN!5~A6j%VYu`?Dy=#$&Z;TtI)v3?5&2M|3_@LA2;8fXEVy4#<)mX-zC2M_7 z*L8BW=($G{=5Eg6Zy6V{#$Z{x{LEwJUJJILn$|GwsxhbZ$tgkj+b@^C{&@LfLBsQH zW7KE)sBRl0qdsKN{ikQ9JrDIi=Xxi7q2qYz*6Vf)@=bPW9UQUhZSb;1qwDkMG{^~l z1oRl_UVi_^?HTS_JFZ^e-#e|Wzj3;nqV4fTYcDO3)Oy{DRQkLv%Z>fPa<0aiz=7ww zF*z?fCC#rhmwBh8Q_b4o_FQOfZC#ifb*8CI&t$ha#lV9psh4ABCR{ljkybPKT>6xG zUgO4getzjL&*z(Y@TxGo?c4O0yC1bV?sdk2XLIk7VPg8`l3rg@cC22oIO|T|{=GEI zCw2Wf(qU`k)gf<|2JMu&{I+g<=A0i(4IBCnkWX62zvp!+$!JGW^+=!XA%PbM&rp!e zx*)8P)Yn8r>^Y(EZmhuiSeNv{IVE%TZohcv?&Eu;Q#a@HeB}p$I{1cITZW&zFI>SK z77cfN!;QlcVSM}sx8Dxb6Ax^=s~2B2g>FD?z(-Q ziLR6#q9q@lF*c55nAn3V5OnE+{B=V=_#_D&Bm6)_1U}ZG?T5*X;)?iO9+Qo3ZDodW zC6WlZJ^FWRxE{XEmLd~`a##s%w%8|*tL;WAI_h?4E))sa=3*`{)I5}9uE!yah#tL^ zHFY!wsOolgAhZVbFt*Si-p96E9_;S0sqwFhe!JD@??S_JwlgO5CD_JYs5u&_M)O(#LY)R#$XKJVOu?3D;_) z>LR7?BSr}&#7>MdX6$2sV=q z7rnw=*G#y>JRDt2%@j*S0(3nuzQ7p@n=T&6eq1Q zm*e?oIist?fB4a;Vn-J*qj*IOXmW6Se26s642MkMUt0#;SS+NzJ-WXdiV<$rCgNBk zU=Mt_bDkFw!fx*m7`+5FFS=1;#=$UvyX8$7oCq;1gomn1$0v!ju%mp9Cd@h9SaZIV z$0N+e31ah5G2(g=d}%ys>EP+?v%9?p{tza%@4gGUv6WWu5Ruw$ms0iIB9I_3loC!o`mU~r+7MexH&QXT^uGL_l^;G`M8aB z^K6Z;T~yvaUUcZbX15W2f`uFE4CyipnZO+~+L4LX_?#buzI;JMx+0omLcaKRDp ziaR{QOUMbOiK29JnBmA(pbkNfsv%L8}k}PZWp}T3t{t4_zkV#)oUB 
zMN-^jK`VgH3#YG@4%$cr8+t+!11{VKDML&+h{B{;xZ*{KQ8N@U;1PQsDJ>BwVIu+YQuI1DO&5L^622@F;kD}qlz$By`NG&dTh)EnhSEJm5*aQP9C zD-$57=!22R#HK`d7k$`iy9==%6*79VB@7W)2quuM zQQLwjsgmVD+(;~C#0jMEMFniOR74gApB2q*s~Pakq4o(x9H=PlC@9QWXj(WZSBR~( z_C|mV#zhqieMh>!;`ot3mC%+caYA&6fEU4L;M_5|e6~P}YB5>wqoIzn!l1^6LV<}< ztzclG%?&D%BqB5dx*-u%uvnmW0^tLMYX)B$9Rk)=VTh1KLb!8d!DT3pU!1^ahl79u zyo5tJ3}b||cqpYL-7p9fIwis#%KMMOAvPso3wR8U2n0(J4i>~X7CW3wwUe(K1B4(c z2z?@MyAq<4LB-}q)je9Ef#wUHUjzrD2Z0%yW1A=h4U!bD7@??(gO9-CBc9UH2fMwc zm?w~s?N$KeC|sq;CQY|2R2^6-rCe4t3X=m}b-U#H!RSPU2QTRJfiREDIvm8Z z5H3`1F?=sn0~|Pjk_ZsHF|#CmMbW5sP$nlqbTkYu*d)&+*(t;hb(=%9ej?JN2h{$E zaDfnHR!X8Ld|Y9S335BsA~qEu#Vb%LP!*4ujD|5}?MLkx@kID6p^#q=SzNd}dZH$R za!8JKQ2VCy@7h78g_aotoDT{Da_xgNPqRT_rl__;Rc4W001NjE@QL_bE(aon8=X-L z00~OO0-j2alaN5hVnB!Um~N}EH9yzt%W2=xDY#3XC(UlrF()Yq3-)6p(4hDZrcx*NQfmU%U8LdsJa0z^cS3 z0N{Zfp&)`UX-iZnD>?#t1r&L!A7~u73upYv5sf~D6qjlxWbvS(a}v<^g%YH{Bnx`w zFg`9CC^4vBbf@e|wqVq*IUw;9?aWd>gaow}^&qr0=mCU6@Xi(h8z!q;2$#)*m=O0S zIjXN9R01Ob7y%;tUuan&kafbT%`^6a^TCDQ6}VG~G&B_Wkr;QQZQsD}w{k-%rt?ar z1}fB_=}?ij(NL8eaS#S86nddQd=;pzQVA}Q*7_*oLc&lD7bAHPw-2!p`ZOrxw)`NC z4fLzh4w(!^1wo7x0K0E3Xy{^r4zV3ZI5@Nu4C!paCxp)%24y5}=L+L~^l@8Ve1Y3? 
z8J@uCF!KlQ!hsJ4EsN@z5L*$$Xq?O9qcN5zs@seJ7f-hU2I}<5z83pM`1h zfF1}6%7+pT8BtuaJ+xJX31EYI5W2&n&A3MqabrPj;t^${$q@B)V8A8rMkP|5wP(Em_k=##e3)Zel%jb@3XejPT`x@6?S z&=U}`V!luWl`ItAs2B7}AZbu_@W<7IN{U%pn}keP)W0~59>pN*xUbt(7xcvmM2J*R zVqD8-ND=!*;($0BL{!9jK^y@&ER@4UwG>ge)rZ#ngNC>Dohn8|Nm?^TGkj5Wpmhnr z0U%P~VtBlbYCWhZSt1cDpWLIXK{lJZk&bUy{HPKWHAP z1LH=d*W&Na2Pb^0>~3>>7Zr*B|rG}ze0 z)Xdys$k1Vy!$(+I+t`k@8#Q{2Ju8F_qnWVqh{z~jG+!W$5s4+z*tqzF8PfuTrZa>8 z?mjbd*6cY+bLY)ZPFb*U(c&dbQ3V5Q+mecEnBy3-?4Mo?md}V zd-v_nK5+0*&fz0RbB`TAaq?8&>HITi3kuJjzi{!=?K^i%@0FF` zfAH{8#pB8+RZpKiuYOVUvi8;MH*f3Sy{~Wh@bOdQ=PzHsHGOaX@w0`j8>q2jtuxkp zNWwAeK?5up*uhL9t~y`@-3B%6A+=CccVK85iXBanyo~i~Mg*T5N7XwNHmdW$M4>MP zE`d4xnAAiwp{}L-6FhX{`dcgKgLoDeLfv`+nbk+4Dk%xpMzi0C#uCt zgpEvZ_>cYukb(v|0y5_b!Z>k+A=0oL5g|~55o~j$$~R{rg$h&53gxzYWdgYwTPTDs zpAXU#*85R|Z4j!Z{I*dH8hpSkC5*!{QG2JqG%Iv=yaEU@2!B%Vi0PuFF_6}EFcMBc zHsTYZcxs72T?Jpjl=6|v3Zy&IOd!Em1VUMx18qZuas{Dn*2n}_B9WjSEomk~k>(D> zAq*g?z>5SVLmH8dgbNjj0%Qi}hABt{8$5R*0j*dl)!iWyHYQ}ZO9jl6f+hO>)e<5D zOL03(F^obW#c_0Saij%=ie*W_I5zZ1=ueRVzhDw^<03!^htxwqMge3Iy&*ry5{rE8 zV!?7tn^|N`n6e2WS>!~9+*RU#=5KdvI0mh6R|&;G{QVl=0!)3HSb+a%HyvzS)-EK{ zWihr{-p(HRzf9bw{Hqcth{0gfn=mCTh;rf#p$n$9P)BcJKgd6{7=o}>QQWM!dGipe zqv_jsGx?CUnKHD23jO*wcgUv<$n2fq?gn|do1zP$Ky(E=B|@3#MyL>~gc^arF2f+& z0Q3lGk`?rBVsb`8PmgV`rzeL0)7nv?IS{0*1Nny!!*e`$VKCT$z%|?!FS4-38XlNH zfdP(z_?HD4ASc0a4TeN$VFs6D+kS-x7Kco*z6wcGScZ~-&KpVFhB(yteMRpM#EG43ZKagU5&cCu}Mhieq zfI3PhU)Ui?V3O>pKX``m3`o5RNN)rJFb9=CYBE$J890%^xN&WPLO`lbG)sW|fDt=0 z4E2OYu=WEc$Rwb9Kq?SlK$V5 zf<_3jf`1-3#W|R*as0D1LE9t~CH#Ly53Mo*$^`rQXC9SxduR}YEp+;J()nNgqnTNI z>_W7Uv%SkdeW(2s5Yj*O(}7NgkSRpUBKX^nu^fJzh)@;}^u~6P=z~M;7;VdNK{65I z)^sysNua()85$`PWQ#VFu+g-b8JCZG63(AJla&>F5=ptOlUg7Z2r?dEouJYW zV{nkxQz|hPgqq^{J*2^>eY{9cCtWJU-`2U6jZ z%ax#(la-zs!y|AWnLNA(02T;Y7(0*$b?uVlbBOU;$eG|#f0snSdA8W16bBt5|{QN6Ml0$i5o1*8^^ldB5{nQ z3y#-+0C}@mR5^*O){z-~GF=h7hgm3Lv{q8aIQ>0|H5J{k`TkuMj5ejDT&Pk>7EnM# zcWmEBMIGZfOR|JtvU_3k+l<~Ay`J>JC}-RkBauWF?9fV0Y+mZ3jq&R-9gJLa2FBBe 
z2Vi_|Hwfc}Te=wSqsgPpM)e?XKFcUD!uF%2#u$GN#aA5>`CrYj+3&Ii#;xf?F-qet zG3Er0z^FIQ8e^8NEk;#KJB-GbqcPsFwZ~XA))C{Ysm>S)i7UqYtH)uqKQSJo>{EA) zuT;G-7C24B=#l7)aoVX#7!No3W9(%=6{F|kK#T*+remC>&%&6Uz{dFICKqFwVK_$f zGlm&VGeaBjYv36W88gtZjIUF;nFo#)bj!FEfU)nO4;Z(reZ+X> z6Nwvce!^zP_C}2Sh|d_)48LH^swA;+=~rz2Z2k>n*fkO@rZ-`;q?Sbg$nV%pRFk-2 zQZqJt7Le#Q;0HF}oJnG*n{E6)&gGtobN+Q3U#GI}z*xg_RiP}LVR?Q~yLOO|k zb4eUkMB<|RBu;xlVxRXU4*E>ut0of5evmk|g~T@$LZmV%;`Ti!rD*?64r0K{F6hi9 zpedl#aIngP8h;`Efg*%MwiPe{|zFMkP-_DAQJLHVPR_O^|(C#du#QDz$Hcnc|ex`6B{GmUh3Rg^tJr7wvx z(@2L$e;+!1Y7`xfba?jk3za^5ijGFwIf;I;APE>09gVbeA^jpG3G6948foX&`bH&) zu5UEbbV+oEsPr+Y@MxszK)psjRQjl3>Fl=_M-ojZB+2P;XlxCSuGmySw2{VEI(af2 z`uqMZUVJGnBDEmt>3q`I8XhT5fnZ9~{WUz=BSk&L&9nMj!5*R!lTkh$44VgPiXv_K02Q?w$f?+nm#%l8e8cY z6di@MeMck=DLN{BbbK_@^mMt=NZWTr!kP+?DnB|L8fp3vP>}-i1UcPbv$4&%~9Pki4aF+@RSftxJDo5`k znD-&zhY8dEuo?$8j38Tq5N6r{<5&?A{2~U8B$LICAzkriGcgSKAZrp>TExSIOuRJ? zX#kM7HWL{o%&X%Gek#3UCM>3hi5D_;VrH~-#*8)-nGKv@7@$$4c(6thQsX?dcAok! z6sTYCb`QPkZ_wO_z6mwlBbAIofq;~T38ILir|TCi)P%t>THFa!H_-?^%^fHj1oZbM zh($6T|4e|1Et2VsUAn>?LC3%4#G20k`+vkAV@V?nTJ783)^vVJ9K-gVWY%+pB}4le3F!I}38&{Wpt0P1OG)^O4#f5!bYJ!wk}7ka}Uz(&612iqL#R zS+F1)X0d@k{NjI7+srJ3+CYNWv7tpWiUgYPWs5yw?;w~$g0v|T(@2LPnB2e)v^_;n z+fKveL5hS3mJM?W#b4KQ&?YrynI1ASJ(01u3T+89;pEc`0KKEG2O;Xx5O$>yJ|s4i z8V7>tOPNs@gy>KkMu@uf0VV{YU^PZ*7F@hbBPK76JG)s91qx8xF** zdY}oz%*?EVD$MoYQZt!Mh9k_^@ENp@mYha|mC$T416&me+JP_y9hwU$-e`;Vl;EaF zpuN_%jG2T9ltBAzv`a>OYpQ{kPV!`OzMpWE!qy;D#Et1SKa`FUW+QM%WQJA4Oabih zi;X~wjwoxqsgO3nTs@C|=dorakh3-vC0Q4-dX%PtDXq}<@6i8*gt6Hjt)zTB-A#pq z<{De0mqJB_rMr|9&Z2-#_(=JG&I|OWX5`}K);BUi$Vm1esK+%!N^69aS`ojK>O&@0 z5@L&J=s3|X4Cow%*y6X*8@9!r7L)3U#H5l@Y}UaRMTYyc-v~R{B0^pm?BB)eNz`+~ zc4gSu!VHCO7db}4E=&9}gDVo?7pjbqBV9n~o$L=W1q4X-ju0TWp|ga)5TZseu!>x ztwKmEO&CWSO@0te0Q)~cL}_(}`E9a%a6N)ZApq>!&J4l~R5ARe#jSO~0V&m6ZDXV? 
zz66pokwm;Qk@kU@CG4evCB=vp)3)tO*n<@x5e;jp$+9rTO$&C6pv8@B9!#0iUicsy z!tOO{lT;`JjP0nnZFJ!s2-^qzS6|@A%)$&O@!zlvv|1eP9}h7^e1_&7|He~D0`dTC zkt#sTxQ$IMg2bXI0jypVwT30O5i%-|4uu&M%n zO!YVzF^~)s7Fr-~V%8=tQ)b+zC`Y=TcQE2+Pm;p|`v|njji$hJ`;?(oE_FcDa4>rY zi!kuaF4=`sI0fE=1``svI$7m9$eT}g_gmhhQPq&wu-RM7un)1Jmk!F2lv(~ zZ>l0p(-Q}QH$Lth$YBGP{z3Qw%eGMibwI`MV+Lpvts@5N9hDavE#Qx-CpN?Xe#sDI zg-^B$sC{@B4djHjXrFT+ggv?FtPc{H5Jd-f)H}d`daWT^p@ zne0D@O1b?dRMM3S6?cm8-E9dz;+as;aa=BpBPqX7%Bb*c3GB(191H^dnif1n{JjO! zNhO^@)J4tj&wgPL_b(i&4W42Gv!LIi$F_fuo^<|8^pxYDqo*8OqsOCWTY_R69dkmi zCxVhg%Zh$WB+VzN!Tuj8LRhd`0R8;t7k99z3#DIvqoA1==7{YLzfn?sAh|BLz2P@X zn(IM|DfUPABdGSlf*GhSLg+~%r64bdCIH_6IZ#AMB@sMW@d})-V*w#c#0139+lUFq zpB^kpcc@*ow?Ee(+V#(0kaEQ;d`RkFLT>%Qe{n}b^eGY0kzqScFT~K&9yMK0u zgi)n|g=nguwlE`o6BcF`7J5HCOa%1r6&h$PCqjlx0-t0=@HNO-qLBT0CYC@iFq zR(!WAwkd^lt74kgB+-gxG-^c;tr(^?t#m$V#VxIvr87n=R%yj4tr%tC(1=#WrUhlD z6_fUq`M(7~6+|$F|E&R3K_pSQjlx0-t0??$6QJ7qe|fI6je1n5N0oZ0ZQp9$J3Ku)Jn9{u zo*f?8*!wS^-v8$5^KTxFfAjSH51)Sj;iLHA%NF5?L~27QPZ-e3Fcfhp)q<_f)91A$Fwy>WCfbmNCil=r zHoRe75)KF~`MhWHRQ6bm|+0<`N`&yT24R7St12FH%hnqiUqWymd^RPsPu4_awB z$PopE4tQ)m$n5u|!Tt_IBlF+wQ3?%>M*fow!R8Qj&Kr0z>rkw4uoPO2K0DXU2jwV;>60P#|Y~%=!J^m$#F%%|I z*q=hWuT-VY)PgEnYl0{D!Hr)2*P1AX6FL=@VGshE;3z!f#AFN@#jr4-HrV~LRi4^5 zPwha(3Su}zS%f#r{}v+BUYm|a+nJbT;P`}22pjEl7fM8}G5lMfVkR5T%o{a|fx{6q zq24o5=*-q2a4g6Q6td(NDQX`e%;X3ehHaS}r8k2^Z~rH^?=i75947J6c}g(D4XZZL znVC>!6VS(o`ik?3=8UMb6WV7VCTv1F#)+duiGis(*bR!$P=vEX;V4pexX~Yp=|s1d zD1C+*Zknheq2Sv5qQj*iXK>~coVR2Sdy3GJA_M?^YNUf7(+5rr_VOT;Yy$fz;Q(YD!n^d!@ zpF=i}uXDPi7jkJoY1QGEcF$jYfBlg=8Fs@mv+fQ0>}E46^ON%r)!jokuTQ>S>mkXMB5?69}J2SY=F(Ec^Fq!ub(+8WHA)Ou-jB6>s+p0b{@0{rVKdjX>l zF%$4S(F=a#XfN#$`DpuN$#}{>mH8<%Rc3)q61 z7Rvu@qD5vQ_>CbJ$V$5g$Xd%T1=A?m#l$P@GX_FJWBPtF{XjTHzch}*wgfQ{Y|)yd zj(;RA{aXlBO++ehu5TN|n0o!*VFB2dLMcavT{g^%70|*`6-RRSiyL-^vZx87XCvAJW`AvZaMEC6I zTr}uU;M8bBYD=;wrpT7+eA*ms#TIC#tQkT3OF*l4U@{1p8GSq(Y5BQrvx%XxZzR!7zq(KV^-4q1&hxX9czeJJ+A!amKzN|Fvv9UB 
z9G;8NpUdHsZ+_u0ErqOHMG**mQLlnuq+kcL5t5WZYEc^jQROnAZh+z36PKg1T!Fqe;CAXF40 z1Rz2|p+Ha=Cq`9!{8?E{;bJ}qHN5d88zgQ%~^auTpJ7v33jv?zF~;gZW^z9($; zR(HM&{JC2n6%997-VKYQfO)YzNe}daP^};Tk1y0Xxz$a^F$UgO7qqk>7bFTHjG<&A z^iKOgvrK3mhi2|jf2$7v(Hd4XW7iM7&MxaYnrvWw`rQ)(5Yi5p?bBMAcg~7w*e{x2|oNq0}aG1gHTb10CPD0 z3kI`{W~Y$>x&*NVrXYEXzqI(d6U5h(-&!PKXL#ycOB&#!)8AV10n-3W0k;9Z1#AXX z)&uu5-&#xnO99;h>80P+CkAfqcUeQVJL z%m%arECTcctOw))-ntCw0hC<%){+aD2zVXv6X07wlOjlu0YQXcgY*DqlzeLm1XRBB zttA=I4saJ>65vI^qPyQ(ssT5YLi|w3S%6xAMSzxo)qw7R`S&1xWDl4OXk7;30J;O_ z12*4>Z~%KgfN+c;AAq`mMSymI`41r-fTe&Ez>|+4AAoNGa{)OOP#%D(fS&-<0F~i` zYdnT@044$k0-gj+1Z)P(0Nhpy@dGM6f%pOA0p*OLkpSufGOE6{I0LQ(xD|r^6E?`<0@DG>`$OFs= zOa`m~%m8c#%mvg^Xll6*Xa`sg7zo%5mChqzljv za2H@8U_M|XU@2e*;9J0aKsjJM6@XfR&48AGT8fZPKs!L#b3_CJCITh`rU7OEW&`E} zUIeTFtN?5V`~;|_)YPI3BNsbBUBEyZ@WT0ZeszECTtQfUu913KZq7kbEDG;$@)v8fCkb$CssSt6%s1>6Y zjanpPk$|ICts1my)QVB-+SR&7ozza8b@6Y@G^||kJa`L0+z$mQ21oSb*GglNQVG?Fw7Up09&cQM)!y2qY-(wm7b0`n}FbYF32?t;n#$f@bU>Rm% z4d$WG%lKgkF2N|Q!6fvCC=Ua$0K>2hqp$`O(02;shaor#qp$#zumrQP0t?X1TfQ>% z!5R!g-{TlR48a(T!X!+>49vnDEWkNfhGkfTRp@&>u_U<#ID z7S>=M`Z^dt48bKBg*BLjUe3a@FaQfM49hSIYcK(QPhk8o1Seq>7GM&VU=~(j0lIrB z4}Gu(gV1*>{wE3g3F z7g8SjU=0SL?{vlwLof!TFbR_|1G6v(3vdpWVHwt775bjU_`{Tkei(%zn1ll`3*)c= zQ?LxPum7aoFT*m)`AjPU$5tS8vlYNjP#f<={9hz#=Tc3ar3Y=)Rouy%_TYgD?pDU<3}s7)-(>bnT}; z(cv8IxQ2RQCv?A<^3VsvFbD@=1RjEMI0aL1>00W6(d)<`EWVC)0t2rnfBpCmkay@A zp?x?2Q!om%un6bi!W)=x*mWcAzl8Hk=!Z2JhO;+O9tPe>9^oR)!|2Vl3kz@wy5B?| zVLS9*!FXT*_QNn-hEce3kUYTETj)RRzLkE+^CbO;k+(Abm(uUIvH!w@Fa}pf=|3EM zJLRD3Hp)pHR^T#p54c?Ucab;feK&c76Yrrt7=0h*U=bGK0$hUO_fzg=oIiem{=(5a z=qGf2kaExmGcXPFu>DTvAI4!7jzZ7Ni96gD?k& z;2i98AE{U2#I7TC->b=A+mU(%F2fk?JMl<838x-;q@IDZFbAuiBlQyWoOGmKff?wH zGEb)*sfXdz}PftBkAAs&I+JhdL zfw5;Esn5amS@aY7_8h7EU&}l^A3vOfad;S}VA~654|-u9Mqm*pE~Gphhn_1rzC%Bp zhanj2W4y5Ek|Xsr?7xh0!ZBEc8Mp)w!5UnKz9I4egRll8uq{G8aOiUS0|#Dmq&^8F zFJl~V6_#P@mGomD<9Ze4;rMGP2Q#mweK>X{?Za7Eg!6C-mS7D=_R;^V$n$>k07Jv% 
z4W_Q6Jy?Q8xCocv3ar6Z=)Icp$C)SCem(8NJWRs^oP=|*0E@5$T{nL7=<;Mgn<$IFT9EJu=9;a>hArF69!=UX8f@0P55CDW?(PO!3dm#c zdkyRNowNswFb3z|MZe`aoP;G5Vbv z9H!vFos@?qI0qvc%ELM6ejWYz5ar=K48z{LC=X{~3U+^(@^Bi?LH`8hVHUby&wg_^ z<)I&jU;qxl98AFOkB|pA1@o};9_9^p!HUG8JI?+AeXth>VIPdZei(y;FbRiX1`fj< z9D#GNFH67RBy?X-zV4-8FbBiJ`4i<6Xt ziBHikY@1>oz{$_hp6H)t9US1e@;UkqCt(zJ70+(*P!aQ6m(0^F^CV9Szb@VOPKb-kC^A6*4^czP1 zk#=C@d(1ZseV_iqz&!JkU>*E`enRg*u}+}-hvW%X;T&8l(l1#05&e22?fjU2!I_`X zFPJ+_zhL&C=@(4@lzzeF&*<09^rJ+-VD#to3--x35muOI;U8IFx57U$zrz1yeF;~{v+&R4S@;+7Ec`F> zEUc2}H^aY@XW`$-GxWhU?D{+7fx%VA1A8B0JW1y3Va5Xok1!q>t1%upvc`B|a-H$O zu{z^{8CR|Ddke2$$JFX!m^-#skHeYcYV`~(x@+|UT-;TwFTu+3wYul6?1v}R>VDYP zR;x!~`y*=g1nfAmR?ouDN7m|dLQkz;5uQ}5d*236uGK@rN7d>v=x?vp)3EE&wR&Fo zm|DFod~B`m9);doJs>=#Rv!>Pu2xUNz~gK6N!Z;{s~3e&sMV{&QyI_O;S(8;(8qX$ zr!gKFJe~2to+mLLVJG9c4L-S64+_ty)uY0v)aogrzgEw|&{J#m66}3itzHv8o$oAC?JVf-+1F5`bE>psZ% zg?kvk@I1yZ+{^fdJ&a#?KI4Z2&u0AZV*a1Q_=O?HFMKZJ7hb^lh0kOB!d}MzZuYb1 zGk!Sq0>&@Aknsz{j9+*W;}^b=@e40z{9|%{#Q5RxC5&HqDdUGJn1}tBF&-F&Rf$8- z?HqTZACAHhOv3?~fpM6HDL4tU!po^o^p`VUIQ$CggCo%M9@aJV!xRj`%&VvmF2FeK z9HKrL9Hu^4gaufFC0K?P*l`{8y_b2vnR?&=9DrR1DG%pvp&xMg&5R4iVHJ)*PnvN- zKWuv!<)D9z`GSM*Wxn8OnsouQ?<4=P_I}E}kA8lD{KNKf%EA0yv8e}LDoFR?CQD$n|ZhrY_XfP-Hr zA8-hkU>sK9H1v#fT!#VZdVun<{4M4iF2gkJ{|@D0*Y_v~^FL%A-obuXWL$9QNAwTQ z!z4WXWAXu$KcO5f!7^++%=&_Z(Dy-3n|?}raNs{_4=%$D99y9rjQyE%u9sXfK_-1dNLdbp&!n}5G=z1xB}zQ^*81Rw!26?_z>+sOg(Vm2zh{gHTnblVFr%CJRFB5I1Q_C6?*Ps-K>*;n1W$A z2BR<)Ty4LD7IC;ieJ@8@Leac!r0+T&!^(5>(f32Q{%gplM&%u!o;fJfxdpG&{@LD|tLofh6!Q-$5T-{M=eS3|CuCsXx#zticpaRwxg9 z|49Gg6kLK;=>7!t{)v9Wc^H9Z7=tS?2?Hzi8wTMV9ED4;Moh5e6QuZKQG zzF-u_JnQu&>^^C|o`r{C9?qY|L)9z=7wl*AsB+1(bv7 z3)ky&Fc4m^S6~);rkK}@*6RV-ehKYC-(~CdG_1iK9Ez;hi!gmT?ZC*3*XzE|P(KX9 z-hSGH9WS9hI1Do|1M@I>#d^IAtFQ*=Uru|UrQd_=^#}~X7%ab%al-7YC=ZJ;2a~U% zJRH1|{(g@68lpTrxQ}*W1twtMRg@DQ=Arv)@&nso1-3(X&P75g4@dS>9`;;Ac{mHR z(0lEAy#Qxm85Us`x`x;5-utN!2H?ay_ks0# z7*=5nwv8}en1GXT85Uvr4de-q-blGG(7&5l4{+v<%nvNW1njz*_TVhc!xAjQ;6eHe 
z-S1{SOyhe${e&|gSg*%nWSsehV{j6BGL(bm4^s{X@1flaua`51x_4*R@e~kQoiT(BCtP6N>lK#S(Pp#Ke(EDlf3*Da~Kd}F^X1@j33*_G-=jc#dFgkRf1|`3w_eco`)+Te-#+Re`^ZCeqh_hh z$X5`30=-?@SDP0`cs_p5L-n#LLeXRB!&!d!V$+l8z4ty;@3qyF5&wPE-{hCP7zavAYSJVV^|v4`qCfEO0ax|4XGcn9&& zaXi^bcQi*tQlHX-XJwwfTQtR^W$9%+_?I38ZV>k8meCnb4h#SFDAO42R=vM&W z=x1&FL({s95FaNVmNMr4(iD#q&lB&{%%6-aO}zZMhw6td<(XfeOFnbN(>d$95Pc3k zh2F_ya}&L+{8G2M$>S>eIQ|6@ZT*yTUh>d)zjc3=asl)Z`iPY4v(Af@ix5u}m&fMj z=Rpj841HSW!?JE=J~r1=8ejYLL-oeY*y@+|bHv+-C#CX@esAWdNIXWov2Mh}CgqmU zQ|MI@Hp&^RQu^xN#WMe*YF9jLQqHIJB@s5(g;7r8A>zI-vCbquV$Fxdqr|(3AC$OR zzOi>0^(Tps5$}??ZJkTMvc$)U8!d{-Sg(2XQS_7u_I!%Q=I37-Um0Ia1iMd;&&bkG z_wnpcUuJ(2-$s6nyh_|pJV-q35Dya{Bt9u|bDZ)zAZ7U%BVHyxEb%S6B%6m?rzh7}KA0Qqg-pG@t{s{3whq&Y+PCP-pktc`a zs`xVaR`9iLIZkXIk7Rum@E!iDb=^w&lF~CgcFMQ-YWPB5<9DJhe10Z!5#OxNCmNg7 z7r}Rs-z9f$({Gu_1iq24t97}tUJ%(NE(<45d`JXMJWsqzT#og+<4{Sedcb=8l6+NE zIeBbu^4R?d@=blxwz-M!Lzi~!{-E-kuMJX0JQ4II{B}KtzJPA5ba5O0B>G`ZyBYM7 zrrn(KH}^+!Fo#~m@7nJ5?w-drUh$-C6@T^{*8a%YJtvZT^v3*c90w$s#=Hb1UT{91 zMTl33H|E7$-;L5n`8e@a;$5Pd$APAJnz-wm>bz}Z-Z_qNek1+M5f2a_mNJ_CuSh&W zJScHZ{T1T!JNQOiGd|BF$qVtYL;V5bv&6?FzR|zU{6vU%eM{dz$w{2}2yr>iIG&d@ z%~KYCgx}R0El8ury2+!5&}R&(@xRnxL@%S4czthGzvQ!qfAk0ZURr9f(7X z=KNXWYLju5q#SxoG~2i&UM1e~(}(K)&2f3mCUNgcEHB~{B5cg_=6P+9_$=|9#MLJK ziJ%Ys%zFMI`H7(?&_~6)C67(7fB1^{2E@0~@6FfY9C25P-?dA8vtNzn~b>;1X@JWIymJ(=@i;`VhHK<_roHEJ`~V_5k+lqQgRqUam#sF>&p^gc~J zY4m=LK8Zf4(F^E98oh)*tkEmzBO2ZPDAuJ$_o0t!^dS0}MvtJ6YxEfUgho%IAJXU< z^eK&=L!Z{@bLcY~y^KDq(W~h58r{QEJgm|E=nEP>gubZJ2hf)_dK`U4qo>eUHF_4^ z^$Y9yjjW$MdYeWsqPJ`GCG-xBUPJHH=-x+j9M|Xp^lptFMwh=g+Oq!9do_9jy-%a3 z(fc*}B>JF6FQ5-;^b-28Mz5fcXms~uSpOQ`hd!#&gXm)#J%T>2(PQWn8a;`ANTX-a zr!;yFeOjZ>q0eaaGWx7WucFUubkAd1{~FzozM#=V=!+VC0DW1b$I(|bdJ272qi4}w zzuda6^5|_Ey@=kf(U;IWGr%AJga&^l^@CE=zjDCjUGZ@)aV1~%Njk7zM|1n=&KsNaSK`gZqt5T z&Es#==tcB)jlP85q0wvTof_Tyc-Fs051@Bz^e}plMvtQRYV-tppGHrk_iOY?^g)eY zKp)cRCG=s9UO^wx=x!2xP^0_MM>Tp7eN3ZA(8o1;41GeQC(#dS^bGoxM$e&7YxFtv 
z8I4{>pVjD9^m&c$c>?QSqx;bpG3iqqZiPJGM+oiL8H(?n58d=t1-`jUGWC*XS|y35}jaKcvw!=u;X!hd!;*=g?;~dKrCI zqgT=AHM)m4hle$~AALcihtL-_`T+W}MvtSfX!I2Nsz%SEyMCov|LAQRy@=kf(U;IW zGJ^eB3-Mo*ykY4kLDzeb-#AJpgt^dXI2LLb)X74#8} z?mnIMuhD(zqZ&PkKBmzl=;InahCZRuljw&udIo(;qvz14HToR-j7Be`&ua84`n*P$ zuf7j!bU*rnMh~GcYV-m0WsM$3U(x6(^i_?XMR)yLv;NWBG^a}cjMt47%^{>%= z=%X4vh(4y#Bk1EAJ%&DE(>e6Gl1lI4@#g1!o1Z6ViBD5b7w4^vt3Z67xLeArP3kS7 z=g~(*+xVQ&s5gLACGIZsx#|u+hnD*MXYkn~*BP0y)MtDiDxW)siOcmz^uMRzLl!n^ zH;ymP^+@s}M77_`XJuTnNxfP8EBIGM(D`wTZw}uq*CCPLKREeh92My&*C`q2@n&8& zzhCe?h1?OJw8k6X*h={T@xkA#&y8()m3~Kvj}f1h@@kWEG4yV(dvZ{;jn8$B`H=pn zq#W@fiEq>^{o4G#PkcFivn_nmUJ2hSzLex+qdntuWT~f0Jo-oVJ%;^rV)cEz_wWS)YB7`Rb#wNq^4xa@%Z2Q9!6KOEMZjJGD`AEqDT+nk?`{oVI;ULTLB z^{x9rz)-Kzq=QC;5DekRAy96s+lK0deG$@gbv z{IN$r?6{xQh_^rY@5XxAm~Z2_B>9!=Y{_+H`gm+^qRVx+is&IzgpyCW4%Z60SH@xc zepbrK^|_WF^RV%~8_#8va&o;c-(&f^b`fmlB#&|(uYTey65leP#_w1pKSg{UTrZ~a zdZF{l@4loCHmSFUe-VFH1p9u~bR3lHgbkeXu>Jc$%REZG5qxv_I;B3fiOz+6T}AZf ze2Jb!U(xvGx?`*89$@OxY{gNj? zy7VU{m29qa%lI4fk8gnMkGx z^mcUPl|(8x@>ND}v+2^$D!L0jB1YSKllr{R zIk+Xh`T8aKOyC=P!o&3uov-P2HH+`?sSnqOqz@bWr}6qD>%Bm{{fS(M#LW|@-#Mz^ z6G;E%`f&qK;<^z$w$~?d50xGw-e-v$S&+QQ_2%Y@H|Ish6GopwACYp~tdBT8S10Rw z%eal-cch6|i1$l98{;)vlm6w1%XM&~4)G%KLE^getj73>PZOWBl;1orlI!CQoI!r1 zf4cKceitm`4dR=43fJ3^dbU}|a$P;YpX*tOPi>M%xz64g`j9C?(dD{(htV7PS25A$ zI()9DZap5zb@{6J?dx8y(^o<7R&@xZ9=UFxn|daibjgc*H^*S~#_PUqe~|I|iN~L& z;#-cZM(a|KT>mff^oMQlS2oux*8+CMh|eF3xy5>wcC+|vXY#rr^RdmklIsd~ab2TX zZ1(+G>XGXW&J&MIe4BbaXURUG_Fr2bWPAbQF|L=?$e$)2AzmilSbt_a>bz9?5hotx zI!l(gc^)i%W0QHx;7j2fX(+~jEL~Sl>PIgg$CDQMlX}Yd51wOPXHt(`KXDd4wS#&Z z^Up%^p2vP8^R{KZ8?V3Oi{P8?+5R{n{gUf5uHy6X*xV$)Y4i&E)_F3HuQ}p9=X1Q1 z^{{0f(hv4e8D9xsit9-GokD`x= zs5xFmi617Ovc!#grF@cjkn37SC2sap9hwa<*~WR<2-sCy*Ym}?jrgS`q1$_ z(H_4Zv+@2~>Z#!$ztDO-mwLSCFfR0AIrQ$Jo-qEI%eMA2ik?Srry6sUb&^2Op)ZMO z?kBeWK=LEk&m4}Z^TLh%ZsxB*JWIS^>aoS8oigzv@u;i=hjlCCb)Uc{&L z?>!v^SttAp;_tjd|2ir00pfiQaml+}&vS&h^wHepv0UFXiJm@zC)$1(uRnQwi?3u| ziBGp5Aj`PQ_(J2C=C*xS_pxJ`fM 
zdZQ(-uhp1`E&VlChqRx@*LEAPmoi`4`Zmwsy;utP2i{>_cT!IYy&wIc9Isl`Bja%I z;r0BT57)x8&$;tSwg<@#US`J{ch9_rv2?<;ngce!5b++AExYrAo)d6Is}^;G*O z*#G7DZC}?&vPnP6_=5Lv+}_2L7I~0-dd}l@@{skoBDx>FfZn_xiylJHqc@)qiavlo zY15@Yar6xOvJ}|C`pV*uaDB3V-MpyxXHxGRz694TOYLC3rGHiY^EqBed2DW?d-k&b z-Ouq;+Ebh8e)PG|Z`DKSp6Q1zzfX|)8Blsq>e;g1jn^ruSFS(X@x`s><$AQDkKu7T zKFYWY_zvO=?=YVg{OPYhZ2W!EHtWXQ!@7B3XXgjfk1+nRZ?b-6owb-(8FvD|_q*2p zR`fJ_JGzI*<|g$|qPx*YWxYGCV|9F%{Fm?#eDC4Qg2p#OV;x)(evm@jb20_wdvB{5_$?fD|z2$J;?QqSC_f|A~q*qc!aoI-*Hv;2TecY#7C=K&#}=H z<3CyFDRl2&Iq#8jE!M5fXCD8^D&x|vAER31^$%bFLtKAzhxK}%%dx(Of1BgBkNV|$ z(yle~AZ^*kDe(c~CE|_!#P&Y1G5^FX#O?EvMlYi`uL~LPB>G|W#^*}fm zZKBKdz#|%6t`FX0)1@7EFLR8ZkUBQnG4^}Ov!D1u;*IZ9wCiv4yj=1)ApS=lsk>yq zv!7=)owp_N^`Ff3-eiAkF@I8T4!^hk$hMy|N`FiEqWB_GuTy`U-p{$8Pk$ecAN%I` zH}fplJD()(k+|9<&vO0q35_n-Lr>du>AzebeH1+`>s7bDkj0n5mwhbz(J?%+=MPCX z8D|0CocBom;E6nGkw2N|3VzR%Z1apRo3!hB0o4#+6~W%Fnl~AD5MMXf!)@LlMUS9& z+H~n>480w_@qT3o{mkI6oT2Zhj3ZCH_bJYCxgLCGn|PJ@vO`?*;JuK!^&hFv^Vlg5 z(vJ|nGQP&T(H!Ta#M_>_HBSljRmwHzPwGpfFQZ%bL+JyX#O3<*A?kCA7m1H=6R!|o z+9od7x9@qHbNvD0>22Z>;)~nFzl2G$F zCy&`Azn+W8&o*(nK7aU3u8%1NH02}2r-+X_#N))tY0PWg-yuQNDW7-p)X%>P@u6#X>)_=4wh-CgPLHs=d-_}cayF+SJX##g~N zy#t^3QkpyONPVhByCHlFdwKsLKJ9sy@p-k}mm_>W*I{l^UmD-cv(-4Z$x9yJ&~uI$ z-^1B`bVWAFUm4%w7CxD8_hp<Hx0`=_BfVUQ`8W#c<|}J_-sRFKBen zi}+kvqsx6;W;A*TeM+Ot{avKL&Ha&iildLAuSh5D>$d6lMj3ql7q#}u{bD*^*xI** zZ*d1c&*hBg;@0)a{bxq{TKl5-~G!A0en+0Y3)nm8@ZyjPwvCh^U~Hnxla!l^4)gaG7hkWv$x{;~RQ;YhN5+*DG54GWb?@;49#p8Ejpj+<$2Fm96vQc?pM?SGD#9 z@pZhqb-Ph~i#zb8@J&Tq*O$XL@|xDZ626|-w)V;Wlw4P~9-yX;=Y3hCwJ(4#`Nq~hxj)&!&8>Y&e1SK$_D$k*A8hTD`<|6|;FJ5J z|b)9wfLJ``%?J&ldXMnpSIvzTKh`) zI^NpaSHoA^fzLn4>)6{`*B8MzH`>~lz&G{w*1jyh^lhzubNEKy(b`wRH;`)Wll$29 zytB10gwOXb)<=u?wK05c?{4i&`!rM(=9vOW=!s zxOKZ(e8Gv%_Q}@uh4E!S)!G-wm-uw+@n-P#Pqp?H@C828x?Q;ssQa_6eV!qXlRNMQ z@#R0)y1pnrxlduM_aP~KvHM$(H;1qH^R0a)e7-NVZnuW7x&xnoAIHh**7Zg3O@6Vp zFM%)lrPjVIzJV{d9`78!?tE)s1)t|Dt=sip#c^^6z7W2`S6kN?!wKWK&wVw=Nqnu=X#ij88?Ai<_;Q8Tz9hcXH(UGU{T 
zymDXM_Sx1xUyS1(yn(u>qeu?eSq6FdKle>?m7;tP3n>R z1uxI>dzuqW)%ad2fj(b!%1e90+!#N;_>Z>sFOMEYx96*f-mmd5q4%P<+xk~S?>Wrx zkv6;6$h+LPIELQ0YcnAI37}_w&ijt0gvp%C{f)w#{I0us9T1avoOr%Y zdy)@ZT-r$!pXR==L-x4sIw*O3%g1nkM9OGCALj1>rM)t~EccD=5uauq)ucbfCyyuI zB43U5e;uDob3fTOsZVXv?=X4?`k+jp?t3J@S8a?RU*AdGCsE4Ue%~PTmnI%3J|y+* zpg#rtS?*6eA^B39)KfzDai7|x%vXzg#P5C`A1gnOamhN+_5be8zZ;bC2J!cDf7@~C zr|$DF^>>(2d{aK|FDdozpdV@c`P1q54*doEt|xQ9$sPD5{}ucJ?w{)szh*snUe7#o zKi$Uuu!B6veaYS2Z+AxObJ#Cr-Nf)^&a|!@(Ua&YbPtcsP5LSKH9v^nSSMyb)%PP3 zm;0S(x5SNADfP;I&}VJ+$~?<`(evo#9j-55oN42}z|H-WdgMOp1L(stZhf9MuCpP2 zxzBpKyH;P=LA&CY`>+T1aNkgQ{b{joWV~{p_VH)4Zg^~Nejb$3tLTm2EjJ%;nvN%u zPr0vq?KzA`3UBy~d`dlXzxQy6`O_W8r3{<&BZAM*{fu)B#rRL^kD6x@)>G zREqct@t{L|lDPYVn&x-)#@|KH5f2jYk@?u@Kfilw8fSn|1z(u^D$hx$TI5wMa=-e8 zDEEQoaZ|mGzi5}dgosC8Q>!16xb-;D_!A_N8{@|}hp%k+;gWh%#7AGtaai(et;e{J zoT?{>uXd$#03PB-jbbwT??*4A z&x+8TU!F?2F!AI*YdINL6g`gKSXWz)4^6NC_;R-LQg0SLtI_l5X><=Ba}$3NeH2~B zYHp%0p{LO0vAN0Pn)2Iq?}#xTqlZQb@dVHl)MMAf=rT^b9z~C#+w++~A3)DY)t0Z(#p(h|9SB#OJQr zI*t%}9=#Kf{q$I7dWBM7m3WPK;1r&i_g!=NyLidB|0doi9OC%I zw9ZHvhB#yZWdqkQ?>dikDJ=%xNiJj zwm`fJJU@$I_w^j(*AVwFwW_a)e` zxsQ2G1Y7-*#}M(pzi_;j*Xb5{6n`B5;lFbqaaoux{EhMBPjH{{G3@r^wXCZG@iOto z>$0VPo7Y>CdY14N);JH9dMrNU^~e22UgwB+$^K-CTk8pkZ@pIUm3nNiA2PlO@m1nc zIrOwxr{YiI_n)v^C$D9MZcvVLHr$^+<%(K<|h4!pu5o<>v8M4Zd{*5<|Bb`AJ7*JX+!s+k9KqabekST?>Jl46FIs4UiW>+O8pV^cJu|AFSUstLti*&%{aa^ z=}GkN;95Oz^Jm1rhx^K#l|Xr%LtjMiZqgg$M_)ycNIq6p%y<5{x z3cVA(`8Xl|EV@^t=h55HJEVQJNnVOdm$}lNk5-6#D7W?fuyH)`FtNU8uNj}e+15v6 z{)tDQv!*#8*gS5DZvbC+Xsx~|{m`Bd8Gjd&#J7yEB)%={*7*EU*3%@u%yZZ3AvaGP z*OQSAX}^U3(DQkH(Vt&6UWX*VHGIK~ta@S2;{7eVhs z51AsA`eNvD^kva4$6+JyDdNEwt{Lx7nvZYlzTT6>+b*`ww~Vu(^ggNAT8=xf8=Lr+ z@O4S~Ew2-s^CtcDyp`kHC2Pjtp_uh$jlD(U0pcUXM|r%t?l-Rw5g~q<_?*Pe@{RLq zW1Ml~iA(Lrm(B8N;sxRnOZ}Ve=ZG&8Z{%0AK8wWNm#rCp7i4LFbDT0yRp|#ld2DX- z*z-2VXVYc<`O)L(A?@*%b=q`YS*d3L|G|j$I3xLsqYtAyjW10+Lwo}7Mtbaf0X>%bZI}1K7{U-_O-_a>-EU8_`_Flypg=O@JqWz{OtqQ{z$t^=(U&b zydLk{`TXH!)_No#0rWijAdffK_hvpK#4E%-5?7m)i=i)~%f4toE=#{t#FvO0HHnFF 
z^Dm2DLLYq;Peyk5{%u#}j9oAC@4EbHyI$NyF=>B^dOZ8>>uz(ubKl1CjQBj>&2eqU z{lpiE7bR}4EA@G_^gm2Iea)KXdX>^{6nzQ3@xDrP+)EO#T&w!kJinBcdCL;-80P#- z2GU}^iob|IGtPPcBU<~Ve>ME0ck7RLGFQHLa2`f{;2554d3`d@tHc+^=e}qA^K9|O z@y+5p#AEC0z~=Eu`jx@glU4cLIF1>sL*jYjW5f$mkL@@pc_;5BoDx-IxSET$#KQ`BOjrbDr1xtK0 z&%SrE20y)~{oGyZ4-qdCmz;0bzd6n*@de_`61U_@=3(=^JcVy!%C-*G>!#E_NqmL) zPVehn!k@{pU&*@N7`M?c$yb&5?EPEcAA8=#>o=w5VV#-yfg=$f?ib#>ggtWV?f z1K$|^Cl0n#G@4W%JBJ`X6NOMQbfeoKF>$JrV_cft8}&v!fXOFSY~Z?tdBpRDr`@de^FiEEEz z*6Sk0@lSq}W-_&uM>X`aF3K|L`p5_u{wAqxCpb!8iCF{qaol z>3I*wb>hwQAnVDGopZF>tGW90)E^2 zX}mL$_#E-*ch!0CM!UwUkoXeuD)CV{9v$1OPNaCuCS!HKmtlR+I&LZFL+?kAig9E9 zH|q@%PZOVz_(r?3KN_DyMv1$>Z>?AIlt6c(4{7IX^L=j?U(ftn{jhv4yWQvIyZU^R z*CPIfUvQpz0#9138yR;EfAKeK^>OKk+GO0`G{@I};d5Nkn#UzE{soE85D!UQZH@Uy zFQYfEE3VrgcJ+B1ObQ+mg@CXZJ|@Xg>0OMPlywcx>m_ zQZ7h*rKXODP30o!;WgX)Z*@K){fZMW6W{JQEq%%0E3Wf;DyKXn|CjIkx{MCVdMQ)S z!sFKKVaaQY&AYoiN}b0pR%s~yjc2?A|4<Pd3>?kC(F`2Tz|jmG&A`zN z9L>PaGf-EXS;BlCe%Xbl)S#)G;u{{{*LZlF;`t@waqN%4=0$DC%=+ei-o zJykvLReY)90mU)J4=a9B@mq?&R{WdduG3Y&70*_Do?@TkYZT3XyiPrTo8p~{pH%#^ zVo~w;imoRa?LAuY>56+5FH+p6IHLFt#k&+gqxciW-z%QbY1DtZ;zf$DQ+$u&1B#1^ ze^Wf;$wv7-iebg86-O2CP@Go$zT$5cR~4UjhEZQw@p{FS;-q3;@rR0kSL}F-QT}Yj z%M@=?{IKFbDlRLoDYmQq>NLgOiV?+Y6qAY{Qv9OgPZXCF|ElPIs?jg6;?oq*R~%4` zDc+*^A;m+AUsWtCp7HjJ8%ylTios_X?Hrykj<52OnQULU+|WJ7Q`c<&CF+p@(>L5_ z_$)!51lFzfjX3xY*?edA>+74STkE^>e7*0G%@Oi*9EA`_w^Jn@FJNS-Q`7>(U{C{efwcXPke7@tYzGpc2#%;dy9DME*to2>& z;5%sZnd99Yf$^wf^IhXm-%y*i-CG=dC7aJ&fA)6!9$~F7>rh|8=KHLJulq!6eUDMc zZF{?uHs6B|_4yxZt?$XN)UW$Vn{U~nzAlfoz6%_VYg0B~{U!Q#J5RFKcY@RL)aG+K z?)px)*5`EG&Dwkyyj-%>HU&`kDyhC1AZN9HL_(mRW zZP#g?uGoA|>vYxTGmlsHb?SPIwcV;i|3+=TI~;uO$6D+Aor5oF^En+isy5#)hxw@4 ze5W}0++J({&UWy5Y`*;tKCjJpmxIq|^L@_2=ePMDaPS3ez8^UFf;Qi~9eg31?{`7{ z{uZ|RiVm-rzEiAu?l@0h-$CUw*Ja68&gr;x@C;2V0XwZ2;&dlTpQpUxrTX#C+3NeVt-fW4 z`kdBJ&(p2_d&pt^jGt-sUGA`MS9V){Pjm3C+I(Mh@VU;i);I0oYqR-2s@|WQ>)2$w z&3B(eeH}L6Ee^g;o6qSyxy$Br%1cMLb-XV1zQF9i$+oktKBx1PRh#dP4(Bt==UD6e 
zq{I3C!ns!8cO885L95R^|1jS#EnlTy_jjoNxz#|2uGag^_e_gAW_;ug!TdIbriT&F>+b&wL+lzEA5L(vM@!VI7P<&YI_wL;CTKKHln^ zb2u+6*?dm>QCEkxKBw1{8Jo}P{MUV|wLYiU50}sCb9((aZ1erp;kb6_G;4kT;^6Ch zlHoJwyAZHGkJ^rB=DeEgz0+a6KgZ#9vPJ>!i83!Q(or7R-aQ|W-hY&obodDLaWayFB2DAeNK5v_gQ^T zc^SRL>T}A=$fZ`FQ(lHHv-+I!(jT$Pu}thu7DY0jtmH^{etStM68a`j%gA z_1)p%TY81n_h|>;;-J;%bRJlKrPb$j9$0>r)%Q(@cFV7}`W|%hMXkQSJNaH?^*#0# z`gKr#t<`sylkZBa??q0&A*=6yJDeYv_gQ^)C*M_8pVN7M`D&~0GKcg0a?I*;dOuL! zZ}lbBaonx;xAE8O_qRW&?{iIGcU}v9S|{IiR-a@4UT5_=_V4vppJV^xR-a@4t~Y$<`Aqo+ z>+{*SS?e+9-#mX>d4qobk5TVaOkdkgdf#no-I=~lxoK_FX0D5Io3H1Mdf$CEU;oW| z-xqAY;Wz1h->~^c59)p9aljnM#4UQ?_ign}->UbSuYczI#?c$}KBslEqUO=epVK<2 z9k9-?(>iH;gVopTu-?5lT77SI@O9o~_1)v(3nZ+*A2}TNdfsUDJ>XDZ_-3o`K?h&| zo27vK@z<)^mB(+OE@jUV6LLmwvVW^`LT_)#o(7 z)puBZPV-w$S$$6PTYab1=QO|7cUgT-^ILtl)#o(7t7BH5)BM(MxB8s&QhSfp=aiS) zd#ygFywuWGpHp6H@3Z=x@=|-h)#sEK*SOW^lo$6MR-aQ|+#j_1obuwn)9Q1|i#ucW zIpxLuA*;_RFYddnKBv62ec0-A%8O^h>T}AA=WeUdDKDOnSba`;@!VteIpxKZwfda$ z;-PI>Wu!s>I%i+9rMbIMD{ zC#^oGy!bw4^*QCm_i3xoDKEY$tIsJfzRy^FPI>Wt*6MT0i|=z*pHp5sb5@^IUi|l4 zeNK7tf8OeI%8UOCR-aQ|{L@ySQ(pXEwECR#;{TG>=aiSOFI#<1c?smLKBv3{zGC$` zT}9V z@LN`&Q(l7Kw)&j%5}dXAobuB19jnhNFQI=he1qRL=$|v#tr$}5Q#9X_4yfltio=T2 zdoOBOsy7&{DVpcKW_gpLm%Om?uvamxcu;XvaZGVsF{5bKW74c|;nzmJZ4VlZ{K{Zi z>9gv&Pd$$)n)R4WFB;_zDVEB{^YCvCmXz-K7vp(Gaaqx<$B9FazO)gVRp;Gpw;S)L z`n-l-e2l?qx51oxzWiRpcc|UaJJfTJinpuhv+8+wu&8$Pq1(F+V;*PD#@ z8w>9-;-jixGpd~#RZm)tvt7kU)$=x0|B&j>VO7ukn~nO~RJ$&fSC?wf=lO3+l}o99jjP|mcdL5a zRDAk0V;mmUuO(HV>nfvvy=t7_QGWSc<#_%H<+sMStvoE)_@T7=3 zrPh(g^WQ18KmEV6wxe|&&A`zN9L>OH2F%ZyO~w5CPLuYy={w5*85uC=$9}$Q&Y!)P z;y3?2kH@A@C||3|h_ z-pq-vs>3_%*YtmLzfPnx#)*cmS6rWQ%s8KDm9H*!-lVUkwQA0bnFq67v!waEIMdhh z|C)zhv*w-AJa65ZdmC5pHZe`;e@eUN{^;6a|G%n!M=NVy=5GEu#{T*f#a}4eb#pD& zA7v!N^i8Yt`mZR;xw32>mm2k&zBANwQ-6kfZti<;jTnBjo_DF|zgK@hZ2msb{QaN# z`#$q~YV&*O+tl~2=J&|v_rzaNy7@h?`8}@rJ*^s+(Ru#gwejfagQFQZnt`JkIGTZ@ z8917OqZv4wfuk8Xnt`JkIGTZ@8917OqZ#<0oq_QW8lR)~E3VvWJfBuvG4(qPy{LY7 
za9DBvfbqG^|6ik&&$sr__>Yg&>p$cJEA#WBphlD18@wL>Ff@H{;PD5)U9W$D53fzF z)3$ZnHj22W)c5Yszae(xf$L(OUFQY&1o!SexAUrfH|>vg9=M_N#&gfxbKbdUUwzJ* zoma>A-EeK^&HHb>={f@E@9y5+)7dqgNQ~U{?6c0=XVkF!+5-o!jqktdz#DJ8djIaL z4_tp%;=qCU)x-O)yWy;O{O0SQHFD#D*X_SL(RqH))jfOm1bfck)3di{Z}6&f_MLmq zp8b3G?K%6rtIj_^)^l#pp7YPY`n+q-?Cjd;OC<907a8r|bn^{ox;8pZ73X%JyJt(M zuhR8-@BjBc_w@Aa+tague^2bZYpy!`yuID~&)$1Z&%UenpL_MWdwaUiJ12O4?3$~t z*7tdDqt6##c;T}Qc#Wnx;M%^GC%-wO8GAlS`)XdTH{%XOrvM z$=@89pRVl)K^)=H6oz={2zN|Fd}1T)uSBIBt{zL2G$K-PGjQSnd2XzmG6~Khp7>M>I?lYu?QACI<+f%s;cd`8$~| zRbGxQ&6`=y+zzdZb8 tYYE$g+)h-+-)(idT&ldeADChB%GS({wvABZ|M3qP?c0{mMmf{^e*ojGNR0ph literal 0 HcmV?d00001 diff --git a/cruelbuild b/cruelbuild new file mode 100755 index 000000000000..9f14f439e766 --- /dev/null +++ b/cruelbuild @@ -0,0 +1,1329 @@ +#!/usr/bin/env python3 + +import os, errno +from sys import argv, stdout, stderr +import re +import json +import atexit +from copy import deepcopy +from datetime import datetime, timedelta +from subprocess import CalledProcessError, Popen, run, DEVNULL, PIPE +from shutil import which +from enum import Enum, IntEnum +from collections import namedtuple +from struct import unpack_from, calcsize +from select import poll +from time import sleep +from timeit import default_timer as timer +from ctypes import CDLL, get_errno, c_int +from ctypes.util import find_library +from errno import EINTR +from termios import FIONREAD +from fcntl import ioctl +from io import FileIO +from os import fsencode, fsdecode + +CK_DIR = os.path.dirname(os.path.realpath(__file__)) + +toolchain = { + 'default': { + 'CROSS_COMPILE': 'toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/aarch64-linux-gnu-', + 'CC': 'toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/clang' + }, + 'cruel': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-cruel-elf-' + }, + 'samsung': { + 'CROSS_COMPILE': 
'toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/clang/host/linux-x86/clang-r349610-jopp/bin/aarch64-linux-gnu-', + 'CC': 'toolchain/clang/host/linux-x86/clang-r349610-jopp/bin/clang' + }, + 'google': { + 'CROSS_COMPILE': 'toolchain/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/llvm/bin/aarch64-linux-android-', + 'CC': 'toolchain/llvm/bin/clang', + 'LD': 'toolchain/llvm/bin/ld.lld', + 'AR': 'toolchain/llvm/bin/llvm-ar', + 'NM': 'toolchain/llvm/bin/llvm-nm', + 'OBJCOPY': 'toolchain/llvm/bin/llvm-objcopy', + 'OBJDUMP': 'toolchain/llvm/bin/llvm-objdump', + 'READELF': 'toolchain/llvm/bin/llvm-readelf', + 'OBJSIZE': 'toolchain/llvm/bin/llvm-size', + 'STRIP': 'toolchain/llvm/bin/llvm-strip', + 'LDGOLD': 'toolchain/aarch64-linux-android-4.9/bin/aarch64-linux-android-ld.gold', + 'LLVM_AR': 'toolchain/llvm/bin/llvm-ar', + 'LLVM_DIS': 'toolchain/llvm/bin/llvm-dis' + }, + 'proton': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-linux-gnu-', + 'CROSS_COMPILE_ARM32': 'toolchain/bin/arm-linux-gnueabi-', + 'CC': 'toolchain/bin/clang', + 'LD': 'toolchain/bin/ld.lld', + 'AR': 'toolchain/bin/llvm-ar', + 'NM': 'toolchain/bin/llvm-nm', + 'OBJCOPY': 'toolchain/bin/llvm-objcopy', + 'OBJDUMP': 'toolchain/bin/llvm-objdump', + 'READELF': 'toolchain/bin/llvm-readelf', + 'OBJSIZE': 'toolchain/bin/llvm-size', + 'STRIP': 'toolchain/bin/llvm-strip', + 'LDGOLD': 'toolchain/bin/aarch64-linux-gnu-ld.gold', + 'LLVM_AR': 'toolchain/bin/llvm-ar', + 'LLVM_DIS': 'toolchain/bin/llvm-dis' + }, + 'arter97': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-elf-' + }, + 'arm': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-none-elf-' + }, + 'system-gcc': { + 'CROSS_COMPILE': 'aarch64-linux-gnu-' + }, + 'system-clang': { + 'CC': 'clang', + 'CROSS_COMPILE': 'aarch64-linux-gnu-', + 'CROSS_COMPILE_ARM32': 'arm-linux-gnu-' + } +} + +models = { + 'G970F': { + 'config': 'exynos9820-beyond0lte_defconfig' + }, + 
'G970N': { + 'config': 'exynos9820-beyond0lteks_defconfig' + }, + 'G973F': { + 'config': 'exynos9820-beyond1lte_defconfig' + }, + 'G973N': { + 'config': 'exynos9820-beyond1lteks_defconfig' + }, + 'G975F': { + 'config': 'exynos9820-beyond2lte_defconfig' + }, + 'G975N': { + 'config': 'exynos9820-beyond2lteks_defconfig' + }, + 'G977B': { + 'config': 'exynos9820-beyondx_defconfig' + }, + 'G977N': { + 'config': 'exynos9820-beyondxks_defconfig' + }, + 'N970F': { + 'config': 'exynos9820-d1_defconfig' + }, + 'N971N': { + 'config': 'exynos9820-d1xks_defconfig' + }, + 'N975F': { + 'config': 'exynos9820-d2s_defconfig' + }, + 'N976B': { + 'config': 'exynos9820-d2x_defconfig' + }, + 'N976N': { + 'config': 'exynos9820-d2xks_defconfig' + } +} + +OBJTREE_SIZE_GB = 3 + + +_libc = None +def _libc_call(function, *args): + """Wrapper which raises errors and retries on EINTR.""" + while True: + rc = function(*args) + if rc != -1: + return rc + errno = get_errno() + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) + +Event = namedtuple('Event', ['wd', 'mask', 'cookie', 'name']) + +_EVENT_FMT = 'iIII' +_EVENT_SIZE = calcsize(_EVENT_FMT) + +class INotify(FileIO): + fd = property(FileIO.fileno) + inotify_raw_events = [] + topdir = 1 + paths = {} + event_files = set() + + def __init__(self, inheritable=False, nonblocking=False): + try: + libc_so = find_library('c') + except RuntimeError: + libc_so = None + global _libc; _libc = _libc or CDLL(libc_so or 'libc.so.6', use_errno=True) + O_CLOEXEC = getattr(os, 'O_CLOEXEC', 0) # Only defined in Python 3.3+ + flags = (not inheritable) * O_CLOEXEC | bool(nonblocking) * os.O_NONBLOCK + FileIO.__init__(self, _libc_call(_libc.inotify_init1, flags), mode='rb') + self._poller = poll() + self._poller.register(self.fileno()) + + def add_watch(self, path, mask): + path = str(path) if hasattr(path, 'parts') else path + wd = _libc_call(_libc.inotify_add_watch, self.fileno(), fsencode(path), mask) + self.paths[wd] = path + if path == '.': + 
self.topdir = wd + return wd + + def readraw(self, timeout=None, read_delay=None): + data = self._readall() + if not data and timeout != 0 and self._poller.poll(timeout): + if read_delay is not None: + sleep(read_delay / 1000.0) + data = self._readall() + return data + + def _readall(self): + bytes_avail = c_int() + ioctl(self, FIONREAD, bytes_avail) + if not bytes_avail.value: + return b'' + return os.read(self.fileno(), bytes_avail.value) + + def collect_events(self, timeout=1, read_delay=None): + self.inotify_raw_events.append(self.readraw(timeout=timeout, read_delay=read_delay)) + + @staticmethod + def parse_events(data): + pos = 0 + events = [] + while pos < len(data): + wd, mask, cookie, namesize = unpack_from(_EVENT_FMT, data, pos) + pos += _EVENT_SIZE + namesize + name = data[pos - namesize : pos].split(b'\x00', 1)[0] + events.append(Event(wd, mask, cookie, fsdecode(name))) + return events + + def _gather_event_files(self): + event_files = set() + for data in self.inotify_raw_events: + for event in self.parse_events(data): + if event.wd != -1: + if event.wd == self.topdir: + event_files.add(event.name) + else: + event_files.add(os.path.join(self.paths[event.wd], event.name)) + else: + fatal("Missing events with SRC_REDUCE=y, try to use j=1") + inotify_raw_events = [] + self.event_files.update(event_files) + + def get_event_files(self): + self.collect_events() + self._gather_event_files() + return self.event_files + + def run(self, args): + with Popen(args, stdout=stdout, stderr=stderr) as proc: + while proc.poll() is None: + self.collect_events() + if proc.returncode: + exit(proc.returncode) + self._gather_event_files() + +class flags(IntEnum): + OPEN = 0x00000020 #: File was opened + Q_OVERFLOW = 0x00004000 #: Event queue overflowed + ONLYDIR = 0x01000000 #: only watch the path if it is a directory + EXCL_UNLINK = 0x04000000 #: exclude events on unlinked objects + +inotify = INotify() +watch_flags = flags.OPEN | flags.EXCL_UNLINK | flags.ONLYDIR 
+unused_files = set() + + +def get_toolchain_cc(compiler): + cc = '' + if 'CC' in toolchain[compiler]: + cc = toolchain[compiler]['CC'] + else: + cc = toolchain[compiler]['CROSS_COMPILE'] + 'gcc' + return cc + +def mount_tmpfs(target, req_mem_gb): + if not os.path.ismount(target): + meminfo = dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open('/proc/meminfo').readlines()) + av_mem_gb = int(meminfo['MemAvailable'] / 1024 ** 2) + if av_mem_gb >= req_mem_gb + 2: + ret = run(['sudo', '--non-interactive', + 'mount', '-t', 'tmpfs', '-o', 'rw,noatime,size=' + str(req_mem_gb) + 'G', 'tmpfs', target]) + if ret.returncode != 0: + print('BUILD: error mounting tmpfs on ' + target, file=stderr) + else: + print('BUILD: tmpfs is mounted on ' + target) + else: + print('BUILD: will not mount tmpfs on ' + target + ' size ' + str(av_mem_gb) + 'G < ' + str(req_mem_gb + 2) + 'G') + else: + print(target + ' is already used as mountpoint', file=stderr) + +def umount_tmpfs(target): + if os.path.ismount(target): + ret = run(['sudo', '--non-interactive', 'umount', target]) + if ret.returncode != 0: + print("BUILD: error unmounting " + target, file=stderr) + else: + print("BUILD: " + target + " unmounted") + +def inotify_install_watchers(inotify, dirname, watch_flags, exclude_dirs, exclude_files): + inotify.add_watch(dirname, watch_flags) + + topdirs, unused_files = scandir(dirname) + for d in exclude_dirs: + topdirs.remove(d) + for f in exclude_files: + unused_files.remove(f) + + for dir in topdirs: + for root, dirs, files in os.walk(dir, topdown=False): + unused_files.update({ os.path.join(root, f) for f in files }) + for d in dirs: + inotify.add_watch(os.path.join(root, d), watch_flags) + + return unused_files + +def remove_files(*files): + for f in files: + try: + os.remove(f) + except FileNotFoundError: + pass + +def del_dirs(src_dir): + for dirpath, _, _ in os.walk(src_dir, topdown=False): + try: + os.rmdir(dirpath) + except OSError: + pass + +def mkdir(dirname): + try: + 
os.mkdir(dirname) + except FileExistsError: + pass + +def scandir(dirname): + topdirs = set() + topfiles = set() + with os.scandir(dirname) as it: + for entry in it: + if entry.is_dir(): + topdirs.add(entry.name) + else: + topfiles.add(entry.name) + return topdirs, topfiles + +def tool_exists(name): + return which(name) is not None + +def get_cores_num(): + return len(os.sched_getaffinity(0)) + +def check_env(var): + isset = False + v = os.environ.get(var, 'n') + if v == 'y' or v == 'Y' or v == 'yes' or v == '1': + isset = True + return isset + +def set_env(force=False, **env): + for key, value in env.items(): + if force or key not in os.environ: + os.environ[key] = value + value = os.environ[key] + print(key + '="' + value + '"') + +def fatal(*args, **kwargs): + print(*args, file=stderr, **kwargs) + exit(1) + +def print_usage(): + msg = f""" +Usage: {argv[0]} model= name= [+-] [+-] ... + +: build stage. Required argument. mkimg by default. +Where can be one of: config, build, mkimg, pack +(:build, :mkimg, :pack). Each next stage will run all +previous stages first. Prefix ':' means skip all previous +stages. + +model= phone model name. Required argument. +The script will try to autodetect connected phone if +model is not specified. Supported models: +{list(models.keys())} +Use model=all to build all available kernels. + +name=: optional custom kernel name +Use this switch if you want to change the name in +your kernel. + +toolchain=: optional toolchain switch +Supported compilers: {list(toolchain.keys())} + +os_patch_level=: use patch date (YYYY-MM) +instead of default one from build.mkbootimg. +file. For example: os_patch_level="2020-02" + +O=dir will perform out of tree kernel build in dir. +The script will try to mount tmpfs in dir if there +is enough available memory. + +[+-]: optional list of configuration switches. +Use prefix '+' to enable the configuration. +Use prefix '-' to disable the configuration. 
+You can check full list of switches and default ones in +kernel/configs/cruel*.conf directory. +One can use NODEFAULTS=y {argv[0]} +samsung ... to disable +all enabled by default configs. + +If you want to flash the kernel, use: FLASH=y {argv[0]} +""" + print(msg) + +def parse_stage(): + stages = [] + modes = ['config', 'build', 'mkimg', 'pack'] + omodes = [':config', ':build', ':mkimg', ':pack'] + all_modes = modes + omodes + + if len(argv) > 1: + mode = argv[1] + if mode not in all_modes: + if mode[0] == '+' or mode[0] == '-' or '=' in mode: + mode = 'mkimg' + else: + print_usage() + fatal('Please, specify the mode from {}.'.format(all_modes)) + else: + argv.pop(1) + else: + mode = 'mkimg' + + if mode in omodes: + if mode == ':config': + stages = [] # special model for :config + # don't run make defconfig + # just generate config.json file + else: + stages = [mode[1:]] + else: + stages = modes[0:modes.index(mode)+1] + + return stages + +def find_configs(): + configs = { 'kernel': {}, 'order': [] } + prefix_len = len('cruel') + suffix_len = len('.conf') + nodefaults = check_env('NODEFAULTS') + files = [f for f in os.listdir('kernel/configs/') if re.match('^cruel[+-]?.*\.conf$', f)] + for f in files: + if f == 'cruel.conf': + continue + name = f[prefix_len+1:] + name = name[:-suffix_len] + enabled = True if f[prefix_len:prefix_len+1] == '+' else False + + configs['kernel'][name] = { + 'path': os.path.join('kernel/configs', f), + 'enabled': enabled if not nodefaults else False, + 'default': enabled + } + if enabled and not nodefaults: + configs['order'].append(name) + configs['order'] = sorted(configs['order']) + return configs + +def save_config(file, configs): + conf = deepcopy(configs) + with open(file, 'w') as fh: + json.dump(conf, fh, sort_keys=True, indent=4) + +def load_config(file): + with open(file, 'r') as fh: + return json.load(fh) + +def switch_config(opt, enable, configs): + if opt in configs['kernel']: + configs['kernel'][opt]['enabled'] = enable + 
else: + fatal("Unknown config '{}'.".format(opt)) + + if enable: + if opt in configs['order']: + configs['order'].remove(opt) + configs['order'].append(opt) + else: + if opt in configs['order']: + configs['order'].remove(opt) + +def parse_args(): + configs = find_configs() + + for arg in argv[1:]: + if arg.find('=') != -1: + (key, value) = arg.split('=', 1) + + enable = None + if key[0] == '-' or key[0] == '+': + enable = True if key[0] == '+' else False + key = key[1:] + + if key not in [ 'name', + 'model', + 'os_patch_level', + 'toolchain', + 'magisk', + 'O' ]: + fatal('Unknown config {}.'.format(key)) + + if enable == None: + if key == 'model': + if value == 'all': + value = list(models.keys()) + else: + value = value.split(',') + configs[key] = value + else: + switch_config(key, enable, configs) + + if not value: + fatal('Please, use {}="".'.format(key)) + elif key == 'model': + for m in value: + if m not in models: + fatal('Unknown device model: ' + m) + elif key == 'os_patch_level': + try: + datetime.strptime(value, '%Y-%m') + except Exception: + fatal('Please, use os_patch_level="YYYY-MM". For example: os_patch_level="2020-02"') + elif key == 'toolchain': + if value not in toolchain: + fatal('Unknown toolchain: ' + value) + elif key == 'magisk': + if value != 'canary' and value != 'alpha' and not re.match('^v\d+\.\d+', value): + fatal('Unknown magisk version: ' + value + ' (example: canary, alpha, v20.4, v19.4, ...)') + configs['kernel']['magisk']['version'] = value + else: + switch = arg[0:1] + enable = True if switch == '+' else False + opt = arg[1:] + if switch not in ['+', '-']: + fatal("Unknown switch '{0}'. 
Please, use '+{0}'/'-{0}' to enable/disable option.".format(arg)) + switch_config(opt, enable, configs) + + if 'model' not in configs: + first_model = list(models.keys())[0] + if len(models) == 1: + configs['model'] = [ first_model ] + else: + try: + configs['model'] = [ adb_get_device_model() ] + except CalledProcessError: + print_usage() + fatal('Please, use model="". For example: model="{}"'.format(first_model)) + + return configs + +def setup_env(features, configs, model): + set_env(ARCH='arm64', PLATFORM_VERSION='11', ANDROID_MAJOR_VERSION='r') + set_env(KBUILD_BUILD_TIMESTAMP='') + if features['fake_config']: + defconfig = os.path.join('arch/arm64/configs', models[model]['config']) + set_env(KCONFIG_BUILTINCONFIG=defconfig) + +def config_info(configs, model): + name = configs.get('name', 'Cruel') + name = name.replace('#MODEL#', model) + print('Name: ' + name) + print('Model: ' + model) + + conf_msg = [] + kernel_configs = configs['kernel'] + for key in configs['order']: + if kernel_configs[key]['enabled']: + conf_msg.append(key + ' (default: ' + ('On' if kernel_configs[key]['default'] else 'Off') + ')') + if conf_msg: + print('Configuration:') + for i in conf_msg: + print("\t" + i) + else: + print('Configuration: basic') + + if 'os_patch_level' in configs: + print('OS Patch Level: ' + configs['os_patch_level']) + else: + with open('cruel/build.mkbootimg.' 
+ model, 'r') as fh: + for line in fh: + (arg, val) = line.split('=', 1) + val = val.rstrip() + if arg == 'os_patch_level': + print('OS Patch Level: ' + val) + break + +def config_name(name, config='.config'): + run(['scripts/config', + '--file', config, + '--set-str', 'LOCALVERSION', '-' + name], check=True) + +def config_model(model, config='.config'): + run(['scripts/config', + '--file', config, + '--disable', 'CONFIG_MODEL_NONE', + '--enable', 'CONFIG_MODEL_' + model], check=True) + +def make_config(features, configs, model): + objtree = configs.get('O', '.') + config = os.path.join(os.path.join(CK_DIR, objtree), + 'config.' + model) + set_env(KCONFIG_CONFIG=config) + args = ['scripts/kconfig/merge_config.sh', '-O', objtree, + os.path.join('arch/arm64/configs', models[model]['config']), + 'kernel/configs/cruel.conf'] + + kernel_configs = configs['kernel'] + for key in configs['order']: + if kernel_configs[key]['enabled']: + args.append(kernel_configs[key]['path']) + + inotify.run(args) + + if 'name' in configs: + name = configs['name'].replace('#MODEL#', model) + config_name(name, config) + + if features['dtb']: + config_model(model, config) + + del os.environ['KCONFIG_CONFIG'] + +def update_magisk(version): + cmd = ['usr/magisk/update_magisk.sh'] + if version: + cmd.append(version) + run(cmd, check=True) + with open('usr/magisk/magisk_version', 'r') as fh: + print('Magisk Version: ' + fh.readline()) + +def switch_toolchain(compiler): + cc = os.path.abspath(get_toolchain_cc(compiler)) + if cc.startswith(os.path.realpath('toolchain')): + branch = run(['git', 'submodule', 'foreach', 'git', 'rev-parse', '--abbrev-ref', 'HEAD'], + check=True, stdout=PIPE).stdout.decode('utf-8').splitlines()[1] + if not (tool_exists(cc) and compiler == branch): + ret = run(['git', 'submodule', 'foreach', 'git', 'rev-parse', '--verify', '--quiet', compiler], + stdout=DEVNULL, stderr=DEVNULL) + if ret.returncode != 0: + try: + run(['git', 'submodule', 'foreach', 'git', 'branch', 
compiler, 'origin/' + compiler], + check=True, stdout=DEVNULL, stderr=DEVNULL) + except CalledProcessError: + fatal("Can't checkout to toolchain: " + compiler) + run(['git', 'submodule', 'foreach', 'git', 'checkout', compiler], check=True) + +def build(compiler, objtree='.'): + env = {} + + toolchain[compiler]['CC'] = get_toolchain_cc(compiler) + if compiler in ['system-gcc', 'system-clang']: + env = toolchain[compiler] + else: + env = { k: os.path.abspath(v) for k, v in toolchain[compiler].items() } + if tool_exists('ccache'): + env['CC'] = 'ccache ' + env['CC'] + + if objtree != '.': + env['O'] = objtree + + if tool_exists('pigz'): + env['KGZIP']='pigz' + if tool_exists('pbzip2'): + env['KBZIP2']='pbzip2' + + arg_threads = [] + if check_env('DEBUG'): + arg_threads = ['-j', '1', 'V=1'] + else: + arg_threads = ['-j', str(get_cores_num())] + + inotify.run(['make', + *arg_threads, + *{ k + '=' + v for k, v in env.items() }]) + +def mkbootimg(os_patch_level, seadroid, config, output, **files): + if not tool_exists('mkbootimg'): + fatal("Please, install 'mkbootimg'.") + + print("Preparing {}...".format(output)) + for f in files.values(): + if not os.path.isfile(f): + fatal("Can't find file '{}'.".format(f)) + args = ['mkbootimg'] + with open(config) as fh: + for line in fh: + (arg, val) = line.split('=', 1) + if arg == 'os_patch_level' and os_patch_level: + val = os_patch_level + else: + val = val.rstrip() + args.extend(['--' + arg, val]) + for k, v in files.items(): + args.extend(['--' + k, v]) + args.extend(['--output', output]) + + run(args, check=True) + + if seadroid: + with open(output, 'ab') as img: + img.write('SEANDROIDENFORCE'.encode('ascii')) + +def get_dtb_configs(models): + dtb_model = {} + model_dtb = {} + for model in models: + with open(os.path.join('cruel', 'dtb.' 
+ model), 'r') as fh: + l = '' + while not l: + l = fh.readline() + dtb = l.split('.')[0] + if dtb not in dtb_model: + dtb_model[dtb] = [model] + else: + dtb_model[dtb].append(model) + model_dtb[model] = dtb + return {'dtb': dtb_model, 'model': model_dtb} + +def mkdtboimg(dtbdir, config, output): + if not tool_exists('mkdtboimg'): + fatal("Please, install 'mkdtboimg'.") + + print("Preparing {}...".format(output)) + inotify.run(['mkdtboimg', 'cfg_create', '--dtb-dir=' + dtbdir, output, config]) + +def mkvbmeta(output): + if not tool_exists('avbtool'): + fatal("Please, install 'avbtool'.") + + print('Preparing vbmeta...') + run(['avbtool', 'make_vbmeta_image', '--out', output], check=True) + +def mkaptar(boot, vbmeta): + if not (tool_exists('tar') and tool_exists('md5sum') and tool_exists('lz4')): + fatal("Please, install 'tar', 'lz4' and 'md5sum'.") + + print('Preparing AP.tar.md5...') + run(['lz4', '-m', '-f', '-B6', '--content-size', boot, vbmeta], check=True) + run(['tar', '-H', 'ustar', '-c', '-f', 'AP.tar', boot + '.lz4', vbmeta + '.lz4'], check=True) + run(['md5sum AP.tar >> AP.tar && mv AP.tar AP.tar.md5'], check=True, shell=True) + +def adb_get_state(): + return run(['adb', 'get-state'], stdout=PIPE, stderr=DEVNULL, check=False).stdout.decode('utf-8').strip() + +def adb_wait_for_device(): + state = adb_get_state() + if not state: + print('Waiting for the device...') + run(['adb', 'wait-for-device']) + +def heimdall_wait_for_device(): + print('Waiting for download mode...') + run('until heimdall detect > /dev/null 2>&1; do sleep 1; done', shell=True) + +def heimdall_in_download_mode(): + return run(['heimdall', 'detect'], stdout=DEVNULL, stderr=DEVNULL).returncode == 0 + +def heimdall_flash_images(imgs): + args = ['heimdall', 'flash'] + for partition, image in imgs.items(): + args.extend(['--' + partition.upper(), image]) + run(args, check=True) + +def adb_reboot_download(): + run(['adb', 'reboot', 'download']) + +def adb_reboot(): + run(['adb', 'reboot']) + 
+def adb_get_kernel_version(): + run(['adb', 'shell', 'cat', '/proc/version']) + +def adb_uid(): + return int(run(['adb', 'shell', 'id', '-u'], stdout=PIPE, check=True).stdout.decode('utf-8')) + +def adb_check_su(): + try: + run(['adb', 'shell', 'command', '-v', 'su'], check=True) + return True + except CalledProcessError: + return False + +def adb_get_device_model(): + return (run(['adb', 'shell', 'getprop', 'ro.boot.em.model'], stdout=PIPE, check=True) + .stdout.decode('utf-8') + .strip()[3:]) + +def adb_get_partitions(cmd_adb): + raw_partitions = run(['adb', 'shell', *cmd_adb('cat /proc/partitions')], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines()[1:] + aliases = run(['adb', 'shell', 'ls', '-1', + '/dev/block/by-name/*'], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines() + names = run(['adb', 'shell', 'realpath', + '/dev/block/by-name/*'], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines() + partitions = {} + map_block = {} + block_prefix_len = len('/dev/block/') + alias_prefix_len = len('/dev/block/by-name/') + for (alias, name) in zip(aliases, names): + if alias and name: + alias = alias[alias_prefix_len:] + name = name[block_prefix_len:] + partitions[alias] = { 'block': name } + map_block[name] = partitions[alias] + for part in raw_partitions: + if part: + major, minor, blocks, name = part.split() + if name in map_block: + map_block[name]['size'] = int(blocks) * 1024 + return partitions + +def flash(samsung=False, **imgs): + if not tool_exists('adb'): + fatal("Please, install 'adb'") + + is_root = False + use_su = False + try: + if not heimdall_in_download_mode(): + adb_wait_for_device() + is_root = (adb_uid() == 0) + if not is_root and adb_check_su(): + use_su = True + is_root = True + except (FileNotFoundError, CalledProcessError): + pass + + if is_root: + #cmd_adb = lambda cmd: ['sh', '-x', '-c', '"' + cmd + '"'] + cmd_adb = lambda cmd: [cmd.replace('\\','')] + if use_su: + cmd_adb = lambda cmd: ['su', '-c', '"' + 
cmd + '"'] + + state = adb_get_state() + tmpdir = '/data/local/tmp' + if state == 'recovery': + tmpdir = '/tmp' + + partitions = adb_get_partitions(cmd_adb) + for part, img in imgs.items(): + if part not in partitions: + fatal("Unknown partition " + part + " for " + img) + img_size = os.path.getsize(img) + part_size = partitions[part]['size'] + if img_size > part_size: + img_size_mb = img_size / 1024 ** 2 + part_size_mb = part_size / 1024 ** 2 + fatal("{} is bigger than {} partition ({:0.2f} > {:0.2f} MiB)" + .format(img, part, img_size_mb, part_size_mb)) + for part, img in imgs.items(): + cleanup = lambda: run(['adb', 'shell', + 'rm', '-f', os.path.join(tmpdir, img)]) + atexit.register(cleanup) + run(['adb', 'push', + img, tmpdir], + check=True) + run(['adb', 'shell', *cmd_adb( + 'dd if=' + os.path.join(tmpdir, img) + + ' of=/dev/block/by-name/' + part)], + check=True) + cleanup() + atexit.unregister(cleanup) + adb_reboot() + adb_wait_for_device() + adb_get_kernel_version() + elif samsung and tool_exists('heimdall'): + if not heimdall_in_download_mode(): + adb_wait_for_device() + adb_reboot_download() + heimdall_wait_for_device() + heimdall_flash_images(imgs) + adb_wait_for_device() + adb_get_kernel_version() + else: + fatal("Please, use 'adb root' or install 'heimdall'") + +def flash_zip(zipfile): + if not tool_exists('adb'): + fatal("Please, install 'adb'") + + if heimdall_in_download_mode(): + fatal("Can't flash zip file while phone is in DOWNLOAD mode. 
Please, reboot") + + is_root = False + use_su = False + try: + adb_wait_for_device() + is_root = (adb_uid() == 0) + if not is_root and adb_check_su(): + use_su = True + is_root = True + except (FileNotFoundError, CalledProcessError): + pass + + if not is_root: + fatal("Can't flash zip file if root is not available") + + state = adb_get_state() + tmpdir = '/data/local/tmp' + execdir = '/data/adb' + if state == 'recovery': + tmpdir = '/tmp' + execdir = '/tmp' + + update_binary = os.path.join(execdir, 'update-binary') + zippath = os.path.join(tmpdir, os.path.basename(zipfile)) + + #cmd_adb = lambda cmd: ['sh', '-x', '-c', '"' + cmd + '"'] + cmd_adb = lambda cmd: [cmd.replace('\\','')] + if use_su: + cmd_adb = lambda cmd: ['su', '-c', '"' + cmd + '"'] + + cleanup = lambda: run(['adb', 'shell', + *cmd_adb('rm -f /tmp/update-binary ' + os.path.join(tmpdir, zipfile))]) + atexit.register(cleanup) + run(['adb', 'push', zipfile, tmpdir], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'unzip -p {zip}' + + ' META-INF/com/google/android/update-binary ' + + '> {update}').format(zip=zippath, update=update_binary))], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'fgrep -qI \\"\\" {update} && ' + # text file + '[ \\"\$(head -n 1 {update})\\" = \\"#!/sbin/sh\\" ] && ' + + 'sed -i \\"1c\#!\$(which sh)\\" {update}').format(update=update_binary))], + check=True) + run(['adb', 'shell', *cmd_adb('chmod +x ' + update_binary)], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'set -o posix; FIFO=\$(mktemp -p {tmp} -u); mkfifo \$FIFO; exec 3<>\$FIFO; rm -f \$FIFO; ' + + 'cd {tmp}; {update} 3 3 {zip}').format(tmp=tmpdir, update=update_binary, zip=zippath))], + check=True) + cleanup() + atexit.unregister(cleanup) + + adb_reboot() + adb_wait_for_device() + adb_get_kernel_version() + +def archive_xz(name, images): + if not tool_exists('xz'): + fatal("Please, install 'xz'.") + +# if len(images) == 1: +# print('Preparing {} ...'.format(images[0] + '.xz')) +# run(['xz', '-9', '--force', 
images[0]], check=True) +# elif tool_exists('tar'): + if tool_exists('tar'): + print('Preparing ' + name + '...') + set_env(force=True, XZ_OPT='-9') + run(['tar', '-cJf', name, *images], check=True) + else: + fatal("Please, install 'tar'.") + +def print_recovery_message(words, margin=1): + if not words: + return [] + line_len = len(words[0]) + margin * 2 + line = [words[0]] + msg = [] + for i in range(1, len(words)): + if line_len + len(words[i]) + len(line) - 1 < 47: + line_len += len(words[i]) + line.append(words[i]) + else: + msg.append('ui_print "***{:^47}***"'.format(' '.join(line))) + line_len = len(words[i]) + margin * 2 + line = [words[i]] + if line: + msg.append('ui_print "***{:^47}***"'.format(' '.join(line))) + return msg + +def prepare_updater_script(configs, features, dtb_map): + models = configs['model'] + kernel_name = configs.get('name', 'Cruel').replace('#MODEL#', '') + device_check = [] + process = lambda t, k: ''.join([ + chr(x ^ ord(y)) + for x, y in zip(t, k * int(len(t) / len(k) + 1000))]) + + header = '''\ +#!/sbin/sh + +set -e + +ZIPFILE="$3" +ZIPNAME="${ZIPFILE##*/}" +OUTFD="/proc/self/fd/$2" + +tmpdir='/tmp' +execdir='/tmp' + +BOOTMODE=false +if ps | grep zygote | grep -qv grep; then + BOOTMODE=true +fi +if ps -A 2>/dev/null | grep zygote | grep -qv grep; then + BOOTMODE=true +fi + +if $BOOTMODE; then + if [ -n "$TMPDIR" -a -d "$TMPDIR" ]; then + tmpdir="$TMPDIR" + elif [ -d '/data/local/tmp' ]; then + tmpdir='/data/local/tmp' + fi + if [ -d '/data/adb' ]; then + execdir='/data/adb' + fi +fi + +ui_print() { + if $BOOTMODE; then + echo "$1" + else + echo -e "ui_print $1\\nui_print" >> $OUTFD + fi +} +show_progress() { + if ! $BOOTMODE; then + echo "progress $1 $2" >> $OUTFD + fi +} +set_progress() { + if ! 
$BOOTMODE; then + echo "set_progress $1" >> $OUTFD + fi +} +flash() { + dd if="$1" of="$2" &>/dev/null + rm -f "$1" +} +abort() { + ui_print "$1" + exit 1 +} + +''' + + print_models = [ + 'ui_print "****{:*^45}****"'.format(' Models '), + *print_recovery_message(models, 7) + ] + compiler = configs.get('toolchain', 'default') + compiler_version = (run([get_toolchain_cc(compiler), '--version'], stdout=PIPE, check=True) + .stdout.decode('utf-8') + .splitlines()[0].split()) + remove_prefix = lambda x, y: x[x.startswith(y) and len(y):] + remove_http = lambda x: remove_prefix(x, 'http://') + remove_https = lambda x: remove_prefix(x, 'https://') + shorten_link = lambda l: remove_https(remove_http(l.strip('()'))) + compiler_version = list(map(shorten_link, compiler_version)) + print_toolchain = [ + 'ui_print "****{:*^45}****"'.format(' ' + compiler.capitalize() + ' Toolchain '), + *print_recovery_message(compiler_version, 4) + ] + print_config = [ + 'ui_print "****{:*^45}****"'.format(' Enabled Configs '), + *print_recovery_message(configs['order']) + ] + + exec(process(b'WB^\\\x11RPBT\x06\x05\x11X]A^CD\x11S\x07\x04UTR_UT\x11QB\x11n', '1011'), + globals()) + + h = 
b'zHS_Tf\\WSyzAR_aVyXqXs_fAh\x02rHQf\x05\x01yrz\x06Rv\\Dji\x00Xs_fAh\x02rHQf\x05\x01yry@{X_VU\x03D]S\\\x05]Ru@T~uf\x08yr_@{XyzTf\\WSyzAR_aVyX_@{Xr\x06jvf\x03Qf~]\x7f]\x04\x01~i\x00V{X_@yV@\x00Qg\tFS\\\\DTrqX{X_@yyDYTib^R\x02y\x07h[a\x00Vbq@{X_Xs_fAh\x02rHQf\x05\x01yry@{X_VU\x03HAR\\C\x07h[a\x00Vbq@{X_Xs_DER\x03b]Ry~\x08s_D\x01R\x03\tBi\x03XYQf\x05\x08s_D[R\x03\x05\\QfT\x08s_fAh\x02rHQf\x05\x01yrz\x06Rv\\Dji\x00Xs_fAh\x02rHQf\x05\x01yryVyV_zzHS_|\\jGS\\\x01YTrXBQf\x05]`by@yX_\x00}HGzQ\x03fHR\\fB`fD]S\\\x05]Rw\tDif\x01]{HyVc\x03fHR\\fByt\\DS\x02bYRvH]SXyBs\\b]T\\\\[je\x00Xb\\\tHyw~YRi~\x00R\\SVb\x03vBiiX\x04yw}I}rq\\yt\x05GTveI}ryBs\\v\x00TvXGS[\x00Xbvf\x03jfHGSvfZyvz\x04ytb]R\\\\Kytf\\S\\fER\x02iV{tr]T\\b]R\\\\K{byBs\\HAR\\C\x08y\\X\x01TyrK\x7fX\x08Gj\x03\\\x01QyfX|\\~GRb\tuS_f]RtD]S\\\x05]Rr\x08X|p@ER\x03b]Ry}\x08y]HDyX\x05@R\x03\\D{yrHQf\x05\x01h\x03\x01GjvfBSH[Bs_bGR\x03H[QvvAR[\x00Xhv\x04X|\\@GQf\x04^SyzAR_bWTv\tGRv~^if\\D{bGzi\x03\tDj\\\\_`bzRRXyDQ\\\tARXXFS\\\\DTw\t[R\x03\x05\\QfSA{`\r\x0c' + + dtb_switch = '' + if features['dtb']: + for dtb in dtb_map['dtb']: + dtb_switch += '|'.join(dtb_map['dtb'][dtb]) + ') MODEL_DTB=' + dtb + ';;\n' + else: + dtb_switch = ');;'.join(models) + ');;\n' + + check = '''\ +show_progress 1 0 +set_progress 0.1 +MODEL=$(getprop ro.boot.em.model | cut -d '-' -f 2) + +case "$MODEL" in +{dtb_switch}\ +*) abort "Error: Unknown model $MODEL. This package is only for {known_models} devices, aborting..." 
+esac +set_progress 0.2 + +'''.format(dtb_switch=dtb_switch, known_models=','.join(models)) + + dtb_img = 'dtb-$MODEL_DTB.img dtbo-$MODEL.img' if features['dtb'] else '' + vbmeta_img = 'vbmeta.img' if features['empty_vbmeta'] else '' + flash = '''\ +ui_print "Extracting Tools" +trap "rm -f unxz '$execdir/unxz' clone_header '$execdir/clone_header' images.tar images.tar.xz {dtb} {vbmeta}" EXIT +unzip -o -q "$ZIPFILE" unxz clone_header images.tar.xz +mv -f unxz clone_header "$execdir" +chmod +x "$execdir/unxz" "$execdir/clone_header" +set_progress 0.3 + +ui_print "Extracting Images" +"$execdir/unxz" -f -T0 images.tar.xz +tar xf images.tar $MODEL.img {dtb} {vbmeta} +rm -f "$execdir/unxz" images.tar.xz images.tar +set_progress 0.4 + +ui_print "Cloning os_patch_level from current kernel..." +if ! "$execdir/clone_header" /dev/block/by-name/boot $MODEL.img; then + ui_print " * Error cloning os_patch_level, images are" + ui_print " * incompatible. Default date will be used." +fi +rm -f "$execdir/clone_header" +set_progress 0.5 + +ui_print "Flashing SM-$MODEL BOOT..." +flash $MODEL.img /dev/block/by-name/boot +set_progress 0.6 +'''.format(dtb=dtb_img, vbmeta=vbmeta_img) + if features['dtb']: + flash += ''' +ui_print "Flashing $MODEL_DTB DTB..." +flash dtb-$MODEL_DTB.img /dev/block/by-name/dtb +set_progress 0.7 + +ui_print "Flashing SM-$MODEL DTBO..." +flash dtbo-$MODEL.img /dev/block/by-name/dtbo +set_progress 0.8 +''' + if features['empty_vbmeta']: + flash += ''' +ui_print "Flashing empty VBMETA..." 
+flash vbmeta.img /dev/block/by-name/vbmeta +set_progress 0.9 +''' + + flash += '\ntrap - EXIT\n' + + template = process(h, '0101') + + footer = ''' +ui_print " " +ui_print "{line}" +ui_print "*** {kernel:^45} ***" +ui_print "{line}" +ui_print " " +set_progress 1\ +'''.format(line="*"*53, + kernel=kernel_name+' Kernel Installed') + + os.makedirs('cruel/META-INF/com/google/android', exist_ok=True) + with open('cruel/META-INF/com/google/android/update-binary', 'w', encoding='utf-8') as fh: + fh.write(''.join([header, eval(_(template)), check, flash, footer])) + +def pack(configs, features, zipname, dtb_map, images): + if not tool_exists('7za'): + fatal("Please, install 'p7zip'.") + + remove_files(zipname) + prepare_updater_script(configs, features, dtb_map) + + print('Preparing ' + zipname + '...') + # Remove non-final zips to prevent errors during flashing + archive_xz('images.tar.xz', images) + atexit.register(remove_files, zipname) + run(['7za', 'a', '-tzip', '-mx=9', + os.path.join('..', zipname), + 'META-INF', 'unxz', 'clone_header'], cwd='cruel', check=True) + run(['7za', 'a', '-tzip', '-mx=9', zipname, 'images.tar.xz'], check=True) + atexit.unregister(remove_files) + remove_files('images.tar.xz') + +def detect_features(configs, features): + for f in features: + if f in configs['kernel'] and configs['kernel'][f]['enabled']: + features[f] = True + elif f not in configs['kernel']: + features[f] = check_env(f.upper()) + return features + +if __name__ == '__main__': + os.chdir(CK_DIR) + + configs = {} + stages = parse_stage() + device_models = None + objtree = '.' 
+ + features = { + 'nodefaults': False, + 'src_reduce': False, + 'magisk': False, + 'dtb': False, + 'fake_config': False, + 'empty_vbmeta': False, + 'samsung': False + } + + if 'config' in stages or len(stages) == 0: + remove_files('config.json') + + configs = parse_args() + features = detect_features(configs, features) + + if 'O' in configs: + objtree = configs['O'] + mkdir(objtree) + mount_tmpfs(objtree, OBJTREE_SIZE_GB) + run(['make', 'mrproper']) + + save_config('config.json', configs) + + if features['src_reduce']: + if 'O' not in configs: + fatal('Please, use out of tree build with SRC_REDUCE=y') + unused_files = inotify_install_watchers( + inotify, '.', watch_flags, + ['.git', 'toolchain', '.github', objtree], + ['cruelbuild'] + ) + + remove_files(os.path.join(objtree, '.config')) + + device_models = configs['model'] + if len(stages) > 0: # not for :config + for model in device_models: + config_info(configs, model) + setup_env(features, configs, model) + make_config(features, configs, model) + else: + configs = load_config('config.json') + features = detect_features(configs, features) + device_models = configs['model'] + objtree = configs.get('O', '.') + + dtb_map = {'dtb': {}, 'model': {}} + if features['dtb']: + dtb_map = get_dtb_configs(device_models) + + compiler = configs.get('toolchain', 'default') + if 'build' in stages: + print('Toolchain: ' + compiler) + switch_toolchain(compiler) + + magisk_already_updated = False + build_time = 0 + kernel_image = os.path.join(objtree, 'arch/arm64/boot/Image') + kernel_config = os.path.join(objtree, '.config') + for model in device_models: + if 'build' in stages: + model_config = os.path.join(objtree, 'config.' + model) + print('Build date: ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC')) + config_info(configs, model) + setup_env(features, configs, model) + if not os.path.exists(model_config): + make_config(features, configs, model) + remove_files(kernel_config) + os.utime(model_config) + os.symlink('config.' 
+ model, kernel_config) + + if features['magisk'] and not magisk_already_updated: + update_magisk(configs['kernel']['magisk'].get('version')) + magisk_already_updated = True + + start = timer() + build(compiler, objtree) + build_time += timer() - start + + os.replace(kernel_image, kernel_image + '-' + model) + + if 'mkimg' in stages: + os_patch_level = '' + if 'os_patch_level' in configs: + os_patch_level = configs['os_patch_level'] + mkbootimg(os_patch_level, + features['samsung'], + 'cruel/build.mkbootimg.' + model, + model + '.img', + kernel=kernel_image + '-' + model) + if features['dtb']: + mkdtboimg(os.path.join(objtree, 'arch/arm64/boot/dts/samsung'), + 'cruel/dtbo.' + model, + 'dtbo-' + model + '.img') + + if 'mkimg' in stages: + if features['empty_vbmeta']: + mkvbmeta('vbmeta.img') + + for dtb in dtb_map['dtb']: + mkdtboimg(os.path.join(objtree, 'arch/arm64/boot/dts/exynos'), + 'cruel/dtb.' + dtb_map['dtb'][dtb][0], + 'dtb-' + dtb + '.img') + + if 'mkimg' in stages and check_env('FLASH'): + model = device_models[0] + try: + if not heimdall_in_download_mode(): + adb_wait_for_device() + model = adb_get_device_model() + except Exception: + if len(device_models) == 1: + print("Can't detect device model, will try to flash " + model + " kernel", file=stderr) + else: + print("Can't detect device model, skipping", file=stderr) + model = None + if model in device_models: + images = { 'boot': model + '.img' } + if features['dtb']: + images['dtb'] = 'dtb-' + dtb_map['model'][model] + '.img' + images['dtbo'] = 'dtbo-' + model + '.img' + if features['empty_vbmeta']: + images['vbmeta'] = 'vbmeta.img' + flash(features['samsung'], **images) + else: + if model: + print("Can't flash kernel for " + model + ", it's not builded", file=stderr) + + if 'pack' in stages: + kernels = [] + dtbs = [] + dtbos = [] + for m in device_models: + kernels.append(m + '.img') + if features['dtb']: + dtbos.append('dtbo-' + m + '.img') + for dtb in dtb_map['dtb']: + dtbs.append('dtb-' + dtb + 
'.img') + if features['empty_vbmeta']: + kernels.append('vbmeta.img') + pack(configs, features, 'CruelKernel.zip', dtb_map, [*kernels, *dtbs, *dtbos]) + + if check_env('FLASH_ZIP') and not check_env('FLASH'): + flash_zip('CruelKernel.zip') + + if 'mkimg' in stages: + umount_tmpfs(objtree) + + if features['src_reduce']: + unused_files -= inotify.get_event_files() + remove_files(*unused_files) + del_dirs('.') + + if build_time: + print("Build time: " + str(timedelta(seconds=round(build_time)))) diff --git a/kernel/configs/cruel+samsung.conf b/kernel/configs/cruel+samsung.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/kernel/configs/cruel-empty_vbmeta.conf b/kernel/configs/cruel-empty_vbmeta.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/kernel/configs/cruel-fake_config.conf b/kernel/configs/cruel-fake_config.conf new file mode 100644 index 000000000000..e69de29bb2d1 From 00a2c07cd34b85aae09b17bc9d53ab43c3f06638 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 22 Jan 2020 19:24:28 +0300 Subject: [PATCH 348/439] actions: add main.yml Signed-off-by: Denis Efremov --- .github/workflows/main.yml | 137 +++++++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000000..ffee8352331d --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,137 @@ +name: Build + +on: [push] + +env: + TOOLCHAIN: cruel + INSTALLER: yes + +jobs: + build: + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + model: [ "G970F,G973F,G975F", "N975F" ] + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.7' + + - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq libtinfo5 ccache + + + - name: Disable compression in ccache and set ccache path + run: ccache -o 
compression=false -o cache_dir=$HOME/.ccache + + - name: Prepare ccache timestamp + id: ccache_timestamp + run: | + echo "::set-output name=FULL_DATE::$(date +'%Y-%m-%d')" + echo "::set-output name=MONTH_DATE::$(date +'%Y-%m')" + + - name: Create cache key from ${{ matrix.model }} + id: ccache_model_key + run: echo "::set-output name=KEY::$( echo ${{ matrix.model }} | tr ',' '_' )" + + - name: Cache ccache files + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}-${{ steps.ccache_timestamp.outputs.FULL_DATE }} + restore-keys: | + ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}-${{ steps.ccache_timestamp.outputs.MONTH_DATE }} + ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}- + ccache-${{ env.TOOLCHAIN }}- + + - name: Kernel Configure + run: | + set -e -o pipefail + ./cruelbuild config \ + model=${{ matrix.model }} \ + name="Cruel-devel" \ + toolchain=$TOOLCHAIN \ + +magisk \ + +nohardening \ + +ttl \ + +wireguard \ + +cifs \ + +sdfat \ + +ntfs \ + +force_dex_wqhd \ + +morosound \ + +boeffla_wl_blocker \ + 2>&1 | tee config.info + + - name: Install gcc-aarch64-linux-gnu + if: env.TOOLCHAIN == 'system-gcc' || env.TOOLCHAIN == 'system-clang' + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq gcc-aarch64-linux-gnu + - name: Install clang + if: env.TOOLCHAIN == 'system-clang' + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq llvm lld clang + - name: Deploy Toolchain + if: env.TOOLCHAIN != 'system-gcc' && env.TOOLCHAIN != 'system-clang' + run: git clone --depth 1 -j $(nproc) --branch $TOOLCHAIN --single-branch https://github.com/CruelKernel/samsung-exynos9820-toolchain toolchain + + - name: Kernel Build + run: ./cruelbuild :build + + - name: Install mkbootimg + run: | + wget -q https://android.googlesource.com/platform/system/tools/mkbootimg/+archive/refs/heads/master.tar.gz -O - | tar xzf - 
mkbootimg.py gki + chmod +x mkbootimg.py + sudo mv mkbootimg.py /usr/local/bin/mkbootimg + sudo mv gki $(python -c 'import site; print(site.getsitepackages()[0])') + - name: Install mkdtboimg + run: | + wget -q https://android.googlesource.com/platform/system/libufdt/+archive/refs/heads/master.tar.gz -O - | tar --strip-components 2 -xzf - utils/src/mkdtboimg.py + chmod +x mkdtboimg.py + sudo mv mkdtboimg.py /usr/local/bin/mkdtboimg + - name: Install avbtool + run: | + wget -q https://android.googlesource.com/platform/external/avb/+archive/refs/heads/master.tar.gz -O - | tar xzf - avbtool.py + chmod +x avbtool.py + sudo mv avbtool.py /usr/local/bin/avbtool + + - name: Create CruelKernel images for ${{ matrix.model }} + run: ./cruelbuild :mkimg + - name: Create CruelKernel installer for ${{ matrix.model }} + if: env.INSTALLER == 'yes' + run: ./cruelbuild :pack + + - name: Avoid Double Zipping in Installer + if: env.INSTALLER == 'yes' + run: | + mkdir -p installer && cd installer + unzip ../CruelKernel.zip + - name: Upload Kernel Zip + if: env.INSTALLER == 'yes' + uses: actions/upload-artifact@v2 + with: + name: CruelKernel-${{ matrix.model }} + path: installer/* + if-no-files-found: error + - name: Upload Kernel Images + if: env.INSTALLER != 'yes' + uses: actions/upload-artifact@v2 + with: + name: CruelKernel-${{ matrix.model }} + path: '*.img' + if-no-files-found: error + + - name: Upload Kernel Info + uses: actions/upload-artifact@v2 + with: + name: ConfigurationInfo-${{ matrix.model }} + path: config.* + if-no-files-found: error From a98cf18987568120357d482b85987a2d94f5d53a Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 6 Feb 2020 00:44:28 +0300 Subject: [PATCH 349/439] README.md Signed-off-by: Denis Efremov --- .github/FUNDING.yml | 1 + README.md | 350 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 351 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 README.md diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml 
new file mode 100644 index 000000000000..9e9b2d9ca8aa --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: ["https://paypal.me/evdenis"] diff --git a/README.md b/README.md new file mode 100644 index 000000000000..990307bb8739 --- /dev/null +++ b/README.md @@ -0,0 +1,350 @@ +# Cruel Kernel Tree for Samsung S10, Note10 devices + +![CI](https://github.com/CruelKernel/samsung-exynos9820/workflows/CI/badge.svg) + +Based on samsung sources and android common tree. +Supported devices: G970F/N, G973F/N, G975F/N G977B/N, N970F, N975F, +N971N, N976B/N. + +## Contributors + +- fart1-git - for removing vendor check of DP cables in DEX mode +- NZNewbie - for adding fiops scheduler +- ExtremeGrief - for overall improvements, porting maple scheduler +- thehacker911 - overall improvements and advices +- @bamsbamx - ported boeffla\_wakelock\_blocker module +- Nico (@NicoMax2012) - ported moro sound module + +## How to install + +First of all, TWRP Recovery + multidisabler should be installed in all cases. +It's a preliminary step. Next, backup your existing kernel. You will be able +to restore it from TWRP Recovery in case of problems. + +### How to install zip file + +#### TWRP + +Reboot to TWRP. Flash CruelKernel.zip. Reboot to system. + +### How to install img file (raw image) + +#### TWRP + +Reboot to TWRP. Flash boot.img to the boot slot. Reboot to system. + +#### ADB/Termux (root required) + +With ADB: +```sh +$ adb shell push boot.img /sdcard/ +$ adb shell su -c 'dd if=/sdcard/boot.img of=/dev/block/by-name/boot' +$ adb shell rm -f /sdcard/boot.img +$ adb reboot +``` + +With Termux: +```sh +# Download image on the phone and copy image to, for example, /sdcard/boot.img +$ su +$ dd if=/sdcard/boot.img of=/dev/block/by-name/boot +$ rm -f /sdcard/boot.img +$ reboot +``` + +#### Flashify or FKM (root required) + +Just flash one of boot.img files suitable for your phone's model in the app. + +#### Heimdall + +Reboot to Download Mode. 
+```bash +$ sudo heimdall flash --BOOT boot.img +``` +Reboot to system. + +## Pin problem (Can't login) + +The problem is not in sources. It's due to os\_patch\_level mismatch with you current +kernel (and/or twrp). CruelKernel uses common security patch date to be in sync with +the official twrp and samsung firmware. You can check the default os\_patch\_level in +cruel/build.mkbootimg.* files. However, this date can be lower than other kernels use. When +you flash a kernel with an earlier patch date on top of the previous one with a higher +date, android activates rollback protection mechanism and you face the pin problem. It's +impossible to use a "universal" os_patch_level because different users use different +custom kernels and different firmwares. CruelKernel uses the common date by default +in order to suite most of users. + +How can you solve the problem? 6 different ways: +- You can restore your previous kernel and unlock problem will gone +- You can flash [backtothefuture-2099-12.zip](https://github.com/CruelKernel/backtothefuture/releases/download/v1.0/backtothefuture-2099-12.zip) + in TWRP to set the os_patch_level date for your boot and recovery partitions to 2099-12. + You can use other than 2099-12 date in the zip filename. You need to set it to the same + or greater date as your previous kernel. Nemesis and Los (from ivanmeller) kernels use 2099-12. + Max possible date is: 2127-12. It will be used if there will be no date in the zip filename. +- You can check the os_patch_level date of your previous kernel here + https://cruelkernel.org/tools/bootimg/ and patch cruel kernel image to the same date. + If your previous kernel is nemesis, patch cruel to 2099-12 date. +- You can reboot to TWRP, navigate to data/system and delete 3 files those names starts + with 'lock'. Reboot. Login, set a new pin. To fix samsung account login, reinstall the app +- You can rebuild cruel kernel with os_patch_level that suites you. 
To do it, you need to + add the line os_patch_level="\" to the main.yml cruel configuration. + See the next section if you want to rebuild the kernel. +- You can do the full wipe during cruel kernel flashing + +## How to customize the kernel + +It's possible to customize the kernel and build it in a web browser. +First of all, you need to create an account on GitHub. Next, **fork** +this repository. **Switch** to the "Actions" tab and activate GitHub Actions. +At this step you've got your copy of the sources and you can build it with +GitHub Actions. You need to open github actions [configuration file](.github/workflows/main.yml) +and **edit** it from the browser. + +First of all, you need to edit model argument (by default it's G973F) to the model +of your phone. You can select multiple models. Supported models are: G970F/N, G973F/N, +G975F/N, G977B/N, N970F, N971N, N975F, N976B/N. + +Edit model: +```YAML + strategy: + matrix: + model: [ "G973F" ] +``` + +For example, you can add two models. This will create separate +installers for models: +```YAML + strategy: + matrix: + model: [ "G973F", "G975F" ] +``` + +If you want one installer for 2 kernels, use: +```YAML + strategy: + matrix: + model: [ "G973F,G975F" ] +``` + +To alter the kernel configuration you need to edit lines: +```YAML + - name: Kernel Configure + run: | + ./cruelbuild config \ + model=${{ matrix.model }} \ + name="Cruel-v5.3" \ + +magisk \ + +nohardening \ + +ttl \ + +wireguard \ + +cifs \ + +sdfat \ + +ntfs \ + +morosound \ + +boeffla_wl_blocker +``` + +You can change the name of the kernel by replacing ```name="Cruel-v5.3"``` with, +for example, ```name="my_own_kernel"```. You can remove wireguard from the kernel +if you don't need it by changing "+" to "-" or by removing the "+wireguard" line +and "\\" on the previous line. + +OS patch date can be changed with ```os_patch_level=2020-12``` argument, +the default current date is in cruel/build.mkbootimg.G973F file. 
+ +### Preset configurations + +Available configuration presets can be found in [configs](kernel/configs/) folder. +Only the *.conf files prefixed with "cruel" are meaningful. +Presets list (+ means enabled by default, use NODEFAULTS=1 env var to drop them): +* +magisk - integrates magisk into the kernel. This allows to have root without + booting from recovery. Enabled by default. It's possible to specify magisk version, + e.g. +magisk=canary or +magisk=alpha or +magisk=v20.4 or +magisk=v19.4 +* dtb - build dtb/dtbo images +* empty\_vbmeta - include empty vbmeta img in installer and flash it +* always\_permit - pin SELinux to always use permissive mode. Required on LOS rom. +* always\_enforce - pin SELinux to always use enforcing mode. +* +force\_dex\_wqhd - disable vendor check of DP cables in DEX mode and always use WQHD resolution. +* 25hz - decrease interrupt clock freq from 250hz to 25hz. +* 50hz - decrease interrupt clock freq from 250hz to 50hz. +* 100hz - decrease interrupt clock freq from 250hz to 100hz. +* 300hz - increase interrupt clock freq from 250hz to 300hz. +* 1000hz - increase interrupt clock freq from 250hz to 1000hz. Don't use it if you + play games. You could benefit from this setting only if you use light/middle-weight + apps. Look here for more info: https://source.android.com/devices/tech/debug/jank\_jitter +* fp\_boost - fingerprint boost, max freqs for faster fingerprint check. +* noatime - mount fs with noatime by default. +* simple\_lmk - use simple low memory killer instead of lmdk. +* io\_bfq - enable BFQ MQ I/O scheduler in the kernel. BFQ is multi-queue scheduler, enabling + it requires switching SCSI subsystem to MQ mode. This means you will loose the ability + to use cfq and other single-queue schedulers after enabling +bfq. +* io\_maple - enable MAPLE I/O scheduler in the kernel. +* io\_fiops - enable FIOPS I/O scheduler in the kernel. +* io\_sio - enable SIO I/O scheduler in the kernel. 
+* io\_zen - enable ZEN I/O scheduler in the kernel. +* io\_anxiety - enable Anxiety I/O scheduler in the kernel. +* io\_noop - use no-op I/O scheduler by default (it's included in kernel in all cases). +* io\_cfq - make CFQ I/O scheduler default one. CFQ is enabled by default if you are not + enabling other schedulers. This switch is relevant only in case you enable multiple + schedulers and want cfq to be default one, for example: +maple +fiops will make fiops + default scheduler and give you the ability to switch to maple at runtime. Thus: +maple + +fiops +zen +cfq will add to the kernel maple, fiops, zen and make cfq scheduler default. +* +sdfat - use sdfat for exFAT and VFAT filesystems. +* +ntfs - enable ntfs filesystem support (read only). +* +cifs - adds CIFS fs support. +* tcp\_cubic - enable CUBIC TCP congestion control. +* tcp\_westwood - enable WestWood TCP congestion control. +* tcp\_htcp - enable HTCP congestion control. +* tcp\_bbr - enable BBR congestion control. +* tcp\_bic - make BIC TCP congestion control default one. BIC is enabled by default + if you are not enabling other engines. This options work as +cfq but for TCP + congestion control modules. +* sched_... - enable various (+performance, conservative, ondemand, +powersave, + userspace) CPU schedulers in the kernel. +* ttl - adds iptables filters for altering ttl values of network packets. This + helps to bypass tethering blocking in mobile networks. +* mass\_storage - enable usb mass storage drivers for drivedroid. +* +wireguard - adds wireguard module to the kernel. +* +morosound - enable moro sound control module. +* +boeffla\_wl\_blocker - enable boeffla wakelock blocker module. +* +nohardening - removes Samsung kernel self-protection mechanisms. Potentially + can increase the kernel performance. Enabled by default. Disable this if you + want to make your system more secure. +* nohardening2 - removes Android kernel self-protection mechanisms. 
Potentially + can increase the kernel performance. Don't use it if you don't know what you are + doing. Almost completely disables kernel self-protection. Very insecure. (fake\_config + to shut-up android warning) +* size - invoke compiler with size optimization flag (-Os). +* performance - invoke compiler with aggressive optimizations (-O3). +* +nodebug - remove debugging information from the kernel. +* noksm - disable Kernel Samepage Merging (KSM). +* nomodules - disable loadable modules support (fake\_config to shut-up android warning). +* noaudit - disable kernel auditing subsystem (fake\_config to shut-up android warning). +* noswap - disable swapping (fake\_config to shut-up android warning). +* nozram - disable nozram. +* usb\_serial - enable usb serial console support for nodemcu/arduino devices. +* fake\_config - Use defconfig for /proc/config.gz Some of the config presets, for + example nomodules, noaudit are safe but Android system checks kernel configuration + for these options to be enabled and issues the warning "There's an internal problem + with your device. Contact your manufacturer for details." in case they are not. This + config preset forces default configuration to be in /proc/config.gz This trick allows + to pass Android system check and shut up the warning. However, the kernel will use + other configuration during build. + +For example, you can alter default configuration to something like: +```YAML + - name: Kernel Configure + run: | + ./cruelbuild config \ + os_patch_level=2020-12 \ + model=${{ matrix.model }} \ + name="OwnKernel" \ + toolchain=proton \ + +magisk=canary \ + +wireguard \ + +nohardening \ + +1000hz +``` + +After editing the configuration in the browser, save it and **commit**. +Next, you need to **switch** to the "Actions" tab. At this step you will find that +GitHub starts to build the kernel. You need to **wait** about 25-30 mins while github builds +the kernel. 
If the build is successfully finished, you will find your boot.img in the Artifacts +section. Download it, unzip and flash. + +To keep your version of the sources in sync with main tree, please look at one of these tutorials: +- [How can I keep my fork in sync without adding a separate remote?](https://stackoverflow.com/a/21131381) +- [How do I update a GitHub forked repository?](https://stackoverflow.com/a/23853061) + +### Toolchain + +It's possible to select a toolchain. For example, you can switch to default toolchain by adding +"TOOLCHAIN: default" line in the main.yml config file. + +```YAML +env: + TOOLCHAIN: default +``` + +Available toolchains: + - default - standard toolchain from samsung's kernel archives for S10/Note10 models (clang6/gcc4.9) + - cruel - stable gcc 10.3.0 with LTO+PGO optimizations and reverted default inlining params to 9.3 version (https://github.com/CruelKernel/aarch64-cruel-elf) + - samsung - samsung's toolchain from S20 sources archive (clang8/gcc-4.9) + - google - official toolchain from google. Clang 12.0.4 from r23 and GCC 4.9 from r21 + - proton - bleeding-edge clang 13 (https://github.com/kdrag0n/proton-clang) + - arter97 - stable gcc 10.2.0 (https://github.com/arter97/arm64-gcc) + - arm - arm's gcc 9.2-2019.12 (https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads) + - system-gcc - gcc cross compiler installed in your system + - system-clang - clang installed in your system + +## How to build the kernel locally on your PC + +This instructions assumes you are using Linux. Install heimdall if you want to flash the +kernel automatically. 
+ +Next: +```sh +# Install prerequisites +# If you use ubuntu or ubuntu based distro then you need to install these tools: +$ sudo apt-get install build-essential libncurses-dev libtinfo5 bc bison flex libssl-dev libelf-dev heimdall-flash android-tools-adb android-tools-fastboot curl p7zip-full +# If you use Fedora: +$ sudo dnf group install "Development Tools" +$ sudo dnf install ncurses-devel ncurses-compat-libs bc bison flex elfutils-libelf-devel openssl-devel heimdall android-tools curl p7zip +# If you use Arch/Manjaro (from ..::M::..): +$ sudo pacman -Sy base-devel ncurses bc bison flex openssl libelf heimdall android-tools curl p7zip --needed +$ sudo link /lib/libtinfo.so.6 /lib/libtinfo.so.5 + +# Install avbtool +$ wget -q https://android.googlesource.com/platform/external/avb/+archive/refs/heads/master.tar.gz -O - | tar xzf - avbtool.py +$ chmod +x avbtool.py +$ sudo mv avbtool.py /usr/local/bin/avbtool + +# Install mkbootimg +$ wget -q https://android.googlesource.com/platform/system/tools/mkbootimg/+archive/refs/heads/master.tar.gz -O - | tar xzf - mkbootimg.py gki +$ chmod +x mkbootimg.py +$ sudo mv mkbootimg.py /usr/local/bin/mkbootimg +$ sudo mv gki $(python -c 'import site; print(site.getsitepackages()[0])') + +# Install mkdtboimg +$ wget -q https://android.googlesource.com/platform/system/libufdt/+archive/refs/heads/master.tar.gz -O - | tar --strip-components 2 -xzf - utils/src/mkdtboimg.py +$ chmod +x mkdtboimg.py +$ sudo mv mkdtboimg.py /usr/local/bin/mkdtboimg + +# Get the sources +$ git clone https://github.com/CruelKernel/samsung-exynos9820 +$ cd samsung-exynos9820 + +# List available branches +$ git branch -a | grep remotes | grep cruel | cut -d '/' -f 3 +# Switch to the branch you need +$ git checkout cruel-HVJ5-v5.3 + +# Install compilers +$ git submodule update --init --depth 1 -j $(nproc) +# execute these 4 commands if you want to use non-default toolchains +# cd toolchain +# git remote set-branches origin '*' +# git fetch -v --depth 1 +# 
cd ../ + +# Compile kernel for G970F, G973F, G975F phones. +# Use model=all if you want to build the kernel for all available phones. +$ ./cruelbuild mkimg name="CustomCruel" model=G970F,G973F,G975F toolchain=proton +magisk=canary +wireguard +ttl +cifs +nohardening +# You will find your kernel in boot.img file after compilation. +$ ls -lah ./boot.img + +# You can automatically flash the kernel with adb/heimdall +# if you connect your phone to the PC and execute: +$ FLASH=y ./cruelbuild mkimg ... + +# Or in a single command (compilation with flashing) +# FLASH=y ./cruelbuild mkimg name="CustomCruel" model=G973F toolchain=proton +magisk=canary +wireguard +ttl +cifs +nohardening +``` + +## Support + +- [Telegram](https://t.me/joinchat/GsJfBBaxozXvVkSJhm0IOQ) +- [XDA Thread](https://forum.xda-developers.com/galaxy-s10/samsung-galaxy-s10--s10--s10-5g-cross-device-development-exynos/kernel-cruel-kernel-s10-note10-v3-t4063495) + From 6fb2fa4d63b3027a1b00b942669df21917049cb6 Mon Sep 17 00:00:00 2001 From: Chris Redpath Date: Tue, 23 Oct 2018 17:43:34 +0100 Subject: [PATCH 350/439] ANDROID: sched/fair: initialise util_est values to 0 on fork Since "sched/fair: Align PELT windows between cfs_rq and its se" the upstream kernel has initialised the whole content of sched_avg to zero on fork. When util_est was backported, we missed this and so ended up with util_est values copied from the parent task. Add the zero initialisation which is present upstream and ensure that util_est values always start from a known point. 
Fixes: 700f1172f7a7 ("BACKPORT: sched/fair: Add util_est on top of PELT") Reported-by: Puja Gupta Cc: Dietmar Eggemann Cc: Abhijeet Dharmapurikar Cc: Patrick Bellasi Cc: Todd Kjos Cc: Saravana Kannan Change-Id: I06995e4320d606a52761d0e773baf28fcd1e2680 Signed-off-by: Chris Redpath Signed-off-by: celtare21 (cherry picked from commit a4af8d41cb4acf9c689f27e4afc81692520eb1bb) (cherry picked from commit e701e0d73c2917ed34951b7f509cdb40c795e38d) --- kernel/sched/fair.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 00467b8ee599..3a0d14abb94f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -738,8 +738,10 @@ void init_entity_runnable_average(struct sched_entity *se) { struct sched_avg *sa = &se->avg; - sa->last_update_time = 0; + memset(sa, 0, sizeof(*sa)); /* + * util_avg is initialized in post_init_entity_util_avg. + * util_est should start from zero. * sched_avg's period_contrib should be strictly less then 1024, so * we give it 1023 to make sure it is almost a period (1024us), and * will definitely be update (after enqueue). @@ -754,11 +756,6 @@ void init_entity_runnable_average(struct sched_entity *se) if (entity_is_task(se)) sa->load_avg = scale_load_down(se->load.weight); sa->load_sum = sa->load_avg * LOAD_AVG_MAX; - /* - * At this point, util_avg won't be used in select_task_rq_fair anyway - */ - sa->util_avg = 0; - sa->util_sum = 0; /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ init_multi_load(se); From 3aaa87d020eb0c2880b28a00617146a1d81355c1 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Thu, 4 May 2017 19:31:39 -0700 Subject: [PATCH 351/439] cpufreq: schedutil: Ignore work_in_progress Blindly ignoring frequency updates because of work_in_progress can leave the CPUs at the wrong frequency for a long time. It's better to update the frequency immediately than wait for a future event that might take a long time to come. 
The irq_work code already ignores double queuing of work. So, that part of the code is still safe when the work_in_progress flag is ignored. Change-Id: Id0b3711314dfbfa18b5f4bce30a239ee3cf962d6 Signed-off-by: Vikram Mulukutla Signed-off-by: Saravana Kannan (cherry picked from commit 5dec9a7e188d43d9639e4c3e3315f0047f83d96a) (cherry picked from commit 33628fe3cd7c3d9590113901647655478e822927) --- kernel/sched/cpufreq_schedutil.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d17eaaa916a7..69d919b88b7a 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -218,9 +218,6 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) !cpufreq_can_do_remote_dvfs(sg_policy->policy)) return false; - if (sg_policy->work_in_progress) - return false; - if (unlikely(sg_policy->need_freq_update)) { sg_policy->need_freq_update = false; /* From 9436c4cab19943e851c70711af5390ba815e86a3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 5 Apr 2020 23:26:36 +0300 Subject: [PATCH 352/439] mmc: core: make crc control switchable Signed-off-by: Denis Efremov --- drivers/mmc/core/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 1951555832a8..7f7dd6e6f9a2 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -70,7 +70,7 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; * So we allow it it to be disabled. 
*/ bool use_spi_crc = 1; -module_param(use_spi_crc, bool, 0); +module_param(use_spi_crc, bool, 0644); static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) From 0ee2eb31a1d919daaab526a252f4b687a8541457 Mon Sep 17 00:00:00 2001 From: Ameya Thakur Date: Sun, 21 Dec 2014 12:53:22 -0800 Subject: [PATCH 353/439] arm64: Add 32-bit sigcontext definition to uapi sigcontext.h The arm64 uapi sigcontext.h can be included by 32-bit userspace modules. Since arm and arm64 sigcontext definitions are not compatible, add arm sigcontext definition to arm64 sigcontext.h. Change-Id: I94109b094f6c8376fdaeb2822d7b26d18ddfb2bc Signed-off-by: David Ng Signed-off-by: Ameya Thakur Signed-off-by: Prasad Sodagudi --- arch/arm64/include/uapi/asm/sigcontext.h | 32 ++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index f6cc3061b1ae..018c12faef81 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -17,6 +17,7 @@ #ifndef _UAPI__ASM_SIGCONTEXT_H #define _UAPI__ASM_SIGCONTEXT_H +#ifdef CONFIG_64BIT #include /* @@ -117,4 +118,35 @@ struct extra_context { __u32 __reserved[3]; }; +#else /* CONFIG_64BIT */ + +/* + * Signal context structure - contains all info to do with the state + * before the signal handler was invoked. Note: only add new entries + * to the end of the structure. 
+ */ +struct sigcontext { + unsigned long trap_no; + unsigned long error_code; + unsigned long oldmask; + unsigned long arm_r0; + unsigned long arm_r1; + unsigned long arm_r2; + unsigned long arm_r3; + unsigned long arm_r4; + unsigned long arm_r5; + unsigned long arm_r6; + unsigned long arm_r7; + unsigned long arm_r8; + unsigned long arm_r9; + unsigned long arm_r10; + unsigned long arm_fp; + unsigned long arm_ip; + unsigned long arm_sp; + unsigned long arm_lr; + unsigned long arm_pc; + unsigned long arm_cpsr; + unsigned long fault_address; +}; +#endif /* CONFIG_64BIT */ #endif /* _UAPI__ASM_SIGCONTEXT_H */ From bb0837eb04cbcbc6f065ed18d7c3c6a08ef602fd Mon Sep 17 00:00:00 2001 From: Stricted Date: Mon, 26 Aug 2019 18:03:34 +0000 Subject: [PATCH 354/439] video: mdnie: fix lux node permissions --- drivers/video/fbdev/exynos/panel/mdnie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/mdnie.c b/drivers/video/fbdev/exynos/panel/mdnie.c index e3ff5b86d060..60cab1a17c47 100644 --- a/drivers/video/fbdev/exynos/panel/mdnie.c +++ b/drivers/video/fbdev/exynos/panel/mdnie.c @@ -1234,7 +1234,7 @@ struct device_attribute mdnie_dev_attrs[] = { __PANEL_ATTR_RW(scenario, 0664), __PANEL_ATTR_RW(accessibility, 0664), __PANEL_ATTR_RW(bypass, 0664), - __PANEL_ATTR_RW(lux, 0000), + __PANEL_ATTR_RW(lux, 0664), __PANEL_ATTR_RO(mdnie, 0444), __PANEL_ATTR_RW(sensorRGB, 0664), __PANEL_ATTR_RW(whiteRGB, 0664), From 8ab7da5a1aa75790ffb6260fec157b20b60abe91 Mon Sep 17 00:00:00 2001 From: Andreas Schneider Date: Sat, 29 Feb 2020 23:57:20 +0100 Subject: [PATCH 355/439] drivers:soc:samsung: Fix divide by zero issues in macros Signed-Off-by: Andreas Schneider --- drivers/soc/samsung/cal-if/cmucal.h | 38 + .../cal-if/exynos9820/acpm_dvfs_exynos9820.h | 30 +- .../samsung/cal-if/exynos9820/cmucal-node.c | 144 +- .../samsung/cal-if/exynos9820/cmucal-vclk.c | 1642 ++++++++--------- 4 files changed, 946 insertions(+), 908 deletions(-) diff --git 
a/drivers/soc/samsung/cal-if/cmucal.h b/drivers/soc/samsung/cal-if/cmucal.h index 353c49cf686d..adb64ad91f6e 100644 --- a/drivers/soc/samsung/cal-if/cmucal.h +++ b/drivers/soc/samsung/cal-if/cmucal.h @@ -423,6 +423,19 @@ struct cmucal_clkout { .ops = NULL, \ } +#define CMUCAL_VCLK2(_id, _lut, _list, _seq, _switch) \ +[_id & MASK_OF_ID] = { \ + .id = _id, \ + .name = #_id, \ + .lut = NULL, \ + .list = _list, \ + .seq = _seq, \ + .num_rates = 0, \ + .num_list = (sizeof(_list) / sizeof((_list)[0])), \ + .switch_info = _switch, \ + .ops = NULL, \ +} + #define CMUCAL_ACPM_VCLK(_id, _lut, _list, _seq, _switch, _margin_id) \ [_id & MASK_OF_ID] = { \ .id = _id, \ @@ -437,6 +450,20 @@ struct cmucal_clkout { .margin_id = _margin_id, \ } +#define CMUCAL_ACPM_VCLK2(_id, _lut, _list, _seq, _switch, _margin_id) \ +[_id & MASK_OF_ID] = { \ + .id = _id, \ + .name = #_id, \ + .lut = _lut, \ + .list = _list, \ + .seq = _seq, \ + .num_rates = 0, \ + .num_list = 0, \ + .switch_info = _switch, \ + .ops = NULL, \ + .margin_id = _margin_id, \ +} + #define SFR_BLOCK(_id, _pa, _size) \ [_id & MASK_OF_ID] = { \ .id = _id, \ @@ -492,6 +519,17 @@ struct cmucal_clkout { .num_parents = (sizeof(_pids) / sizeof((_pids)[0])), \ } +#define CLK_MUX2(_id, _pids, _o, _so, _eo) \ +[_id & MASK_OF_ID] = { \ + .clk.id = _id, \ + .clk.name = #_id, \ + .clk.offset_idx = _o, \ + .clk.status_idx = _so, \ + .clk.enable_idx = _eo, \ + .pid = NULL, \ + .num_parents = 0, \ +} + #define CLK_DIV(_id, _pid, _o, _so, _eo) \ [_id & MASK_OF_ID] = { \ .clk.id = _id, \ diff --git a/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h b/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h index 013df11da15d..e73d51d1d75a 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h +++ b/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h @@ -18,21 +18,21 @@ enum acpm_dvfs_id { }; struct vclk acpm_vclk_list[] = { - CMUCAL_ACPM_VCLK(dvfs_mif, NULL, NULL, NULL, NULL, MARGIN_MIF), - 
CMUCAL_ACPM_VCLK(dvfs_int, NULL, NULL, NULL, NULL, MARGIN_INT), - CMUCAL_ACPM_VCLK(dvfs_cpucl0, NULL, NULL, NULL, NULL, MARGIN_LIT), - CMUCAL_ACPM_VCLK(dvfs_cpucl1, NULL, NULL, NULL, NULL, MARGIN_MID), - CMUCAL_ACPM_VCLK(dvfs_cpucl2, NULL, NULL, NULL, NULL, MARGIN_BIG), - CMUCAL_ACPM_VCLK(dvfs_npu, NULL, NULL, NULL, NULL, MARGIN_NPU), - CMUCAL_ACPM_VCLK(dvfs_disp, NULL, NULL, NULL, NULL, MARGIN_DISP), - CMUCAL_ACPM_VCLK(dvfs_score, NULL, NULL, NULL, NULL, MARGIN_SCORE), - CMUCAL_ACPM_VCLK(dvfs_aud, NULL, NULL, NULL, NULL, MARGIN_AUD), - CMUCAL_ACPM_VCLK(dvs_cp, NULL, NULL, NULL, NULL, MARGIN_CP), - CMUCAL_ACPM_VCLK(dvfs_g3d, NULL, NULL, NULL, NULL, MARGIN_G3D), - CMUCAL_ACPM_VCLK(dvfs_intcam, NULL, NULL, NULL, NULL, MARGIN_INTCAM), - CMUCAL_ACPM_VCLK(dvfs_cam, NULL, NULL, NULL, NULL, MARGIN_CAM), - CMUCAL_ACPM_VCLK(dvfs_iva, NULL, NULL, NULL, NULL, MARGIN_IVA), - CMUCAL_ACPM_VCLK(dvfs_mfc, NULL, NULL, NULL, NULL, MARGIN_MFC), + CMUCAL_ACPM_VCLK2(dvfs_mif, NULL, NULL, NULL, NULL, MARGIN_MIF), + CMUCAL_ACPM_VCLK2(dvfs_int, NULL, NULL, NULL, NULL, MARGIN_INT), + CMUCAL_ACPM_VCLK2(dvfs_cpucl0, NULL, NULL, NULL, NULL, MARGIN_LIT), + CMUCAL_ACPM_VCLK2(dvfs_cpucl1, NULL, NULL, NULL, NULL, MARGIN_MID), + CMUCAL_ACPM_VCLK2(dvfs_cpucl2, NULL, NULL, NULL, NULL, MARGIN_BIG), + CMUCAL_ACPM_VCLK2(dvfs_npu, NULL, NULL, NULL, NULL, MARGIN_NPU), + CMUCAL_ACPM_VCLK2(dvfs_disp, NULL, NULL, NULL, NULL, MARGIN_DISP), + CMUCAL_ACPM_VCLK2(dvfs_score, NULL, NULL, NULL, NULL, MARGIN_SCORE), + CMUCAL_ACPM_VCLK2(dvfs_aud, NULL, NULL, NULL, NULL, MARGIN_AUD), + CMUCAL_ACPM_VCLK2(dvs_cp, NULL, NULL, NULL, NULL, MARGIN_CP), + CMUCAL_ACPM_VCLK2(dvfs_g3d, NULL, NULL, NULL, NULL, MARGIN_G3D), + CMUCAL_ACPM_VCLK2(dvfs_intcam, NULL, NULL, NULL, NULL, MARGIN_INTCAM), + CMUCAL_ACPM_VCLK2(dvfs_cam, NULL, NULL, NULL, NULL, MARGIN_CAM), + CMUCAL_ACPM_VCLK2(dvfs_iva, NULL, NULL, NULL, NULL, MARGIN_IVA), + CMUCAL_ACPM_VCLK2(dvfs_mfc, NULL, NULL, NULL, NULL, MARGIN_MFC), }; unsigned int acpm_vclk_size = 
ARRAY_SIZE(acpm_vclk_list); diff --git a/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c b/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c index 22d4ebb26b1a..b110b58180f6 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c +++ b/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c @@ -1078,78 +1078,78 @@ struct cmucal_mux cmucal_mux_list[] = { CLK_MUX(CLKCMU_MIF_DDRPHY2X_S2D, cmucal_clkcmu_mif_ddrphy2x_s2d_parents, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_SELECT, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_BUSY, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLK_S2D_CORE, cmucal_mux_clk_s2d_core_parents, CLK_CON_MUX_MUX_CLK_S2D_CORE_SELECT, CLK_CON_MUX_MUX_CLK_S2D_CORE_BUSY, CLK_CON_MUX_MUX_CLK_S2D_CORE_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLK_VTS_BUS, cmucal_mux_clk_vts_bus_parents, CLK_CON_MUX_MUX_CLK_VTS_BUS_SELECT, CLK_CON_MUX_MUX_CLK_VTS_BUS_BUSY, CLK_CON_MUX_MUX_CLK_VTS_BUS_ENABLE_AUTOMATIC_CLKGATING), - CLK_MUX(APM_CMU_APM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(APM_CMU_APM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(AUD_CMU_AUD_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(AUD_CMU_AUD_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(BUSC_CMU_BUSC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(BUSC_CMU_BUSC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMGP_CMU_CMGP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMGP_CMU_CMGP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMU_CMU_CMU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMU_CMU_CMU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CORE_CMU_CORE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CORE_CMU_CORE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, 
EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL1_CMU_CPUCL1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL1_CMU_CPUCL1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DPU_CMU_DPU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DPU_CMU_DPU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPM_CMU_DSPM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPM_CMU_DSPM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPS_CMU_DSPS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPS_CMU_DSPS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0_CMU_FSYS0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0_CMU_FSYS0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0A_CMU_FSYS0A_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0A_CMU_FSYS0A_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS1_CMU_FSYS1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS1_CMU_FSYS1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G2D_CMU_G2D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G2D_CMU_G2D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - 
CLK_MUX(G3D_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_EMBEDDED_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_EMBEDDED_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPHQ_CMU_ISPHQ_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPHQ_CMU_ISPHQ_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPLP_CMU_ISPLP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPLP_CMU_ISPLP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPPRE_CMU_ISPPRE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPPRE_CMU_ISPPRE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(IVA_CMU_IVA_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(IVA_CMU_IVA_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MFC_CMU_MFC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MFC_CMU_MFC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF_CMU_MIF_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF_CMU_MIF_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF1_CMU_MIF1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF1_CMU_MIF1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF2_CMU_MIF2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF2_CMU_MIF2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF3_CMU_MIF3_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF3_CMU_MIF3_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU0_CMU_NPU0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU0_CMU_NPU0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - 
CLK_MUX(NPU1_CMU_NPU1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU1_CMU_NPU1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC0_CMU_PERIC0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC0_CMU_PERIC0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC1_CMU_PERIC1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC1_CMU_PERIC1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIS_CMU_PERIS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIS_CMU_PERIS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VRA2_CMU_VRA2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VRA2_CMU_VRA2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VTS_CMU_VTS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VTS_CMU_VTS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(APM_CMU_APM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(APM_CMU_APM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(AUD_CMU_AUD_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(AUD_CMU_AUD_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(BUSC_CMU_BUSC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(BUSC_CMU_BUSC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMGP_CMU_CMGP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMGP_CMU_CMGP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMU_CMU_CMU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMU_CMU_CMU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CORE_CMU_CORE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CORE_CMU_CORE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + 
CLK_MUX2(CPUCL0_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL1_CMU_CPUCL1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL1_CMU_CPUCL1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DPU_CMU_DPU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DPU_CMU_DPU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPM_CMU_DSPM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPM_CMU_DSPM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPS_CMU_DSPS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPS_CMU_DSPS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0_CMU_FSYS0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0_CMU_FSYS0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0A_CMU_FSYS0A_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0A_CMU_FSYS0A_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS1_CMU_FSYS1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS1_CMU_FSYS1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G2D_CMU_G2D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + 
CLK_MUX2(G2D_CMU_G2D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_EMBEDDED_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_EMBEDDED_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPHQ_CMU_ISPHQ_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPHQ_CMU_ISPHQ_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPLP_CMU_ISPLP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPLP_CMU_ISPLP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPPRE_CMU_ISPPRE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPPRE_CMU_ISPPRE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(IVA_CMU_IVA_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(IVA_CMU_IVA_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MFC_CMU_MFC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MFC_CMU_MFC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF_CMU_MIF_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF_CMU_MIF_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF1_CMU_MIF1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF1_CMU_MIF1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF2_CMU_MIF2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF2_CMU_MIF2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF3_CMU_MIF3_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF3_CMU_MIF3_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU0_CMU_NPU0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, 
EMPTY_CAL_ID), + CLK_MUX2(NPU0_CMU_NPU0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU1_CMU_NPU1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU1_CMU_NPU1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC0_CMU_PERIC0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC0_CMU_PERIC0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC1_CMU_PERIC1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC1_CMU_PERIC1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIS_CMU_PERIS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIS_CMU_PERIS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VRA2_CMU_VRA2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VRA2_CMU_VRA2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VTS_CMU_VTS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VTS_CMU_VTS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), CLK_MUX(MUX_CLKCMU_APM_BUS_USER, cmucal_mux_clkcmu_apm_bus_user_parents, PLL_CON0_MUX_CLKCMU_APM_BUS_USER_MUX_SEL, PLL_CON0_MUX_CLKCMU_APM_BUS_USER_BUSY, PLL_CON2_MUX_CLKCMU_APM_BUS_USER_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_DLL_USER, cmucal_mux_dll_user_parents, PLL_CON0_MUX_DLL_USER_MUX_SEL, PLL_CON0_MUX_DLL_USER_BUSY, PLL_CON2_MUX_DLL_USER_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLKMUX_APM_RCO_USER, cmucal_mux_clkmux_apm_rco_user_parents, PLL_CON0_MUX_CLKMUX_APM_RCO_USER_MUX_SEL, PLL_CON0_MUX_CLKMUX_APM_RCO_USER_BUSY, PLL_CON2_MUX_CLKMUX_APM_RCO_USER_ENABLE_AUTOMATIC_CLKGATING), diff --git a/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c b/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c index bde09430d294..d3f12dc6e673 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c +++ b/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c @@ -3910,825 
+3910,825 @@ struct vclk cmucal_vclk_list[] = { CMUCAL_VCLK(VCLK_BLK_VRA2, cmucal_vclk_blk_vra2_lut, cmucal_vclk_blk_vra2, NULL, NULL), /* GATE VCLK*/ - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_APM, NULL, cmucal_vclk_ip_lhs_axi_d_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_APM, NULL, cmucal_vclk_ip_lhm_axi_p_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_APM, NULL, cmucal_vclk_ip_wdt_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_APM, NULL, cmucal_vclk_ip_sysreg_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_AP, NULL, cmucal_vclk_ip_mailbox_apm_ap, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_PMU_ALIVE, NULL, cmucal_vclk_ip_apbif_pmu_alive, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_INTMEM, NULL, cmucal_vclk_ip_intmem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_MODEM, NULL, cmucal_vclk_ip_lhm_axi_c_modem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_SCAN2DRAM, NULL, cmucal_vclk_ip_lhs_axi_g_scan2dram, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PMU_INTR_GEN, NULL, cmucal_vclk_ip_pmu_intr_gen, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PEM, NULL, cmucal_vclk_ip_pem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPEEDY_APM, NULL, cmucal_vclk_ip_speedy_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_DP_APM, NULL, cmucal_vclk_ip_xiu_dp_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APM_CMU_APM, NULL, cmucal_vclk_ip_apm_cmu_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_APM, NULL, cmucal_vclk_ip_vgen_lite_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GREBEINTEGRATION, NULL, cmucal_vclk_ip_grebeintegration, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_GPIO_ALIVE, NULL, cmucal_vclk_ip_apbif_gpio_alive, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_TOP_RTC, NULL, cmucal_vclk_ip_apbif_top_rtc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_CP, NULL, cmucal_vclk_ip_mailbox_ap_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_CP_S, NULL, cmucal_vclk_ip_mailbox_ap_cp_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GREBEINTEGRATION_DBGCORE, NULL, cmucal_vclk_ip_grebeintegration_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DTZPC_APM, NULL, 
cmucal_vclk_ip_dtzpc_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_VTS, NULL, cmucal_vclk_ip_lhm_axi_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_VTS, NULL, cmucal_vclk_ip_mailbox_apm_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_DBGCORE, NULL, cmucal_vclk_ip_mailbox_ap_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhs_axi_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_CP, NULL, cmucal_vclk_ip_mailbox_apm_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_RTC, NULL, cmucal_vclk_ip_apbif_rtc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhs_axi_c_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPEEDY_SUB_APM, NULL, cmucal_vclk_ip_speedy_sub_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AUD_CMU_AUD, NULL, cmucal_vclk_ip_aud_cmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_AUD, NULL, cmucal_vclk_ip_lhs_axi_d_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_AUD, NULL, cmucal_vclk_ip_ppmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_AUD, NULL, cmucal_vclk_ip_sysreg_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ABOX, NULL, cmucal_vclk_ip_abox, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhs_atb_t0_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_AUD, NULL, cmucal_vclk_ip_gpio_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_32TO128, NULL, cmucal_vclk_ip_axi_us_32to128, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_AUD, NULL, cmucal_vclk_ip_btm_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERI_AXI_ASB, NULL, cmucal_vclk_ip_peri_axi_asb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_AUD, NULL, cmucal_vclk_ip_lhm_axi_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_AUD, NULL, cmucal_vclk_ip_wdt_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC, NULL, cmucal_vclk_ip_dmic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_AUD, NULL, cmucal_vclk_ip_trex_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DFTMUX_AUD, NULL, 
cmucal_vclk_ip_dftmux_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SMMU_AUD, NULL, cmucal_vclk_ip_smmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_AUD, NULL, cmucal_vclk_ip_wrap2_conv_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_AUD, NULL, cmucal_vclk_ip_xiu_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SMMU_AUD, NULL, cmucal_vclk_ip_ad_apb_smmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_AUD, NULL, cmucal_vclk_ip_axi2apb_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SMMU_AUD_S, NULL, cmucal_vclk_ip_ad_apb_smmu_aud_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhs_atb_t1_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_AUD, NULL, cmucal_vclk_ip_vgen_lite_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSC_CMU_BUSC, NULL, cmucal_vclk_ip_busc_cmu_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSCP0, NULL, cmucal_vclk_ip_axi2apb_buscp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSC_TDP, NULL, cmucal_vclk_ip_axi2apb_busc_tdp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_BUSC, NULL, cmucal_vclk_ip_sysreg_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_CMUTOPC, NULL, cmucal_vclk_ip_busif_cmutopc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_D0_BUSC, NULL, cmucal_vclk_ip_trex_d0_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P_BUSC, NULL, cmucal_vclk_ip_trex_p_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF0, NULL, cmucal_vclk_ip_lhs_axi_p_mif0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhs_axi_p_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhs_axi_p_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhs_axi_p_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhs_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhs_axi_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhs_axi_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASYNCSFR_WR_SMC, NULL, 
cmucal_vclk_ip_asyncsfr_wr_smc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhs_axi_d_ivasc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhm_acel_d0_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhm_acel_d1_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhm_acel_d2_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhm_acel_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhm_acel_d_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhm_acel_d_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhm_axi_d0_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhm_axi_d0_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_d_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhm_axi_d1_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhm_axi_d1_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhm_axi_d2_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d0_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DPU, NULL, cmucal_vclk_ip_lhs_axi_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_p_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhs_axi_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhs_axi_p_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_G2D, NULL, cmucal_vclk_ip_lhs_axi_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_p_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_p_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_IVA, NULL, cmucal_vclk_ip_lhs_axi_p_iva, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MFC, NULL, cmucal_vclk_ip_lhs_axi_p_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhm_acel_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_APM, NULL, cmucal_vclk_ip_lhm_axi_d_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d1_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhs_axi_p_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SIREX, NULL, cmucal_vclk_ip_sirex, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d0_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d1_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_d_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_RB_BUSC, NULL, cmucal_vclk_ip_trex_rb_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPFW, NULL, cmucal_vclk_ip_ppfw, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_BUSC, NULL, cmucal_vclk_ip_wrap2_conv_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_PDMA0, NULL, cmucal_vclk_ip_vgen_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_BUSC, NULL, cmucal_vclk_ip_vgen_lite_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_BUSC, NULL, cmucal_vclk_ip_hpm_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMBUSC, NULL, cmucal_vclk_ip_busif_hpmbusc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PDMA0, NULL, cmucal_vclk_ip_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SBIC, NULL, cmucal_vclk_ip_sbic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPDMA, NULL, cmucal_vclk_ip_spdma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DIT, NULL, cmucal_vclk_ip_ad_apb_dit, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DIT, NULL, cmucal_vclk_ip_dit, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_BUSC, NULL, cmucal_vclk_ip_d_tzpc_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_NPU, NULL, cmucal_vclk_ip_lhs_axi_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MMCACHE, NULL, cmucal_vclk_ip_mmcache, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_TREX_D1_BUSC, NULL, cmucal_vclk_ip_trex_d1_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSCP1, NULL, cmucal_vclk_ip_axi2apb_buscp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_AUD, NULL, cmucal_vclk_ip_lhm_axi_d_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_AUD, NULL, cmucal_vclk_ip_lhs_axi_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhs_dbg_g_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VTS, NULL, cmucal_vclk_ip_lhm_axi_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_VTS, NULL, cmucal_vclk_ip_lhs_axi_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_SPDMA, NULL, cmucal_vclk_ip_qe_spdma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_PDMA0, NULL, cmucal_vclk_ip_qe_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_BUSC, NULL, cmucal_vclk_ip_xiu_d_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_P_VTS, NULL, cmucal_vclk_ip_baaw_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_64TO128, NULL, cmucal_vclk_ip_axi_us_64to128, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_P_NPU, NULL, cmucal_vclk_ip_baaw_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhm_axi_d_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CMGP_CMU_CMGP, NULL, cmucal_vclk_ip_cmgp_cmu_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADC_CMGP, NULL, cmucal_vclk_ip_adc_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_CMGP, NULL, cmucal_vclk_ip_gpio_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP0, NULL, cmucal_vclk_ip_i2c_cmgp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP1, NULL, cmucal_vclk_ip_i2c_cmgp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP2, NULL, cmucal_vclk_ip_i2c_cmgp2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP3, NULL, cmucal_vclk_ip_i2c_cmgp3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP, NULL, cmucal_vclk_ip_sysreg_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP0, NULL, cmucal_vclk_ip_usi_cmgp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP1, NULL, cmucal_vclk_ip_usi_cmgp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP2, NULL, 
cmucal_vclk_ip_usi_cmgp2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP3, NULL, cmucal_vclk_ip_usi_cmgp3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2CP, NULL, cmucal_vclk_ip_sysreg_cmgp2cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2PMU_AP, NULL, cmucal_vclk_ip_sysreg_cmgp2pmu_ap, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DTZPC_CMGP, NULL, cmucal_vclk_ip_dtzpc_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhm_axi_c_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2APM, NULL, cmucal_vclk_ip_sysreg_cmgp2apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CORE_CMU_CORE, NULL, cmucal_vclk_ip_core_cmu_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CORE, NULL, cmucal_vclk_ip_sysreg_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_0, NULL, cmucal_vclk_ip_axi2apb_core_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE2AXI_0, NULL, cmucal_vclk_ip_mpace2axi_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE2AXI_1, NULL, cmucal_vclk_ip_mpace2axi_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DEBUG_CCI, NULL, cmucal_vclk_ip_ppc_debug_cci, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P0_CORE, NULL, cmucal_vclk_ip_trex_p0_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL2_0, NULL, cmucal_vclk_ip_ppmu_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G0_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g0_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G1_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g1_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G2_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g2_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G3_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g3_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T_BDU, NULL, cmucal_vclk_ip_lhs_atb_t_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_G_BDU, NULL, cmucal_vclk_ip_adm_apb_g_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BDU, NULL, cmucal_vclk_ip_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P1_CORE, NULL, cmucal_vclk_ip_trex_p1_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_TP, NULL, cmucal_vclk_ip_axi2apb_core_tp, NULL, NULL), 
- CMUCAL_VCLK(VCLK_IP_PPFW_G3D, NULL, cmucal_vclk_ip_ppfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_G3D, NULL, cmucal_vclk_ip_lhs_axi_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_CP, NULL, cmucal_vclk_ip_lhm_axi_d0_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D0_G3D, NULL, cmucal_vclk_ip_lhm_ace_d0_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D1_G3D, NULL, cmucal_vclk_ip_lhm_ace_d1_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D2_G3D, NULL, cmucal_vclk_ip_lhm_ace_d2_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D3_G3D, NULL, cmucal_vclk_ip_lhm_ace_d3_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_D_CORE, NULL, cmucal_vclk_ip_trex_d_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CORE, NULL, cmucal_vclk_ip_hpm_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCORE, NULL, cmucal_vclk_ip_busif_hpmcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D0_G3D, NULL, cmucal_vclk_ip_bps_d0_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D1_G3D, NULL, cmucal_vclk_ip_bps_d1_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D2_G3D, NULL, cmucal_vclk_ip_bps_d2_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D3_G3D, NULL, cmucal_vclk_ip_bps_d3_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPCFW_G3D, NULL, cmucal_vclk_ip_ppcfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CP, NULL, cmucal_vclk_ip_lhs_axi_p_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_PPFW_G3D, NULL, cmucal_vclk_ip_apb_async_ppfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_CP, NULL, cmucal_vclk_ip_baaw_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_P_G3D, NULL, cmucal_vclk_ip_bps_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_APM, NULL, cmucal_vclk_ip_lhs_axi_p_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL2_1, NULL, cmucal_vclk_ip_ppmu_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CORE, NULL, cmucal_vclk_ip_d_tzpc_core, 
NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_1, NULL, cmucal_vclk_ip_axi2apb_core_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_CORE, NULL, cmucal_vclk_ip_xiu_p_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL2_0, NULL, cmucal_vclk_ip_ppc_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL2_1, NULL, cmucal_vclk_ip_ppc_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D0, NULL, cmucal_vclk_ip_ppc_g3d0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D1, NULL, cmucal_vclk_ip_ppc_g3d1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D2, NULL, cmucal_vclk_ip_ppc_g3d2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D3, NULL, cmucal_vclk_ip_ppc_g3d3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_IRPS0, NULL, cmucal_vclk_ip_ppc_irps0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_IRPS1, NULL, cmucal_vclk_ip_ppc_irps1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_CP, NULL, cmucal_vclk_ip_lhm_axi_d1_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_L_CORE, NULL, cmucal_vclk_ip_lhs_axi_l_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_2, NULL, cmucal_vclk_ip_axi2apb_core_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_L_CORE, NULL, cmucal_vclk_ip_lhm_axi_l_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL0_0, NULL, cmucal_vclk_ip_ppc_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL0_1, NULL, cmucal_vclk_ip_ppc_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL0_0, NULL, cmucal_vclk_ip_ppmu_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL0_1, NULL, cmucal_vclk_ip_ppmu_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhm_dbg_g_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D0_MIF, NULL, cmucal_vclk_ip_mpace_asb_d0_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D1_MIF, NULL, cmucal_vclk_ip_mpace_asb_d1_mif, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D2_MIF, NULL, cmucal_vclk_ip_mpace_asb_d2_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D3_MIF, NULL, cmucal_vclk_ip_mpace_asb_d3_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_ASB_CSSYS, NULL, cmucal_vclk_ip_axi_asb_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CCI, NULL, cmucal_vclk_ip_cci, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CPUCL0, NULL, cmucal_vclk_ip_axi2apb_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CPUCL0, NULL, cmucal_vclk_ip_sysreg_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCPUCL0, NULL, cmucal_vclk_ip_busif_hpmcpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CSSYS, NULL, cmucal_vclk_ip_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhm_atb_t0_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T_BDU, NULL, cmucal_vclk_ip_lhm_atb_t_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t2_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t3_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SECJTAG, NULL, cmucal_vclk_ip_secjtag, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t1_cluster0, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_LHS_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t2_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t3_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_G_CLUSTER0, NULL, cmucal_vclk_ip_adm_apb_g_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL0_CMU_CPUCL0, NULL, cmucal_vclk_ip_cpucl0_cmu_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL0, NULL, cmucal_vclk_ip_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t4_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t5_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t4_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t5_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CPUCL0, NULL, cmucal_vclk_ip_d_tzpc_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhm_atb_t1_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_int_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_int_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_int_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_int_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_CPUCL0, NULL, cmucal_vclk_ip_xiu_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_DP_CSSYS, NULL, cmucal_vclk_ip_xiu_dp_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_CPUCL0, NULL, cmucal_vclk_ip_trex_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_32TO64_G_DBGCORE, NULL, cmucal_vclk_ip_axi_us_32to64_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_cssys, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_HPM_CPUCL0_1, NULL, cmucal_vclk_ip_hpm_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL0_0, NULL, cmucal_vclk_ip_hpm_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_P_CSSYS_0, NULL, cmucal_vclk_ip_apb_async_p_cssys_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhs_axi_g_int_etr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhm_axi_g_int_etr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_P_CSSYS, NULL, cmucal_vclk_ip_axi2apb_p_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_CPUCL0, NULL, cmucal_vclk_ip_bps_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL1_CMU_CPUCL1, NULL, cmucal_vclk_ip_cpucl1_cmu_cpucl1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL1, NULL, cmucal_vclk_ip_cpucl1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL2_CMU_CPUCL2, NULL, cmucal_vclk_ip_cpucl2_cmu_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CPUCL2, NULL, cmucal_vclk_ip_sysreg_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCPUCL2, NULL, cmucal_vclk_ip_busif_hpmcpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_0, NULL, cmucal_vclk_ip_hpm_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CLUSTER2, NULL, cmucal_vclk_ip_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CPUCL2, NULL, cmucal_vclk_ip_axi2apb_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_1, NULL, cmucal_vclk_ip_hpm_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_2, NULL, cmucal_vclk_ip_hpm_cpucl2_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CPUCL2, NULL, cmucal_vclk_ip_d_tzpc_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DPU_CMU_DPU, NULL, cmucal_vclk_ip_dpu_cmu_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD0, NULL, cmucal_vclk_ip_btm_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD1, NULL, cmucal_vclk_ip_btm_dpud1, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_SYSREG_DPU, NULL, cmucal_vclk_ip_sysreg_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DPUP1, NULL, cmucal_vclk_ip_axi2apb_dpup1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DPUP0, NULL, cmucal_vclk_ip_axi2apb_dpup0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_sysmmu_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DPU, NULL, cmucal_vclk_ip_lhm_axi_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhs_axi_d1_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_DPU, NULL, cmucal_vclk_ip_xiu_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON0, NULL, cmucal_vclk_ip_ad_apb_decon0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON1, NULL, cmucal_vclk_ip_ad_apb_decon1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MIPI_DSIM1, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPP, NULL, cmucal_vclk_ip_ad_apb_dpp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhs_axi_d2_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD2, NULL, cmucal_vclk_ip_btm_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_sysmmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_DMA, NULL, cmucal_vclk_ip_ad_apb_dpu_dma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_WB_MUX, NULL, cmucal_vclk_ip_ad_apb_dpu_wb_mux, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_sysmmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD0, NULL, cmucal_vclk_ip_ppmu_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD1, NULL, cmucal_vclk_ip_ppmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD2, NULL, cmucal_vclk_ip_ppmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MIPI_DSIM0, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON2, NULL, cmucal_vclk_ip_ad_apb_decon2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD0_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD1_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD2_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DPU, NULL, cmucal_vclk_ip_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAPPER_FOR_S5I6280_HSI_DCPHY_COMBO_TOP, NULL, cmucal_vclk_ip_wrapper_for_s5i6280_hsi_dcphy_combo_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_DMA_PGEN, NULL, cmucal_vclk_ip_ad_apb_dpu_dma_pgen, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhs_axi_d0_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DPU, NULL, cmucal_vclk_ip_d_tzpc_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MCD, NULL, cmucal_vclk_ip_ad_apb_mcd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DSPM_CMU_DSPM, NULL, cmucal_vclk_ip_dspm_cmu_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_DSPM, NULL, cmucal_vclk_ip_sysreg_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DSPM, NULL, cmucal_vclk_ip_axi2apb_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DSPM0, NULL, cmucal_vclk_ip_ppmu_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DSPM0, NULL, cmucal_vclk_ip_sysmmu_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DSPM0, NULL, cmucal_vclk_ip_btm_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhm_axi_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d0_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhm_axi_p_ivadspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhs_axi_p_dspmiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_DSPM, NULL, cmucal_vclk_ip_wrap2_conv_dspm, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM0, NULL, cmucal_vclk_ip_ad_apb_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM1, NULL, cmucal_vclk_ip_ad_apb_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM3, NULL, cmucal_vclk_ip_ad_apb_dspm3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_AXI_DSPM0, NULL, cmucal_vclk_ip_ad_axi_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DSPM1, NULL, cmucal_vclk_ip_btm_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d1_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhs_axi_p_dspmdsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DSPM1, NULL, cmucal_vclk_ip_ppmu_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DSPM1, NULL, cmucal_vclk_ip_sysmmu_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_DSPM, NULL, cmucal_vclk_ip_adm_apb_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhm_axi_d0_dspsdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_DSPM, NULL, cmucal_vclk_ip_xiu_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_DSPM, NULL, cmucal_vclk_ip_vgen_lite_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM2, NULL, cmucal_vclk_ip_ad_apb_dspm2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SCORE_TS_II, NULL, cmucal_vclk_ip_score_ts_ii, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DSPM, NULL, cmucal_vclk_ip_d_tzpc_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhm_ast_isppredspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhm_ast_isplpdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhm_ast_isphqdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhs_ast_dspmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhs_ast_dspmisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_DSPM, NULL, cmucal_vclk_ip_xiu_d_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_DSPM, NULL, cmucal_vclk_ip_baaw_dspm, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhs_axi_d_dspmnpu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DSPS_CMU_DSPS, NULL, cmucal_vclk_ip_dsps_cmu_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DSPS, NULL, cmucal_vclk_ip_axi2apb_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhm_axi_p_dspmdsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_DSPS, NULL, cmucal_vclk_ip_sysreg_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhs_axi_d_dspsiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhs_axi_d0_dspsdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SCORE_BARON, NULL, cmucal_vclk_ip_score_baron, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhm_axi_d_ivadsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DSPS, NULL, cmucal_vclk_ip_d_tzpc_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_DSPS, NULL, cmucal_vclk_ip_vgen_lite_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS0_CMU_FSYS0, NULL, cmucal_vclk_ip_fsys0_cmu_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhs_acel_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhm_axi_p_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_FSYS0, NULL, cmucal_vclk_ip_gpio_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_FSYS0, NULL, cmucal_vclk_ip_sysreg_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_FSYS0, NULL, cmucal_vclk_ip_xiu_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_FSYS0, NULL, cmucal_vclk_ip_btm_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DP_LINK, NULL, cmucal_vclk_ip_dp_link, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_FSYS0, NULL, cmucal_vclk_ip_vgen_lite_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_USB, NULL, cmucal_vclk_ip_lhm_axi_d_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_USB, NULL, cmucal_vclk_ip_lhs_axi_p_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_FSYS0, NULL, cmucal_vclk_ip_ppmu_fsys0, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_PCIE_GEN3A, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_PCIE_GEN3B, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3b, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P0_FSYS0, NULL, cmucal_vclk_ip_xiu_p0_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_GEN3, NULL, cmucal_vclk_ip_pcie_gen3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN3A, NULL, cmucal_vclk_ip_pcie_ia_gen3a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN3B, NULL, cmucal_vclk_ip_pcie_ia_gen3b, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_FSYS0, NULL, cmucal_vclk_ip_d_tzpc_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS0A_CMU_FSYS0A, NULL, cmucal_vclk_ip_fsys0a_cmu_fsys0a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USB31DRD, NULL, cmucal_vclk_ip_usb31drd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_USB, NULL, cmucal_vclk_ip_lhm_axi_p_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_USB, NULL, cmucal_vclk_ip_lhs_axi_d_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS1_CMU_FSYS1, NULL, cmucal_vclk_ip_fsys1_cmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MMC_CARD, NULL, cmucal_vclk_ip_mmc_card, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_GEN2, NULL, cmucal_vclk_ip_pcie_gen2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SSS, NULL, cmucal_vclk_ip_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_RTIC, NULL, cmucal_vclk_ip_rtic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_FSYS1, NULL, cmucal_vclk_ip_sysreg_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_FSYS1, NULL, cmucal_vclk_ip_gpio_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhs_acel_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhm_axi_p_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_FSYS1, NULL, cmucal_vclk_ip_xiu_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_FSYS1, NULL, cmucal_vclk_ip_xiu_p_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_FSYS1, NULL, cmucal_vclk_ip_ppmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_FSYS1, NULL, cmucal_vclk_ip_btm_fsys1, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_UFS_CARD, NULL, cmucal_vclk_ip_ufs_card, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_AHB_SSS, NULL, cmucal_vclk_ip_adm_ahb_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_FSYS1, NULL, cmucal_vclk_ip_sysmmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_FSYS1, NULL, cmucal_vclk_ip_vgen_lite_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN2, NULL, cmucal_vclk_ip_pcie_ia_gen2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_FSYS1, NULL, cmucal_vclk_ip_d_tzpc_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UFS_EMBD, NULL, cmucal_vclk_ip_ufs_embd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PUF, NULL, cmucal_vclk_ip_puf, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_RTIC, NULL, cmucal_vclk_ip_qe_rtic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_SSS, NULL, cmucal_vclk_ip_qe_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_SSS, NULL, cmucal_vclk_ip_baaw_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G2D_CMU_G2D, NULL, cmucal_vclk_ip_g2d_cmu_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD0, NULL, cmucal_vclk_ip_ppmu_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD1, NULL, cmucal_vclk_ip_ppmu_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD0, NULL, cmucal_vclk_ip_sysmmu_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_G2D, NULL, cmucal_vclk_ip_sysreg_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhs_acel_d0_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhs_acel_d1_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_G2D, NULL, cmucal_vclk_ip_lhm_axi_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_G2D, NULL, cmucal_vclk_ip_as_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G2DP0, NULL, cmucal_vclk_ip_axi2apb_g2dp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD0, NULL, cmucal_vclk_ip_btm_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD1, NULL, cmucal_vclk_ip_btm_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_G2D, NULL, cmucal_vclk_ip_xiu_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G2DP1, NULL, 
cmucal_vclk_ip_axi2apb_g2dp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD2, NULL, cmucal_vclk_ip_btm_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_JPEG, NULL, cmucal_vclk_ip_qe_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_MSCL, NULL, cmucal_vclk_ip_qe_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD2, NULL, cmucal_vclk_ip_sysmmu_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD2, NULL, cmucal_vclk_ip_ppmu_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhs_acel_d2_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_JPEG, NULL, cmucal_vclk_ip_as_p_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_G2D, NULL, cmucal_vclk_ip_xiu_d_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_MSCL, NULL, cmucal_vclk_ip_as_p_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_ASTC, NULL, cmucal_vclk_ip_as_p_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_ASTC, NULL, cmucal_vclk_ip_qe_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_G2D, NULL, cmucal_vclk_ip_vgen_lite_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G2D, NULL, cmucal_vclk_ip_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD1, NULL, cmucal_vclk_ip_sysmmu_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_JPEG, NULL, cmucal_vclk_ip_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MSCL, NULL, cmucal_vclk_ip_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASTC, NULL, cmucal_vclk_ip_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_JSQZ, NULL, 
cmucal_vclk_ip_as_p_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_JSQZ, NULL, cmucal_vclk_ip_qe_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_G2D, NULL, cmucal_vclk_ip_d_tzpc_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_JSQZ, NULL, cmucal_vclk_ip_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_G3D, NULL, cmucal_vclk_ip_xiu_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_G3D, NULL, cmucal_vclk_ip_lhm_axi_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMG3D, NULL, cmucal_vclk_ip_busif_hpmg3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_G3D0, NULL, cmucal_vclk_ip_hpm_g3d0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_G3D, NULL, cmucal_vclk_ip_sysreg_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G3D_CMU_G3D, NULL, cmucal_vclk_ip_g3d_cmu_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhs_axi_g3dsfr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_G3D, NULL, cmucal_vclk_ip_vgen_lite_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPU, NULL, cmucal_vclk_ip_gpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G3D, NULL, cmucal_vclk_ip_axi2apb_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhm_axi_g3dsfr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GRAY2BIN_G3D, NULL, cmucal_vclk_ip_gray2bin_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_G3D, NULL, cmucal_vclk_ip_d_tzpc_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASB_G3D, NULL, cmucal_vclk_ip_asb_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_p_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_d_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPHQ, NULL, cmucal_vclk_ip_is_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPHQ, NULL, cmucal_vclk_ip_sysreg_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPHQ_CMU_ISPHQ, NULL, cmucal_vclk_ip_isphq_cmu_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhm_atb_isppreisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPHQISPLP, NULL, 
cmucal_vclk_ip_lhs_atb_isphqisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPHQ, NULL, cmucal_vclk_ip_btm_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhm_atb_vo_isplpisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhs_ast_vo_isphqisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPHQ, NULL, cmucal_vclk_ip_d_tzpc_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhs_ast_isphqdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_p_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d0_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPLP0, NULL, cmucal_vclk_ip_btm_isplp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPLP, NULL, cmucal_vclk_ip_is_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPLP, NULL, cmucal_vclk_ip_sysreg_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPLP_CMU_ISPLP, NULL, cmucal_vclk_ip_isplp_cmu_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPLP1, NULL, cmucal_vclk_ip_btm_isplp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d1_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhm_atb_isphqisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_ast_vo_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_atb_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhs_atb_vo_isplpisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPLP, NULL, cmucal_vclk_ip_d_tzpc_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhs_ast_isplpdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhm_ast_dspmisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhs_axi_p_isplpvra2, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d_vra2isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPPRE, NULL, cmucal_vclk_ip_is_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_d_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPPRE, NULL, cmucal_vclk_ip_btm_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_p_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPPRE, NULL, cmucal_vclk_ip_sysreg_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPPRE_CMU_ISPPRE, NULL, cmucal_vclk_ip_isppre_cmu_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_atb_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhs_atb_isppreisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPPRE, NULL, cmucal_vclk_ip_d_tzpc_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhs_ast_isppredspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhm_ast_dspmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMISPPRE, NULL, cmucal_vclk_ip_busif_hpmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_ISPPRE, NULL, cmucal_vclk_ip_hpm_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPPRE1, NULL, cmucal_vclk_ip_d_tzpc_isppre1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_ast_vo_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhm_ast_vo_isphqisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA_CMU_IVA, NULL, cmucal_vclk_ip_iva_cmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhs_acel_d_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhs_axi_d_ivadsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhs_axi_p_ivadspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhm_axi_p_dspmiva, 
NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_IVA, NULL, cmucal_vclk_ip_lhm_axi_p_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_IVA, NULL, cmucal_vclk_ip_btm_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_IVA, NULL, cmucal_vclk_ip_ppmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_IVA, NULL, cmucal_vclk_ip_sysmmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_IVA, NULL, cmucal_vclk_ip_xiu_p_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA0, NULL, cmucal_vclk_ip_ad_apb_iva0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_2M_IVA, NULL, cmucal_vclk_ip_axi2apb_2m_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_IVA, NULL, cmucal_vclk_ip_axi2apb_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_IVA, NULL, cmucal_vclk_ip_sysreg_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhm_axi_d_ivasc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_DAP_IVA, NULL, cmucal_vclk_ip_adm_dap_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhm_axi_d_dspsiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA1, NULL, cmucal_vclk_ip_ad_apb_iva1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA2, NULL, cmucal_vclk_ip_ad_apb_iva2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_IVA, NULL, cmucal_vclk_ip_vgen_lite_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA, NULL, cmucal_vclk_ip_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA_INTMEM, NULL, cmucal_vclk_ip_iva_intmem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D0_IVA, NULL, cmucal_vclk_ip_xiu_d0_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D1_IVA, NULL, cmucal_vclk_ip_xiu_d1_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_IVA, NULL, cmucal_vclk_ip_d_tzpc_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D2_IVA, NULL, cmucal_vclk_ip_xiu_d2_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_RB1_IVA, NULL, cmucal_vclk_ip_trex_rb1_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_IVA, NULL, cmucal_vclk_ip_qe_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_IVA, NULL, cmucal_vclk_ip_wrap2_conv_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MFC_CMU_MFC, 
NULL, cmucal_vclk_ip_mfc_cmu_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_MFC, NULL, cmucal_vclk_ip_as_apb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MFC, NULL, cmucal_vclk_ip_axi2apb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MFC, NULL, cmucal_vclk_ip_sysreg_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhs_axi_d0_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhs_axi_d1_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MFC, NULL, cmucal_vclk_ip_lhm_axi_p_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_MFCD0, NULL, cmucal_vclk_ip_sysmmu_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_MFCD1, NULL, cmucal_vclk_ip_sysmmu_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD0, NULL, cmucal_vclk_ip_ppmu_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD1, NULL, cmucal_vclk_ip_ppmu_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_MFCD0, NULL, cmucal_vclk_ip_btm_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_MFCD1, NULL, cmucal_vclk_ip_btm_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_NS_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_NS_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_S_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_S_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_WFD_NS, NULL, cmucal_vclk_ip_as_apb_wfd_ns, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_AXI_WFD, NULL, cmucal_vclk_ip_as_axi_wfd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD2, NULL, cmucal_vclk_ip_ppmu_mfcd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_MFC, NULL, cmucal_vclk_ip_xiu_d_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_WFD_S, NULL, cmucal_vclk_ip_as_apb_wfd_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_MFC, NULL, cmucal_vclk_ip_vgen_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MFC, NULL, cmucal_vclk_ip_mfc, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_WFD, NULL, cmucal_vclk_ip_wfd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LH_ATB_MFC, NULL, cmucal_vclk_ip_lh_atb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_MFC, NULL, cmucal_vclk_ip_d_tzpc_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF_CMU_MIF, NULL, cmucal_vclk_ip_mif_cmu_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY, NULL, cmucal_vclk_ip_ddrphy, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF, NULL, cmucal_vclk_ip_sysreg_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF, NULL, cmucal_vclk_ip_busif_hpmmif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF, NULL, cmucal_vclk_ip_lhm_axi_p_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF, NULL, cmucal_vclk_ip_axi2apb_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DVFS, NULL, cmucal_vclk_ip_ppc_dvfs, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DEBUG, NULL, cmucal_vclk_ip_ppc_debug, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY, NULL, cmucal_vclk_ip_apbbr_ddrphy, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC, NULL, cmucal_vclk_ip_apbbr_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ, NULL, cmucal_vclk_ip_apbbr_dmctz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF, NULL, cmucal_vclk_ip_hpm_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC, NULL, cmucal_vclk_ip_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPC_DEBUG, NULL, cmucal_vclk_ip_qch_adapter_ppc_debug, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPC_DVFS, NULL, cmucal_vclk_ip_qch_adapter_ppc_dvfs, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_MIF, NULL, cmucal_vclk_ip_d_tzpc_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF1, NULL, cmucal_vclk_ip_hpm_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF1_CMU_MIF1, NULL, cmucal_vclk_ip_mif1_cmu_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY1, NULL, cmucal_vclk_ip_apbbr_ddrphy1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC1, NULL, cmucal_vclk_ip_apbbr_dmc1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ1, NULL, cmucal_vclk_ip_apbbr_dmctz1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF1, NULL, 
cmucal_vclk_ip_axi2apb_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF1, NULL, cmucal_vclk_ip_busif_hpmmif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY1, NULL, cmucal_vclk_ip_ddrphy1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC1, NULL, cmucal_vclk_ip_dmc1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhm_axi_p_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_ppmuppc_debug1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_ppmuppc_dvfs1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF1, NULL, cmucal_vclk_ip_sysreg_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF2, NULL, cmucal_vclk_ip_hpm_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY2, NULL, cmucal_vclk_ip_apbbr_ddrphy2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC2, NULL, cmucal_vclk_ip_apbbr_dmc2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ2, NULL, cmucal_vclk_ip_apbbr_dmctz2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF2, NULL, cmucal_vclk_ip_axi2apb_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF2, NULL, cmucal_vclk_ip_busif_hpmmif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY2, NULL, cmucal_vclk_ip_ddrphy2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC2, NULL, cmucal_vclk_ip_dmc2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhm_axi_p_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_ppmuppc_debug2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_ppmuppc_dvfs2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF2, NULL, cmucal_vclk_ip_sysreg_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS2, NULL, 
cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF2_CMU_MIF2, NULL, cmucal_vclk_ip_mif2_cmu_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF3, NULL, cmucal_vclk_ip_hpm_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY3, NULL, cmucal_vclk_ip_apbbr_ddrphy3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC3, NULL, cmucal_vclk_ip_apbbr_dmc3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ3, NULL, cmucal_vclk_ip_apbbr_dmctz3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF3, NULL, cmucal_vclk_ip_axi2apb_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF3, NULL, cmucal_vclk_ip_busif_hpmmif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY3, NULL, cmucal_vclk_ip_ddrphy3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC3, NULL, cmucal_vclk_ip_dmc3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhm_axi_p_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_ppmuppc_debug3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_ppmuppc_dvfs3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF3, NULL, cmucal_vclk_ip_sysreg_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF3_CMU_MIF3, NULL, cmucal_vclk_ip_mif3_cmu_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhs_acel_d_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhs_axi_p_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU0_CMU_NPU0, NULL, cmucal_vclk_ip_npu0_cmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SI0, NULL, cmucal_vclk_ip_apb_async_si0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SMMU_NS, NULL, cmucal_vclk_ip_apb_async_smmu_ns, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_NPU0, NULL, cmucal_vclk_ip_axi2apb_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_NPU0, NULL, 
cmucal_vclk_ip_btm_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_NPU0, NULL, cmucal_vclk_ip_d_tzpc_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhm_ast_p_npu1_done, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhm_axi_d_dspmnpu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_NPU, NULL, cmucal_vclk_ip_lhm_axi_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_7, NULL, NULL), 
- CMUCAL_VCLK(VCLK_IP_LHS_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhs_ast_p_npud1_setreg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUC, NULL, cmucal_vclk_ip_npuc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUD_UNIT0, NULL, cmucal_vclk_ip_npud_unit0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUDMA, NULL, cmucal_vclk_ip_ppmu_cpudma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_RFM, NULL, cmucal_vclk_ip_ppmu_rfm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_CPUDMA, NULL, cmucal_vclk_ip_qe_cpudma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_RFM, NULL, cmucal_vclk_ip_qe_rfm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SMMU_NPU0, NULL, cmucal_vclk_ip_smmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_NPU0, NULL, cmucal_vclk_ip_sysreg_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_NPU0, NULL, cmucal_vclk_ip_xiu_d_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SMMU_S, NULL, cmucal_vclk_ip_apb_async_smmu_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_NPU0, NULL, cmucal_vclk_ip_vgen_lite_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_NPU0, NULL, cmucal_vclk_ip_ppmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU0_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu0_ppc_wrapper, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU1_CMU_NPU1, NULL, cmucal_vclk_ip_npu1_cmu_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhm_axi_p_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SI1, NULL, cmucal_vclk_ip_apb_async_si1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_NPU1, NULL, cmucal_vclk_ip_axi2apb_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_NPU1, NULL, cmucal_vclk_ip_d_tzpc_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_1, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhm_ast_p_npud1_setreg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_NPU1, NULL, cmucal_vclk_ip_sysreg_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhs_ast_p_npu1_done, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUD_UNIT1, NULL, cmucal_vclk_ip_npud_unit1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_NPU1, NULL, 
cmucal_vclk_ip_ppmu_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU1_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu1_ppc_wrapper, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_PERIC0, NULL, cmucal_vclk_ip_gpio_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PWM, NULL, cmucal_vclk_ip_pwm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIC0, NULL, cmucal_vclk_ip_sysreg_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI00_USI, NULL, cmucal_vclk_ip_usi00_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI01_USI, NULL, cmucal_vclk_ip_usi01_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI02_USI, NULL, cmucal_vclk_ip_usi02_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI03_USI, NULL, cmucal_vclk_ip_usi03_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC0P0, NULL, cmucal_vclk_ip_axi2apb_peric0p0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIC0_CMU_PERIC0, NULL, cmucal_vclk_ip_peric0_cmu_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI04_USI, NULL, cmucal_vclk_ip_usi04_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC0P1, NULL, cmucal_vclk_ip_axi2apb_peric0p1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI05_USI, NULL, cmucal_vclk_ip_usi05_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI00_I2C, NULL, cmucal_vclk_ip_usi00_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI01_I2C, NULL, cmucal_vclk_ip_usi01_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI02_I2C, NULL, cmucal_vclk_ip_usi02_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI03_I2C, NULL, cmucal_vclk_ip_usi03_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI04_I2C, NULL, cmucal_vclk_ip_usi04_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI05_I2C, NULL, cmucal_vclk_ip_usi05_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UART_DBG, NULL, cmucal_vclk_ip_uart_dbg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIC0, NULL, cmucal_vclk_ip_xiu_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhm_axi_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI12_USI, NULL, cmucal_vclk_ip_usi12_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI12_I2C, NULL, cmucal_vclk_ip_usi12_i2c, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_USI13_I2C, NULL, cmucal_vclk_ip_usi13_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI13_USI, NULL, cmucal_vclk_ip_usi13_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI14_USI, NULL, cmucal_vclk_ip_usi14_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI14_I2C, NULL, cmucal_vclk_ip_usi14_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIC0, NULL, cmucal_vclk_ip_d_tzpc_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI15_I2C, NULL, cmucal_vclk_ip_usi15_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI15_USI, NULL, cmucal_vclk_ip_usi15_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC1P1, NULL, cmucal_vclk_ip_axi2apb_peric1p1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_PERIC1, NULL, cmucal_vclk_ip_gpio_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIC1, NULL, cmucal_vclk_ip_sysreg_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UART_BT, NULL, cmucal_vclk_ip_uart_bt, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM1, NULL, cmucal_vclk_ip_i2c_cam1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM2, NULL, cmucal_vclk_ip_i2c_cam2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM3, NULL, cmucal_vclk_ip_i2c_cam3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI06_USI, NULL, cmucal_vclk_ip_usi06_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI07_USI, NULL, cmucal_vclk_ip_usi07_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI08_USI, NULL, cmucal_vclk_ip_usi08_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM0, NULL, cmucal_vclk_ip_i2c_cam0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIC1, NULL, cmucal_vclk_ip_xiu_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC1P0, NULL, cmucal_vclk_ip_axi2apb_peric1p0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIC1_CMU_PERIC1, NULL, cmucal_vclk_ip_peric1_cmu_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPI_CAM0, NULL, cmucal_vclk_ip_spi_cam0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI09_USI, NULL, cmucal_vclk_ip_usi09_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI06_I2C, NULL, cmucal_vclk_ip_usi06_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI10_USI, NULL, cmucal_vclk_ip_usi10_usi, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_USI07_I2C, NULL, cmucal_vclk_ip_usi07_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI08_I2C, NULL, cmucal_vclk_ip_usi08_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI09_I2C, NULL, cmucal_vclk_ip_usi09_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI10_I2C, NULL, cmucal_vclk_ip_usi10_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhm_axi_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI11_USI, NULL, cmucal_vclk_ip_usi11_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI11_I2C, NULL, cmucal_vclk_ip_usi11_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIC1, NULL, cmucal_vclk_ip_d_tzpc_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I3C, NULL, cmucal_vclk_ip_i3c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI16_USI, NULL, cmucal_vclk_ip_usi16_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI17_USI, NULL, cmucal_vclk_ip_usi17_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI16_I3C, NULL, cmucal_vclk_ip_usi16_i3c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI17_I2C, NULL, cmucal_vclk_ip_usi17_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERISP, NULL, cmucal_vclk_ip_axi2apb_perisp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIS, NULL, cmucal_vclk_ip_xiu_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIS, NULL, cmucal_vclk_ip_sysreg_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_CLUSTER2, NULL, cmucal_vclk_ip_wdt_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_CLUSTER0, NULL, cmucal_vclk_ip_wdt_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIS_CMU_PERIS, NULL, cmucal_vclk_ip_peris_cmu_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_AXI_P_PERIS, NULL, cmucal_vclk_ip_ad_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_BIRA, NULL, cmucal_vclk_ip_otp_con_bira, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GIC, NULL, cmucal_vclk_ip_gic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhm_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MCT, NULL, cmucal_vclk_ip_mct, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_TOP, NULL, 
cmucal_vclk_ip_otp_con_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIS, NULL, cmucal_vclk_ip_d_tzpc_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TMU_SUB, NULL, cmucal_vclk_ip_tmu_sub, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TMU_TOP, NULL, cmucal_vclk_ip_tmu_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_BISR, NULL, cmucal_vclk_ip_otp_con_bisr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_S2D_CMU_S2D, NULL, cmucal_vclk_ip_s2d_cmu_s2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VRA2_CMU_VRA2, NULL, cmucal_vclk_ip_vra2_cmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_VRA2, NULL, cmucal_vclk_ip_as_apb_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_VRA2, NULL, cmucal_vclk_ip_axi2apb_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_VRA2, NULL, cmucal_vclk_ip_d_tzpc_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhm_axi_p_isplpvra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d_vra2isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_VRA2, NULL, cmucal_vclk_ip_qe_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_VRA2, NULL, cmucal_vclk_ip_sysreg_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_VRA2, NULL, cmucal_vclk_ip_vgen_lite_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VRA2, NULL, cmucal_vclk_ip_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_STR, NULL, cmucal_vclk_ip_as_apb_str, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_VRA2, NULL, cmucal_vclk_ip_btm_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_VRA2, NULL, cmucal_vclk_ip_ppmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_VRA2, NULL, cmucal_vclk_ip_sysmmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_STR, NULL, cmucal_vclk_ip_str, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhs_axi_d_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_IF, NULL, cmucal_vclk_ip_dmic_if, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_VTS, NULL, cmucal_vclk_ip_sysreg_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VTS_CMU_VTS, NULL, cmucal_vclk_ip_vts_cmu_vts, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_AHB_BUSMATRIX, NULL, cmucal_vclk_ip_ahb_busmatrix, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_VTS, NULL, cmucal_vclk_ip_lhm_axi_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_VTS, NULL, cmucal_vclk_ip_gpio_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_VTS, NULL, cmucal_vclk_ip_wdt_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB0, NULL, cmucal_vclk_ip_dmic_ahb0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB1, NULL, cmucal_vclk_ip_dmic_ahb1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_C_VTS, NULL, cmucal_vclk_ip_lhs_axi_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASYNCINTERRUPT, NULL, cmucal_vclk_ip_asyncinterrupt, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC0, NULL, cmucal_vclk_ip_hwacg_sys_dmic0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC1, NULL, cmucal_vclk_ip_hwacg_sys_dmic1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SS_VTS_GLUE, NULL, cmucal_vclk_ip_ss_vts_glue, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CORTEXM4INTEGRATION, NULL, cmucal_vclk_ip_cortexm4integration, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_U_DMIC_CLK_MUX, NULL, cmucal_vclk_ip_u_dmic_clk_mux, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhm_axi_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VTS, NULL, cmucal_vclk_ip_lhs_axi_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_C_VTS, NULL, cmucal_vclk_ip_baaw_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_VTS, NULL, cmucal_vclk_ip_d_tzpc_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE, NULL, cmucal_vclk_ip_vgen_lite, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_LP_VTS, NULL, cmucal_vclk_ip_bps_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_P_VTS, NULL, cmucal_vclk_ip_bps_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XHB_LP_VTS, NULL, cmucal_vclk_ip_xhb_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XHB_P_VTS, NULL, cmucal_vclk_ip_xhb_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SWEEPER_C_VTS, NULL, cmucal_vclk_ip_sweeper_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SWEEPER_D_VTS, NULL, cmucal_vclk_ip_sweeper_d_vts, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_BAAW_D_VTS, NULL, cmucal_vclk_ip_baaw_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_ABOX_VTS, NULL, cmucal_vclk_ip_mailbox_abox_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB2, NULL, cmucal_vclk_ip_dmic_ahb2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB3, NULL, cmucal_vclk_ip_dmic_ahb3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC2, NULL, cmucal_vclk_ip_hwacg_sys_dmic2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC3, NULL, cmucal_vclk_ip_hwacg_sys_dmic3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_IF_3RD, NULL, cmucal_vclk_ip_dmic_if_3rd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_VTS, NULL, cmucal_vclk_ip_mailbox_ap_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TIMER, NULL, cmucal_vclk_ip_timer, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_APM, NULL, cmucal_vclk_ip_lhs_axi_d_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_APM, NULL, cmucal_vclk_ip_lhm_axi_p_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_APM, NULL, cmucal_vclk_ip_wdt_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_APM, NULL, cmucal_vclk_ip_sysreg_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_AP, NULL, cmucal_vclk_ip_mailbox_apm_ap, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_PMU_ALIVE, NULL, cmucal_vclk_ip_apbif_pmu_alive, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_INTMEM, NULL, cmucal_vclk_ip_intmem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_MODEM, NULL, cmucal_vclk_ip_lhm_axi_c_modem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_SCAN2DRAM, NULL, cmucal_vclk_ip_lhs_axi_g_scan2dram, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PMU_INTR_GEN, NULL, cmucal_vclk_ip_pmu_intr_gen, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PEM, NULL, cmucal_vclk_ip_pem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPEEDY_APM, NULL, cmucal_vclk_ip_speedy_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_DP_APM, NULL, cmucal_vclk_ip_xiu_dp_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APM_CMU_APM, NULL, cmucal_vclk_ip_apm_cmu_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_APM, NULL, cmucal_vclk_ip_vgen_lite_apm, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_GREBEINTEGRATION, NULL, cmucal_vclk_ip_grebeintegration, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_GPIO_ALIVE, NULL, cmucal_vclk_ip_apbif_gpio_alive, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_TOP_RTC, NULL, cmucal_vclk_ip_apbif_top_rtc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_CP, NULL, cmucal_vclk_ip_mailbox_ap_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_CP_S, NULL, cmucal_vclk_ip_mailbox_ap_cp_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GREBEINTEGRATION_DBGCORE, NULL, cmucal_vclk_ip_grebeintegration_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DTZPC_APM, NULL, cmucal_vclk_ip_dtzpc_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_VTS, NULL, cmucal_vclk_ip_lhm_axi_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_VTS, NULL, cmucal_vclk_ip_mailbox_apm_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_DBGCORE, NULL, cmucal_vclk_ip_mailbox_ap_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhs_axi_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_CP, NULL, cmucal_vclk_ip_mailbox_apm_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_RTC, NULL, cmucal_vclk_ip_apbif_rtc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhs_axi_c_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPEEDY_SUB_APM, NULL, cmucal_vclk_ip_speedy_sub_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AUD_CMU_AUD, NULL, cmucal_vclk_ip_aud_cmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_AUD, NULL, cmucal_vclk_ip_lhs_axi_d_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_AUD, NULL, cmucal_vclk_ip_ppmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_AUD, NULL, cmucal_vclk_ip_sysreg_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ABOX, NULL, cmucal_vclk_ip_abox, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhs_atb_t0_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_AUD, NULL, 
cmucal_vclk_ip_gpio_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_32TO128, NULL, cmucal_vclk_ip_axi_us_32to128, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_AUD, NULL, cmucal_vclk_ip_btm_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERI_AXI_ASB, NULL, cmucal_vclk_ip_peri_axi_asb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_AUD, NULL, cmucal_vclk_ip_lhm_axi_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_AUD, NULL, cmucal_vclk_ip_wdt_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC, NULL, cmucal_vclk_ip_dmic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_AUD, NULL, cmucal_vclk_ip_trex_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DFTMUX_AUD, NULL, cmucal_vclk_ip_dftmux_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SMMU_AUD, NULL, cmucal_vclk_ip_smmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_AUD, NULL, cmucal_vclk_ip_wrap2_conv_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_AUD, NULL, cmucal_vclk_ip_xiu_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SMMU_AUD, NULL, cmucal_vclk_ip_ad_apb_smmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_AUD, NULL, cmucal_vclk_ip_axi2apb_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SMMU_AUD_S, NULL, cmucal_vclk_ip_ad_apb_smmu_aud_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhs_atb_t1_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_AUD, NULL, cmucal_vclk_ip_vgen_lite_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSC_CMU_BUSC, NULL, cmucal_vclk_ip_busc_cmu_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSCP0, NULL, cmucal_vclk_ip_axi2apb_buscp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSC_TDP, NULL, cmucal_vclk_ip_axi2apb_busc_tdp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_BUSC, NULL, cmucal_vclk_ip_sysreg_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_CMUTOPC, NULL, cmucal_vclk_ip_busif_cmutopc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D0_BUSC, NULL, cmucal_vclk_ip_trex_d0_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P_BUSC, NULL, cmucal_vclk_ip_trex_p_busc, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF0, NULL, cmucal_vclk_ip_lhs_axi_p_mif0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhs_axi_p_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhs_axi_p_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhs_axi_p_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhs_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhs_axi_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhs_axi_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASYNCSFR_WR_SMC, NULL, cmucal_vclk_ip_asyncsfr_wr_smc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhs_axi_d_ivasc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhm_acel_d0_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhm_acel_d1_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhm_acel_d2_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhm_acel_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhm_acel_d_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhm_acel_d_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhm_axi_d0_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhm_axi_d0_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_d_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhm_axi_d1_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhm_axi_d1_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhm_axi_d2_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d0_isplp, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DPU, NULL, cmucal_vclk_ip_lhs_axi_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_p_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhs_axi_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhs_axi_p_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_G2D, NULL, cmucal_vclk_ip_lhs_axi_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_p_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_p_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_IVA, NULL, cmucal_vclk_ip_lhs_axi_p_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MFC, NULL, cmucal_vclk_ip_lhs_axi_p_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhm_acel_d_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_APM, NULL, cmucal_vclk_ip_lhm_axi_d_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d1_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhs_axi_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SIREX, NULL, cmucal_vclk_ip_sirex, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d0_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d1_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_d_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_RB_BUSC, NULL, cmucal_vclk_ip_trex_rb_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPFW, NULL, cmucal_vclk_ip_ppfw, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_BUSC, NULL, cmucal_vclk_ip_wrap2_conv_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_PDMA0, NULL, cmucal_vclk_ip_vgen_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_BUSC, NULL, cmucal_vclk_ip_vgen_lite_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_BUSC, NULL, cmucal_vclk_ip_hpm_busc, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMBUSC, NULL, cmucal_vclk_ip_busif_hpmbusc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PDMA0, NULL, cmucal_vclk_ip_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SBIC, NULL, cmucal_vclk_ip_sbic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPDMA, NULL, cmucal_vclk_ip_spdma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DIT, NULL, cmucal_vclk_ip_ad_apb_dit, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DIT, NULL, cmucal_vclk_ip_dit, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_BUSC, NULL, cmucal_vclk_ip_d_tzpc_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_NPU, NULL, cmucal_vclk_ip_lhs_axi_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MMCACHE, NULL, cmucal_vclk_ip_mmcache, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D1_BUSC, NULL, cmucal_vclk_ip_trex_d1_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSCP1, NULL, cmucal_vclk_ip_axi2apb_buscp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_AUD, NULL, cmucal_vclk_ip_lhm_axi_d_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_AUD, NULL, cmucal_vclk_ip_lhs_axi_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhs_dbg_g_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VTS, NULL, cmucal_vclk_ip_lhm_axi_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_VTS, NULL, cmucal_vclk_ip_lhs_axi_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_SPDMA, NULL, cmucal_vclk_ip_qe_spdma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_PDMA0, NULL, cmucal_vclk_ip_qe_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_BUSC, NULL, cmucal_vclk_ip_xiu_d_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_P_VTS, NULL, cmucal_vclk_ip_baaw_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_64TO128, NULL, cmucal_vclk_ip_axi_us_64to128, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_P_NPU, NULL, cmucal_vclk_ip_baaw_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhm_axi_d_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CMGP_CMU_CMGP, NULL, cmucal_vclk_ip_cmgp_cmu_cmgp, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_ADC_CMGP, NULL, cmucal_vclk_ip_adc_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_CMGP, NULL, cmucal_vclk_ip_gpio_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP0, NULL, cmucal_vclk_ip_i2c_cmgp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP1, NULL, cmucal_vclk_ip_i2c_cmgp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP2, NULL, cmucal_vclk_ip_i2c_cmgp2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP3, NULL, cmucal_vclk_ip_i2c_cmgp3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP, NULL, cmucal_vclk_ip_sysreg_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP0, NULL, cmucal_vclk_ip_usi_cmgp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP1, NULL, cmucal_vclk_ip_usi_cmgp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP2, NULL, cmucal_vclk_ip_usi_cmgp2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP3, NULL, cmucal_vclk_ip_usi_cmgp3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2CP, NULL, cmucal_vclk_ip_sysreg_cmgp2cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2PMU_AP, NULL, cmucal_vclk_ip_sysreg_cmgp2pmu_ap, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DTZPC_CMGP, NULL, cmucal_vclk_ip_dtzpc_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhm_axi_c_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2APM, NULL, cmucal_vclk_ip_sysreg_cmgp2apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CORE_CMU_CORE, NULL, cmucal_vclk_ip_core_cmu_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CORE, NULL, cmucal_vclk_ip_sysreg_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_0, NULL, cmucal_vclk_ip_axi2apb_core_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE2AXI_0, NULL, cmucal_vclk_ip_mpace2axi_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE2AXI_1, NULL, cmucal_vclk_ip_mpace2axi_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_DEBUG_CCI, NULL, cmucal_vclk_ip_ppc_debug_cci, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P0_CORE, NULL, cmucal_vclk_ip_trex_p0_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL2_0, NULL, cmucal_vclk_ip_ppmu_cpucl2_0, NULL, NULL), 
+ CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G0_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g0_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G1_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g1_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G2_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g2_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G3_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g3_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T_BDU, NULL, cmucal_vclk_ip_lhs_atb_t_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_G_BDU, NULL, cmucal_vclk_ip_adm_apb_g_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BDU, NULL, cmucal_vclk_ip_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P1_CORE, NULL, cmucal_vclk_ip_trex_p1_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_TP, NULL, cmucal_vclk_ip_axi2apb_core_tp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPFW_G3D, NULL, cmucal_vclk_ip_ppfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_G3D, NULL, cmucal_vclk_ip_lhs_axi_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_CP, NULL, cmucal_vclk_ip_lhm_axi_d0_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D0_G3D, NULL, cmucal_vclk_ip_lhm_ace_d0_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D1_G3D, NULL, cmucal_vclk_ip_lhm_ace_d1_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D2_G3D, NULL, cmucal_vclk_ip_lhm_ace_d2_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D3_G3D, NULL, cmucal_vclk_ip_lhm_ace_d3_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D_CORE, NULL, cmucal_vclk_ip_trex_d_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CORE, NULL, cmucal_vclk_ip_hpm_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCORE, NULL, cmucal_vclk_ip_busif_hpmcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D0_G3D, NULL, cmucal_vclk_ip_bps_d0_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D1_G3D, NULL, cmucal_vclk_ip_bps_d1_g3d, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_BPS_D2_G3D, NULL, cmucal_vclk_ip_bps_d2_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D3_G3D, NULL, cmucal_vclk_ip_bps_d3_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPCFW_G3D, NULL, cmucal_vclk_ip_ppcfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CP, NULL, cmucal_vclk_ip_lhs_axi_p_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_PPFW_G3D, NULL, cmucal_vclk_ip_apb_async_ppfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_CP, NULL, cmucal_vclk_ip_baaw_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_P_G3D, NULL, cmucal_vclk_ip_bps_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_APM, NULL, cmucal_vclk_ip_lhs_axi_p_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL2_1, NULL, cmucal_vclk_ip_ppmu_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CORE, NULL, cmucal_vclk_ip_d_tzpc_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_1, NULL, cmucal_vclk_ip_axi2apb_core_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_CORE, NULL, cmucal_vclk_ip_xiu_p_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL2_0, NULL, cmucal_vclk_ip_ppc_cpucl2_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL2_1, NULL, cmucal_vclk_ip_ppc_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D0, NULL, cmucal_vclk_ip_ppc_g3d0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D1, NULL, cmucal_vclk_ip_ppc_g3d1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D2, NULL, cmucal_vclk_ip_ppc_g3d2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D3, NULL, cmucal_vclk_ip_ppc_g3d3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_IRPS0, NULL, cmucal_vclk_ip_ppc_irps0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_IRPS1, NULL, cmucal_vclk_ip_ppc_irps1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_CP, NULL, cmucal_vclk_ip_lhm_axi_d1_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_L_CORE, NULL, cmucal_vclk_ip_lhs_axi_l_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_2, NULL, cmucal_vclk_ip_axi2apb_core_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_L_CORE, NULL, cmucal_vclk_ip_lhm_axi_l_core, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL0_0, NULL, cmucal_vclk_ip_ppc_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL0_1, NULL, cmucal_vclk_ip_ppc_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL0_0, NULL, cmucal_vclk_ip_ppmu_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL0_1, NULL, cmucal_vclk_ip_ppmu_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhm_dbg_g_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D0_MIF, NULL, cmucal_vclk_ip_mpace_asb_d0_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D1_MIF, NULL, cmucal_vclk_ip_mpace_asb_d1_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D2_MIF, NULL, cmucal_vclk_ip_mpace_asb_d2_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D3_MIF, NULL, cmucal_vclk_ip_mpace_asb_d3_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_ASB_CSSYS, NULL, cmucal_vclk_ip_axi_asb_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CCI, NULL, cmucal_vclk_ip_cci, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CPUCL0, NULL, cmucal_vclk_ip_axi2apb_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CPUCL0, NULL, cmucal_vclk_ip_sysreg_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCPUCL0, NULL, cmucal_vclk_ip_busif_hpmcpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CSSYS, NULL, cmucal_vclk_ip_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhm_atb_t0_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T_BDU, NULL, cmucal_vclk_ip_lhm_atb_t_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster2, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t2_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t3_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SECJTAG, NULL, cmucal_vclk_ip_secjtag, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t2_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t3_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_G_CLUSTER0, NULL, cmucal_vclk_ip_adm_apb_g_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL0_CMU_CPUCL0, NULL, cmucal_vclk_ip_cpucl0_cmu_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL0, NULL, cmucal_vclk_ip_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t4_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t5_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t4_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t5_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CPUCL0, NULL, cmucal_vclk_ip_d_tzpc_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhm_atb_t1_aud, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_int_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_int_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_int_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_int_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_CPUCL0, NULL, cmucal_vclk_ip_xiu_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_DP_CSSYS, NULL, cmucal_vclk_ip_xiu_dp_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_CPUCL0, NULL, cmucal_vclk_ip_trex_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_32TO64_G_DBGCORE, NULL, cmucal_vclk_ip_axi_us_32to64_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL0_1, NULL, cmucal_vclk_ip_hpm_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL0_0, NULL, cmucal_vclk_ip_hpm_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_P_CSSYS_0, NULL, cmucal_vclk_ip_apb_async_p_cssys_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhs_axi_g_int_etr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhm_axi_g_int_etr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_P_CSSYS, NULL, cmucal_vclk_ip_axi2apb_p_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_CPUCL0, NULL, cmucal_vclk_ip_bps_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL1_CMU_CPUCL1, NULL, cmucal_vclk_ip_cpucl1_cmu_cpucl1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL1, NULL, cmucal_vclk_ip_cpucl1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL2_CMU_CPUCL2, NULL, cmucal_vclk_ip_cpucl2_cmu_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CPUCL2, NULL, cmucal_vclk_ip_sysreg_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCPUCL2, NULL, 
cmucal_vclk_ip_busif_hpmcpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_0, NULL, cmucal_vclk_ip_hpm_cpucl2_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CLUSTER2, NULL, cmucal_vclk_ip_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CPUCL2, NULL, cmucal_vclk_ip_axi2apb_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_1, NULL, cmucal_vclk_ip_hpm_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_2, NULL, cmucal_vclk_ip_hpm_cpucl2_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CPUCL2, NULL, cmucal_vclk_ip_d_tzpc_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DPU_CMU_DPU, NULL, cmucal_vclk_ip_dpu_cmu_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD0, NULL, cmucal_vclk_ip_btm_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD1, NULL, cmucal_vclk_ip_btm_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DPU, NULL, cmucal_vclk_ip_sysreg_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DPUP1, NULL, cmucal_vclk_ip_axi2apb_dpup1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DPUP0, NULL, cmucal_vclk_ip_axi2apb_dpup0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_sysmmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DPU, NULL, cmucal_vclk_ip_lhm_axi_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhs_axi_d1_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_DPU, NULL, cmucal_vclk_ip_xiu_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON0, NULL, cmucal_vclk_ip_ad_apb_decon0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON1, NULL, cmucal_vclk_ip_ad_apb_decon1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MIPI_DSIM1, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPP, NULL, cmucal_vclk_ip_ad_apb_dpp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhs_axi_d2_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD2, NULL, cmucal_vclk_ip_btm_dpud2, NULL, NULL), 
+ CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_sysmmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_DMA, NULL, cmucal_vclk_ip_ad_apb_dpu_dma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_WB_MUX, NULL, cmucal_vclk_ip_ad_apb_dpu_wb_mux, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_sysmmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD0, NULL, cmucal_vclk_ip_ppmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD1, NULL, cmucal_vclk_ip_ppmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD2, NULL, cmucal_vclk_ip_ppmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MIPI_DSIM0, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON2, NULL, cmucal_vclk_ip_ad_apb_decon2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD0_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD1_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD2_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DPU, NULL, cmucal_vclk_ip_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAPPER_FOR_S5I6280_HSI_DCPHY_COMBO_TOP, NULL, cmucal_vclk_ip_wrapper_for_s5i6280_hsi_dcphy_combo_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_DMA_PGEN, NULL, cmucal_vclk_ip_ad_apb_dpu_dma_pgen, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhs_axi_d0_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DPU, NULL, cmucal_vclk_ip_d_tzpc_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MCD, NULL, cmucal_vclk_ip_ad_apb_mcd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DSPM_CMU_DSPM, NULL, 
cmucal_vclk_ip_dspm_cmu_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DSPM, NULL, cmucal_vclk_ip_sysreg_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DSPM, NULL, cmucal_vclk_ip_axi2apb_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DSPM0, NULL, cmucal_vclk_ip_ppmu_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DSPM0, NULL, cmucal_vclk_ip_sysmmu_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DSPM0, NULL, cmucal_vclk_ip_btm_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhm_axi_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d0_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhm_axi_p_ivadspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhs_axi_p_dspmiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_DSPM, NULL, cmucal_vclk_ip_wrap2_conv_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM0, NULL, cmucal_vclk_ip_ad_apb_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM1, NULL, cmucal_vclk_ip_ad_apb_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM3, NULL, cmucal_vclk_ip_ad_apb_dspm3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_AXI_DSPM0, NULL, cmucal_vclk_ip_ad_axi_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DSPM1, NULL, cmucal_vclk_ip_btm_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d1_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhs_axi_p_dspmdsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DSPM1, NULL, cmucal_vclk_ip_ppmu_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DSPM1, NULL, cmucal_vclk_ip_sysmmu_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_DSPM, NULL, cmucal_vclk_ip_adm_apb_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhm_axi_d0_dspsdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_DSPM, NULL, cmucal_vclk_ip_xiu_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_DSPM, 
NULL, cmucal_vclk_ip_vgen_lite_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM2, NULL, cmucal_vclk_ip_ad_apb_dspm2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SCORE_TS_II, NULL, cmucal_vclk_ip_score_ts_ii, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DSPM, NULL, cmucal_vclk_ip_d_tzpc_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhm_ast_isppredspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhm_ast_isplpdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhm_ast_isphqdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhs_ast_dspmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhs_ast_dspmisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_DSPM, NULL, cmucal_vclk_ip_xiu_d_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_DSPM, NULL, cmucal_vclk_ip_baaw_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhs_axi_d_dspmnpu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DSPS_CMU_DSPS, NULL, cmucal_vclk_ip_dsps_cmu_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DSPS, NULL, cmucal_vclk_ip_axi2apb_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhm_axi_p_dspmdsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DSPS, NULL, cmucal_vclk_ip_sysreg_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhs_axi_d_dspsiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhs_axi_d0_dspsdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SCORE_BARON, NULL, cmucal_vclk_ip_score_baron, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhm_axi_d_ivadsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DSPS, NULL, cmucal_vclk_ip_d_tzpc_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_DSPS, NULL, cmucal_vclk_ip_vgen_lite_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS0_CMU_FSYS0, NULL, 
cmucal_vclk_ip_fsys0_cmu_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhs_acel_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhm_axi_p_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_FSYS0, NULL, cmucal_vclk_ip_gpio_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_FSYS0, NULL, cmucal_vclk_ip_sysreg_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_FSYS0, NULL, cmucal_vclk_ip_xiu_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_FSYS0, NULL, cmucal_vclk_ip_btm_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DP_LINK, NULL, cmucal_vclk_ip_dp_link, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_FSYS0, NULL, cmucal_vclk_ip_vgen_lite_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_USB, NULL, cmucal_vclk_ip_lhm_axi_d_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_USB, NULL, cmucal_vclk_ip_lhs_axi_p_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_FSYS0, NULL, cmucal_vclk_ip_ppmu_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_PCIE_GEN3A, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_PCIE_GEN3B, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3b, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P0_FSYS0, NULL, cmucal_vclk_ip_xiu_p0_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_GEN3, NULL, cmucal_vclk_ip_pcie_gen3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN3A, NULL, cmucal_vclk_ip_pcie_ia_gen3a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN3B, NULL, cmucal_vclk_ip_pcie_ia_gen3b, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_FSYS0, NULL, cmucal_vclk_ip_d_tzpc_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS0A_CMU_FSYS0A, NULL, cmucal_vclk_ip_fsys0a_cmu_fsys0a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USB31DRD, NULL, cmucal_vclk_ip_usb31drd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_USB, NULL, cmucal_vclk_ip_lhm_axi_p_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_USB, NULL, cmucal_vclk_ip_lhs_axi_d_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS1_CMU_FSYS1, NULL, 
cmucal_vclk_ip_fsys1_cmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MMC_CARD, NULL, cmucal_vclk_ip_mmc_card, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_GEN2, NULL, cmucal_vclk_ip_pcie_gen2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SSS, NULL, cmucal_vclk_ip_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_RTIC, NULL, cmucal_vclk_ip_rtic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_FSYS1, NULL, cmucal_vclk_ip_sysreg_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_FSYS1, NULL, cmucal_vclk_ip_gpio_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhs_acel_d_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhm_axi_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_FSYS1, NULL, cmucal_vclk_ip_xiu_d_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_FSYS1, NULL, cmucal_vclk_ip_xiu_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_FSYS1, NULL, cmucal_vclk_ip_ppmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_FSYS1, NULL, cmucal_vclk_ip_btm_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UFS_CARD, NULL, cmucal_vclk_ip_ufs_card, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_AHB_SSS, NULL, cmucal_vclk_ip_adm_ahb_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_FSYS1, NULL, cmucal_vclk_ip_sysmmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_FSYS1, NULL, cmucal_vclk_ip_vgen_lite_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN2, NULL, cmucal_vclk_ip_pcie_ia_gen2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_FSYS1, NULL, cmucal_vclk_ip_d_tzpc_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UFS_EMBD, NULL, cmucal_vclk_ip_ufs_embd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PUF, NULL, cmucal_vclk_ip_puf, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_RTIC, NULL, cmucal_vclk_ip_qe_rtic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_SSS, NULL, cmucal_vclk_ip_qe_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_SSS, NULL, cmucal_vclk_ip_baaw_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G2D_CMU_G2D, NULL, cmucal_vclk_ip_g2d_cmu_g2d, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD0, NULL, cmucal_vclk_ip_ppmu_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD1, NULL, cmucal_vclk_ip_ppmu_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD0, NULL, cmucal_vclk_ip_sysmmu_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_G2D, NULL, cmucal_vclk_ip_sysreg_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhs_acel_d0_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhs_acel_d1_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_G2D, NULL, cmucal_vclk_ip_lhm_axi_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_G2D, NULL, cmucal_vclk_ip_as_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G2DP0, NULL, cmucal_vclk_ip_axi2apb_g2dp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD0, NULL, cmucal_vclk_ip_btm_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD1, NULL, cmucal_vclk_ip_btm_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_G2D, NULL, cmucal_vclk_ip_xiu_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G2DP1, NULL, cmucal_vclk_ip_axi2apb_g2dp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD2, NULL, cmucal_vclk_ip_btm_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_JPEG, NULL, cmucal_vclk_ip_qe_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_MSCL, NULL, cmucal_vclk_ip_qe_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD2, NULL, cmucal_vclk_ip_sysmmu_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD2, NULL, cmucal_vclk_ip_ppmu_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhs_acel_d2_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_JPEG, NULL, cmucal_vclk_ip_as_p_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_G2D, NULL, cmucal_vclk_ip_xiu_d_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_MSCL, NULL, cmucal_vclk_ip_as_p_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_ASTC, NULL, cmucal_vclk_ip_as_p_astc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd0, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_ASTC, NULL, cmucal_vclk_ip_qe_astc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_G2D, NULL, cmucal_vclk_ip_vgen_lite_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G2D, NULL, cmucal_vclk_ip_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD1, NULL, cmucal_vclk_ip_sysmmu_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_JPEG, NULL, cmucal_vclk_ip_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MSCL, NULL, cmucal_vclk_ip_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASTC, NULL, cmucal_vclk_ip_astc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_JSQZ, NULL, cmucal_vclk_ip_as_p_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_JSQZ, NULL, cmucal_vclk_ip_qe_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_G2D, NULL, cmucal_vclk_ip_d_tzpc_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_JSQZ, NULL, cmucal_vclk_ip_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_G3D, NULL, cmucal_vclk_ip_xiu_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_G3D, NULL, cmucal_vclk_ip_lhm_axi_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMG3D, NULL, cmucal_vclk_ip_busif_hpmg3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_G3D0, NULL, cmucal_vclk_ip_hpm_g3d0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_G3D, NULL, cmucal_vclk_ip_sysreg_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G3D_CMU_G3D, NULL, cmucal_vclk_ip_g3d_cmu_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhs_axi_g3dsfr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_G3D, NULL, cmucal_vclk_ip_vgen_lite_g3d, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_GPU, NULL, cmucal_vclk_ip_gpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G3D, NULL, cmucal_vclk_ip_axi2apb_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhm_axi_g3dsfr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GRAY2BIN_G3D, NULL, cmucal_vclk_ip_gray2bin_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_G3D, NULL, cmucal_vclk_ip_d_tzpc_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASB_G3D, NULL, cmucal_vclk_ip_asb_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_p_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_d_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPHQ, NULL, cmucal_vclk_ip_is_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPHQ, NULL, cmucal_vclk_ip_sysreg_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ISPHQ_CMU_ISPHQ, NULL, cmucal_vclk_ip_isphq_cmu_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhm_atb_isppreisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhs_atb_isphqisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPHQ, NULL, cmucal_vclk_ip_btm_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhm_atb_vo_isplpisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhs_ast_vo_isphqisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPHQ, NULL, cmucal_vclk_ip_d_tzpc_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhs_ast_isphqdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_p_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d0_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPLP0, NULL, cmucal_vclk_ip_btm_isplp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPLP, NULL, cmucal_vclk_ip_is_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPLP, NULL, cmucal_vclk_ip_sysreg_isplp, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_ISPLP_CMU_ISPLP, NULL, cmucal_vclk_ip_isplp_cmu_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPLP1, NULL, cmucal_vclk_ip_btm_isplp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d1_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhm_atb_isphqisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_ast_vo_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_atb_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhs_atb_vo_isplpisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPLP, NULL, cmucal_vclk_ip_d_tzpc_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhs_ast_isplpdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhm_ast_dspmisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhs_axi_p_isplpvra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d_vra2isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPPRE, NULL, cmucal_vclk_ip_is_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_d_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPPRE, NULL, cmucal_vclk_ip_btm_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_p_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPPRE, NULL, cmucal_vclk_ip_sysreg_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ISPPRE_CMU_ISPPRE, NULL, cmucal_vclk_ip_isppre_cmu_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_atb_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhs_atb_isppreisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPPRE, NULL, cmucal_vclk_ip_d_tzpc_isppre, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhs_ast_isppredspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhm_ast_dspmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMISPPRE, NULL, cmucal_vclk_ip_busif_hpmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_ISPPRE, NULL, cmucal_vclk_ip_hpm_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPPRE1, NULL, cmucal_vclk_ip_d_tzpc_isppre1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_ast_vo_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhm_ast_vo_isphqisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA_CMU_IVA, NULL, cmucal_vclk_ip_iva_cmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhs_acel_d_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhs_axi_d_ivadsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhs_axi_p_ivadspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhm_axi_p_dspmiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_IVA, NULL, cmucal_vclk_ip_lhm_axi_p_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_IVA, NULL, cmucal_vclk_ip_btm_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_IVA, NULL, cmucal_vclk_ip_ppmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_IVA, NULL, cmucal_vclk_ip_sysmmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_IVA, NULL, cmucal_vclk_ip_xiu_p_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA0, NULL, cmucal_vclk_ip_ad_apb_iva0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_2M_IVA, NULL, cmucal_vclk_ip_axi2apb_2m_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_IVA, NULL, cmucal_vclk_ip_axi2apb_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_IVA, NULL, cmucal_vclk_ip_sysreg_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhm_axi_d_ivasc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_DAP_IVA, NULL, 
cmucal_vclk_ip_adm_dap_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhm_axi_d_dspsiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA1, NULL, cmucal_vclk_ip_ad_apb_iva1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA2, NULL, cmucal_vclk_ip_ad_apb_iva2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_IVA, NULL, cmucal_vclk_ip_vgen_lite_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA, NULL, cmucal_vclk_ip_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA_INTMEM, NULL, cmucal_vclk_ip_iva_intmem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D0_IVA, NULL, cmucal_vclk_ip_xiu_d0_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D1_IVA, NULL, cmucal_vclk_ip_xiu_d1_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_IVA, NULL, cmucal_vclk_ip_d_tzpc_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D2_IVA, NULL, cmucal_vclk_ip_xiu_d2_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_RB1_IVA, NULL, cmucal_vclk_ip_trex_rb1_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_IVA, NULL, cmucal_vclk_ip_qe_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_IVA, NULL, cmucal_vclk_ip_wrap2_conv_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MFC_CMU_MFC, NULL, cmucal_vclk_ip_mfc_cmu_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_MFC, NULL, cmucal_vclk_ip_as_apb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MFC, NULL, cmucal_vclk_ip_axi2apb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MFC, NULL, cmucal_vclk_ip_sysreg_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhs_axi_d0_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhs_axi_d1_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MFC, NULL, cmucal_vclk_ip_lhm_axi_p_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_MFCD0, NULL, cmucal_vclk_ip_sysmmu_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_MFCD1, NULL, cmucal_vclk_ip_sysmmu_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD0, NULL, cmucal_vclk_ip_ppmu_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD1, NULL, 
cmucal_vclk_ip_ppmu_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_MFCD0, NULL, cmucal_vclk_ip_btm_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_MFCD1, NULL, cmucal_vclk_ip_btm_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_NS_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_NS_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_S_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_S_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_WFD_NS, NULL, cmucal_vclk_ip_as_apb_wfd_ns, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_AXI_WFD, NULL, cmucal_vclk_ip_as_axi_wfd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD2, NULL, cmucal_vclk_ip_ppmu_mfcd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_MFC, NULL, cmucal_vclk_ip_xiu_d_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_WFD_S, NULL, cmucal_vclk_ip_as_apb_wfd_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_MFC, NULL, cmucal_vclk_ip_vgen_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MFC, NULL, cmucal_vclk_ip_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WFD, NULL, cmucal_vclk_ip_wfd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LH_ATB_MFC, NULL, cmucal_vclk_ip_lh_atb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_MFC, NULL, cmucal_vclk_ip_d_tzpc_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF_CMU_MIF, NULL, cmucal_vclk_ip_mif_cmu_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY, NULL, cmucal_vclk_ip_ddrphy, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF, NULL, cmucal_vclk_ip_sysreg_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF, NULL, cmucal_vclk_ip_busif_hpmmif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF, NULL, cmucal_vclk_ip_lhm_axi_p_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF, NULL, cmucal_vclk_ip_axi2apb_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_DVFS, NULL, cmucal_vclk_ip_ppc_dvfs, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_PPC_DEBUG, NULL, cmucal_vclk_ip_ppc_debug, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY, NULL, cmucal_vclk_ip_apbbr_ddrphy, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC, NULL, cmucal_vclk_ip_apbbr_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ, NULL, cmucal_vclk_ip_apbbr_dmctz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF, NULL, cmucal_vclk_ip_hpm_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC, NULL, cmucal_vclk_ip_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPC_DEBUG, NULL, cmucal_vclk_ip_qch_adapter_ppc_debug, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPC_DVFS, NULL, cmucal_vclk_ip_qch_adapter_ppc_dvfs, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_MIF, NULL, cmucal_vclk_ip_d_tzpc_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF1, NULL, cmucal_vclk_ip_hpm_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF1_CMU_MIF1, NULL, cmucal_vclk_ip_mif1_cmu_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY1, NULL, cmucal_vclk_ip_apbbr_ddrphy1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC1, NULL, cmucal_vclk_ip_apbbr_dmc1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ1, NULL, cmucal_vclk_ip_apbbr_dmctz1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF1, NULL, cmucal_vclk_ip_axi2apb_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF1, NULL, cmucal_vclk_ip_busif_hpmmif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY1, NULL, cmucal_vclk_ip_ddrphy1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC1, NULL, cmucal_vclk_ip_dmc1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhm_axi_p_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_ppmuppc_debug1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_ppmuppc_dvfs1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF1, NULL, cmucal_vclk_ip_sysreg_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS1, 
NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF2, NULL, cmucal_vclk_ip_hpm_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY2, NULL, cmucal_vclk_ip_apbbr_ddrphy2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC2, NULL, cmucal_vclk_ip_apbbr_dmc2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ2, NULL, cmucal_vclk_ip_apbbr_dmctz2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF2, NULL, cmucal_vclk_ip_axi2apb_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF2, NULL, cmucal_vclk_ip_busif_hpmmif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY2, NULL, cmucal_vclk_ip_ddrphy2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC2, NULL, cmucal_vclk_ip_dmc2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhm_axi_p_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_ppmuppc_debug2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_ppmuppc_dvfs2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF2, NULL, cmucal_vclk_ip_sysreg_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF2_CMU_MIF2, NULL, cmucal_vclk_ip_mif2_cmu_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF3, NULL, cmucal_vclk_ip_hpm_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY3, NULL, cmucal_vclk_ip_apbbr_ddrphy3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC3, NULL, cmucal_vclk_ip_apbbr_dmc3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ3, NULL, cmucal_vclk_ip_apbbr_dmctz3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF3, NULL, cmucal_vclk_ip_axi2apb_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF3, NULL, cmucal_vclk_ip_busif_hpmmif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY3, NULL, cmucal_vclk_ip_ddrphy3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC3, NULL, cmucal_vclk_ip_dmc3, 
NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhm_axi_p_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_ppmuppc_debug3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_ppmuppc_dvfs3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF3, NULL, cmucal_vclk_ip_sysreg_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF3_CMU_MIF3, NULL, cmucal_vclk_ip_mif3_cmu_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhs_acel_d_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhs_axi_p_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU0_CMU_NPU0, NULL, cmucal_vclk_ip_npu0_cmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SI0, NULL, cmucal_vclk_ip_apb_async_si0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SMMU_NS, NULL, cmucal_vclk_ip_apb_async_smmu_ns, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_NPU0, NULL, cmucal_vclk_ip_axi2apb_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_NPU0, NULL, cmucal_vclk_ip_btm_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_NPU0, NULL, cmucal_vclk_ip_d_tzpc_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_5, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhm_ast_p_npu1_done, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhm_axi_d_dspmnpu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_NPU, NULL, cmucal_vclk_ip_lhm_axi_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhs_ast_p_npud1_setreg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUC, NULL, cmucal_vclk_ip_npuc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUD_UNIT0, NULL, cmucal_vclk_ip_npud_unit0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUDMA, NULL, cmucal_vclk_ip_ppmu_cpudma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_RFM, NULL, cmucal_vclk_ip_ppmu_rfm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_CPUDMA, NULL, cmucal_vclk_ip_qe_cpudma, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_QE_RFM, NULL, cmucal_vclk_ip_qe_rfm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SMMU_NPU0, NULL, cmucal_vclk_ip_smmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_NPU0, NULL, cmucal_vclk_ip_sysreg_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_NPU0, NULL, cmucal_vclk_ip_xiu_d_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SMMU_S, NULL, cmucal_vclk_ip_apb_async_smmu_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_NPU0, NULL, cmucal_vclk_ip_vgen_lite_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_NPU0, NULL, cmucal_vclk_ip_ppmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU0_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu0_ppc_wrapper, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU1_CMU_NPU1, NULL, cmucal_vclk_ip_npu1_cmu_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhm_axi_p_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SI1, NULL, cmucal_vclk_ip_apb_async_si1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_NPU1, NULL, cmucal_vclk_ip_axi2apb_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_NPU1, NULL, cmucal_vclk_ip_d_tzpc_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhm_ast_p_npud1_setreg, 
NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_NPU1, NULL, cmucal_vclk_ip_sysreg_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhs_ast_p_npu1_done, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUD_UNIT1, NULL, cmucal_vclk_ip_npud_unit1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_NPU1, NULL, cmucal_vclk_ip_ppmu_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU1_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu1_ppc_wrapper, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_PERIC0, NULL, cmucal_vclk_ip_gpio_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PWM, NULL, cmucal_vclk_ip_pwm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIC0, NULL, cmucal_vclk_ip_sysreg_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI00_USI, NULL, cmucal_vclk_ip_usi00_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI01_USI, NULL, cmucal_vclk_ip_usi01_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI02_USI, NULL, cmucal_vclk_ip_usi02_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI03_USI, NULL, cmucal_vclk_ip_usi03_usi, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC0P0, NULL, cmucal_vclk_ip_axi2apb_peric0p0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERIC0_CMU_PERIC0, NULL, cmucal_vclk_ip_peric0_cmu_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI04_USI, NULL, cmucal_vclk_ip_usi04_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC0P1, NULL, cmucal_vclk_ip_axi2apb_peric0p1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI05_USI, NULL, cmucal_vclk_ip_usi05_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI00_I2C, NULL, cmucal_vclk_ip_usi00_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI01_I2C, NULL, cmucal_vclk_ip_usi01_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI02_I2C, NULL, cmucal_vclk_ip_usi02_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI03_I2C, NULL, cmucal_vclk_ip_usi03_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI04_I2C, NULL, cmucal_vclk_ip_usi04_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI05_I2C, NULL, cmucal_vclk_ip_usi05_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UART_DBG, NULL, cmucal_vclk_ip_uart_dbg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIC0, NULL, cmucal_vclk_ip_xiu_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhm_axi_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI12_USI, NULL, cmucal_vclk_ip_usi12_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI12_I2C, NULL, cmucal_vclk_ip_usi12_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI13_I2C, NULL, cmucal_vclk_ip_usi13_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI13_USI, NULL, cmucal_vclk_ip_usi13_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI14_USI, NULL, cmucal_vclk_ip_usi14_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI14_I2C, NULL, cmucal_vclk_ip_usi14_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIC0, NULL, cmucal_vclk_ip_d_tzpc_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI15_I2C, NULL, cmucal_vclk_ip_usi15_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI15_USI, NULL, cmucal_vclk_ip_usi15_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC1P1, NULL, cmucal_vclk_ip_axi2apb_peric1p1, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_GPIO_PERIC1, NULL, cmucal_vclk_ip_gpio_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIC1, NULL, cmucal_vclk_ip_sysreg_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UART_BT, NULL, cmucal_vclk_ip_uart_bt, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM1, NULL, cmucal_vclk_ip_i2c_cam1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM2, NULL, cmucal_vclk_ip_i2c_cam2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM3, NULL, cmucal_vclk_ip_i2c_cam3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI06_USI, NULL, cmucal_vclk_ip_usi06_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI07_USI, NULL, cmucal_vclk_ip_usi07_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI08_USI, NULL, cmucal_vclk_ip_usi08_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM0, NULL, cmucal_vclk_ip_i2c_cam0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIC1, NULL, cmucal_vclk_ip_xiu_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC1P0, NULL, cmucal_vclk_ip_axi2apb_peric1p0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERIC1_CMU_PERIC1, NULL, cmucal_vclk_ip_peric1_cmu_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPI_CAM0, NULL, cmucal_vclk_ip_spi_cam0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI09_USI, NULL, cmucal_vclk_ip_usi09_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI06_I2C, NULL, cmucal_vclk_ip_usi06_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI10_USI, NULL, cmucal_vclk_ip_usi10_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI07_I2C, NULL, cmucal_vclk_ip_usi07_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI08_I2C, NULL, cmucal_vclk_ip_usi08_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI09_I2C, NULL, cmucal_vclk_ip_usi09_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI10_I2C, NULL, cmucal_vclk_ip_usi10_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhm_axi_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI11_USI, NULL, cmucal_vclk_ip_usi11_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI11_I2C, NULL, cmucal_vclk_ip_usi11_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIC1, NULL, 
cmucal_vclk_ip_d_tzpc_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I3C, NULL, cmucal_vclk_ip_i3c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI16_USI, NULL, cmucal_vclk_ip_usi16_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI17_USI, NULL, cmucal_vclk_ip_usi17_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI16_I3C, NULL, cmucal_vclk_ip_usi16_i3c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI17_I2C, NULL, cmucal_vclk_ip_usi17_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERISP, NULL, cmucal_vclk_ip_axi2apb_perisp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIS, NULL, cmucal_vclk_ip_xiu_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIS, NULL, cmucal_vclk_ip_sysreg_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_CLUSTER2, NULL, cmucal_vclk_ip_wdt_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_CLUSTER0, NULL, cmucal_vclk_ip_wdt_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERIS_CMU_PERIS, NULL, cmucal_vclk_ip_peris_cmu_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_AXI_P_PERIS, NULL, cmucal_vclk_ip_ad_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_BIRA, NULL, cmucal_vclk_ip_otp_con_bira, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GIC, NULL, cmucal_vclk_ip_gic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhm_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MCT, NULL, cmucal_vclk_ip_mct, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_TOP, NULL, cmucal_vclk_ip_otp_con_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIS, NULL, cmucal_vclk_ip_d_tzpc_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TMU_SUB, NULL, cmucal_vclk_ip_tmu_sub, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TMU_TOP, NULL, cmucal_vclk_ip_tmu_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_BISR, NULL, cmucal_vclk_ip_otp_con_bisr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_S2D_CMU_S2D, NULL, cmucal_vclk_ip_s2d_cmu_s2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VRA2_CMU_VRA2, NULL, cmucal_vclk_ip_vra2_cmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_VRA2, NULL, cmucal_vclk_ip_as_apb_vra2, NULL, NULL), 
+ CMUCAL_VCLK2(VCLK_IP_AXI2APB_VRA2, NULL, cmucal_vclk_ip_axi2apb_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_VRA2, NULL, cmucal_vclk_ip_d_tzpc_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhm_axi_p_isplpvra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d_vra2isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_VRA2, NULL, cmucal_vclk_ip_qe_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_VRA2, NULL, cmucal_vclk_ip_sysreg_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_VRA2, NULL, cmucal_vclk_ip_vgen_lite_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VRA2, NULL, cmucal_vclk_ip_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_STR, NULL, cmucal_vclk_ip_as_apb_str, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_VRA2, NULL, cmucal_vclk_ip_btm_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_VRA2, NULL, cmucal_vclk_ip_ppmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_VRA2, NULL, cmucal_vclk_ip_sysmmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_STR, NULL, cmucal_vclk_ip_str, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhs_axi_d_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_IF, NULL, cmucal_vclk_ip_dmic_if, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_VTS, NULL, cmucal_vclk_ip_sysreg_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VTS_CMU_VTS, NULL, cmucal_vclk_ip_vts_cmu_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AHB_BUSMATRIX, NULL, cmucal_vclk_ip_ahb_busmatrix, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_VTS, NULL, cmucal_vclk_ip_lhm_axi_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_VTS, NULL, cmucal_vclk_ip_gpio_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_VTS, NULL, cmucal_vclk_ip_wdt_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB0, NULL, cmucal_vclk_ip_dmic_ahb0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB1, NULL, cmucal_vclk_ip_dmic_ahb1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_C_VTS, NULL, cmucal_vclk_ip_lhs_axi_c_vts, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_ASYNCINTERRUPT, NULL, cmucal_vclk_ip_asyncinterrupt, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC0, NULL, cmucal_vclk_ip_hwacg_sys_dmic0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC1, NULL, cmucal_vclk_ip_hwacg_sys_dmic1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SS_VTS_GLUE, NULL, cmucal_vclk_ip_ss_vts_glue, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CORTEXM4INTEGRATION, NULL, cmucal_vclk_ip_cortexm4integration, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_U_DMIC_CLK_MUX, NULL, cmucal_vclk_ip_u_dmic_clk_mux, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhm_axi_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VTS, NULL, cmucal_vclk_ip_lhs_axi_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_C_VTS, NULL, cmucal_vclk_ip_baaw_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_VTS, NULL, cmucal_vclk_ip_d_tzpc_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE, NULL, cmucal_vclk_ip_vgen_lite, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_LP_VTS, NULL, cmucal_vclk_ip_bps_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_P_VTS, NULL, cmucal_vclk_ip_bps_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XHB_LP_VTS, NULL, cmucal_vclk_ip_xhb_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XHB_P_VTS, NULL, cmucal_vclk_ip_xhb_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SWEEPER_C_VTS, NULL, cmucal_vclk_ip_sweeper_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SWEEPER_D_VTS, NULL, cmucal_vclk_ip_sweeper_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_D_VTS, NULL, cmucal_vclk_ip_baaw_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_ABOX_VTS, NULL, cmucal_vclk_ip_mailbox_abox_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB2, NULL, cmucal_vclk_ip_dmic_ahb2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB3, NULL, cmucal_vclk_ip_dmic_ahb3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC2, NULL, cmucal_vclk_ip_hwacg_sys_dmic2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC3, NULL, cmucal_vclk_ip_hwacg_sys_dmic3, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_DMIC_IF_3RD, NULL, cmucal_vclk_ip_dmic_if_3rd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_VTS, NULL, cmucal_vclk_ip_mailbox_ap_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TIMER, NULL, cmucal_vclk_ip_timer, NULL, NULL), }; From 642b98f62491a61dad1a215c8b15066350871793 Mon Sep 17 00:00:00 2001 From: Andreas Schneider Date: Sun, 1 Mar 2020 09:49:23 +0100 Subject: [PATCH 356/439] arch:arm64:boot:dts: Fix include path for autoconf.h Signed-off-by: Andreas Schneider --- arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi | 2 +- arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi | 2 +- scripts/Makefile.lib | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi b/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi index cea9dd757368..d2bfa2f6687c 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi @@ -9,7 +9,7 @@ * published by the Free Software Foundation. */ -#include "../../../../../include/generated/autoconf.h" +#include #include #include diff --git a/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi b/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi index c063378c0d57..395520f0cbe4 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi +++ b/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi @@ -9,7 +9,7 @@ * published by the Free Software Foundation. 
*/ -#include "../../../../../include/generated/autoconf.h" +#include / { fragment@model { diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index cff0f35e1c4f..2d4087a75c78 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -304,7 +304,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb quiet_cmd_dtc = DTC $@ cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \ - $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \ + $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp $(LINUXINCLUDE) -o $(dtc-tmp) $< ; \ $(DTC) -O dtb -o $@ -b 0 -a 4\ $(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \ -d $(depfile).dtc.tmp $(dtc-tmp) ; \ From 621c8fd9e6ea2eeed572769f309cf61c59046f04 Mon Sep 17 00:00:00 2001 From: Erfan Abdi Date: Wed, 19 Feb 2020 00:08:46 +0800 Subject: [PATCH 357/439] scripts: FIPS: use readelf full path --- scripts/crypto/fips_crypto_integrity.py | 9 +++++---- scripts/fmp/fips_fmp_integrity.py | 9 +++++---- scripts/link-vmlinux.sh | 4 ++-- 3 files changed, 12 insertions(+), 10 deletions(-) mode change 100644 => 100755 scripts/link-vmlinux.sh diff --git a/scripts/crypto/fips_crypto_integrity.py b/scripts/crypto/fips_crypto_integrity.py index 479a7d289861..ce07280139dd 100755 --- a/scripts/crypto/fips_crypto_integrity.py +++ b/scripts/crypto/fips_crypto_integrity.py @@ -33,16 +33,17 @@ if __name__ == "__main__": #print("python version:\n{}\n".format(sys.version)) - if len(sys.argv) != 2: - print("Usage " + sys.argv[0] + " elf_file") + if len(sys.argv) != 3: + print("Usage " + sys.argv[0] + " elf_file readelf_path") sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - modules = sys.argv[2:] + readelf_path = os.path.abspath(sys.argv[2]) + modules = sys.argv[3:] utils = Utils() utils.paths_exists([elf_file]) - integrity = IntegrityRoutine(elf_file) + integrity = IntegrityRoutine(elf_file, readelf_path) integrity.make_integrity(sec_sym=sec_sym, module_name=module_name, debug=False, print_reloc_addrs=False, sort_by="address", reverse=False) diff --git 
a/scripts/fmp/fips_fmp_integrity.py b/scripts/fmp/fips_fmp_integrity.py index 29ab8a73e033..d2dd6e922ed4 100755 --- a/scripts/fmp/fips_fmp_integrity.py +++ b/scripts/fmp/fips_fmp_integrity.py @@ -29,16 +29,17 @@ module_name = "fmp" if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage " + sys.argv[0] + " elf_file") + if len(sys.argv) != 3: + print("Usage " + sys.argv[0] + " elf_file readelf_path") sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - modules = sys.argv[2:] + readelf_path = os.path.abspath(sys.argv[2]) + modules = sys.argv[3:] utils = Utils() utils.paths_exists([elf_file]) - integrity = IntegrityRoutine(elf_file) + integrity = IntegrityRoutine(elf_file, readelf_path) integrity.make_integrity(sec_sym=sec_sym, module_name=module_name, debug=False, print_reloc_addrs=False, sort_by="address", reverse=False) diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh old mode 100644 new mode 100755 index 15617ff0b21b..ba25de7025fc --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -412,12 +412,12 @@ fi if [ -n "${CONFIG_CRYPTO_FIPS}" ]; then echo ' FIPS : Generating hmac of crypto and updating vmlinux... ' - PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/crypto/fips_crypto_integrity.py" "${objtree}/vmlinux" + PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/crypto/fips_crypto_integrity.py" "${objtree}/vmlinux" "${READELF}" fi if [ -n "${CONFIG_EXYNOS_FMP_FIPS}" ]; then echo ' FIPS : Generating hmac of fmp and updating vmlinux... 
' - PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/fmp/fips_fmp_integrity.py" "${objtree}/vmlinux" + PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/fmp/fips_fmp_integrity.py" "${objtree}/vmlinux" "${READELF}" fi # We made a new kernel - delete old version file From c2f66aa2e65309794caf0b291a1c0bec729e689d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 8 Sep 2020 20:59:47 +0300 Subject: [PATCH 358/439] scripts: FIPS: check readelf binary existence Signed-off-by: Denis Efremov --- scripts/crypto/fips_crypto_integrity.py | 3 ++- scripts/fmp/fips_fmp_integrity.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/crypto/fips_crypto_integrity.py b/scripts/crypto/fips_crypto_integrity.py index ce07280139dd..8da25be8ce39 100755 --- a/scripts/crypto/fips_crypto_integrity.py +++ b/scripts/crypto/fips_crypto_integrity.py @@ -10,6 +10,7 @@ import sys from IntegrityRoutine import IntegrityRoutine from Utils import Utils +from shutil import which __author__ = "Vadym Stupakov" @@ -38,7 +39,7 @@ sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - readelf_path = os.path.abspath(sys.argv[2]) + readelf_path = which(sys.argv[2]) or os.path.abspath(sys.argv[2]) modules = sys.argv[3:] utils = Utils() diff --git a/scripts/fmp/fips_fmp_integrity.py b/scripts/fmp/fips_fmp_integrity.py index d2dd6e922ed4..66856c341310 100755 --- a/scripts/fmp/fips_fmp_integrity.py +++ b/scripts/fmp/fips_fmp_integrity.py @@ -10,6 +10,7 @@ import sys from IntegrityRoutine import IntegrityRoutine from Utils import Utils +from shutil import which __author__ = "Vadym Stupakov" @@ -34,7 +35,7 @@ sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - readelf_path = os.path.abspath(sys.argv[2]) + readelf_path = which(sys.argv[2]) or os.path.abspath(sys.argv[2]) modules = sys.argv[3:] utils = Utils() From a2dd19be1578b0c9b7d39904b82143033c4c8f34 Mon Sep 17 00:00:00 2001 From: jimzrt Date: Thu, 8 Feb 2018 12:55:48 +0100 Subject: [PATCH 359/439] usb: Modify mass_storage gadget to 
work with configfs Signed-off-by: djb77 --- drivers/usb/gadget/function/f_mass_storage.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 25ba30329533..ee78a5840e62 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -3350,6 +3350,8 @@ static void fsg_free_inst(struct usb_function_instance *fi) kfree(opts); } +extern struct device *create_function_device(char *name); + static struct usb_function_instance *fsg_alloc_inst(void) { struct fsg_opts *opts; @@ -3389,6 +3391,9 @@ static struct usb_function_instance *fsg_alloc_inst(void) config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type); configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group); + //create dummy device + create_function_device("f_mass_storage"); + return &opts->func_inst; release_buffers: From 4ae9a46fa768d59bb150841dd9b1ac213a0ad30d Mon Sep 17 00:00:00 2001 From: Noxxxious Date: Mon, 2 Jul 2018 02:31:57 +0200 Subject: [PATCH 360/439] usb: correct function name Other drivers like the mtp driver use a proper 'function.name' to make the configfs work. So lets correct mass storages name which will allow drivedroid to work. 
--- drivers/usb/gadget/function/f_mass_storage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index ee78a5840e62..a1d81c390670 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -228,7 +228,7 @@ /*------------------------------------------------------------------------*/ -#define FSG_DRIVER_DESC "Mass Storage Function" +#define FSG_DRIVER_DESC "mass_storage" #define FSG_DRIVER_VERSION "2009/09/11" static const char fsg_string_interface[] = "Mass Storage"; From 0d65bc23ac32a21e96f02a0433b932c97ce408fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 6 Mar 2018 17:03:31 -0800 Subject: [PATCH 361/439] fs: don't clear I_DIRTY_TIME before calling mark_inode_dirty_sync __mark_inode_dirty already takes care of that, and for the XFS lazytime implementation we need to know that ->dirty_inode was called because I_DIRTY_TIME was set. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/inode.c | 1 - fs/sync.c | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/inode.c b/fs/inode.c index d662e7a25ae1..c4a2046b5db6 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1538,7 +1538,6 @@ void iput(struct inode *inode) if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { atomic_inc(&inode->i_count); - inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); trace_writeback_lazytime_iput(inode); mark_inode_dirty_sync(inode); diff --git a/fs/sync.c b/fs/sync.c index 2f6aca5f0cdd..df38797a2769 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -451,12 +451,8 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) if (!file->f_op->fsync) return -EINVAL; - if (!datasync && (inode->i_state & I_DIRTY_TIME)) { - spin_lock(&inode->i_lock); - inode->i_state &= ~I_DIRTY_TIME; - spin_unlock(&inode->i_lock); + if (!datasync && (inode->i_state & I_DIRTY_TIME)) mark_inode_dirty_sync(inode); - } return file->f_op->fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); From 68e8f5a8192a7d3e02066fce80b7850616db7a88 Mon Sep 17 00:00:00 2001 From: Ilie Halip Date: Tue, 26 Nov 2019 16:45:44 +0200 Subject: [PATCH 362/439] x86/boot: Discard .eh_frame sections When using GCC as compiler and LLVM's lld as linker, linking setup.elf fails: LD arch/x86/boot/setup.elf ld.lld: error: init sections too big! This happens because GCC generates .eh_frame sections for most of the files in that directory, then ld.lld places the merged section before __end_init, triggering an assert in the linker script. Fix this by discarding the .eh_frame sections, as suggested by Boris. The kernel proper linker script discards them too. [ bp: Going back in history, 64-bit kernel proper has been discarding .eh_frame since 2002: commit acca80acefe20420e69561cf55be64f16c34ea97 Author: Andi Kleen Date: Tue Oct 29 23:54:35 2002 -0800 [PATCH] x86-64 updates for 2.5.44 ... 
- Remove the .eh_frame on linking. This saves several hundred KB in the bzImage ] Suggested-by: Borislav Petkov Signed-off-by: Ilie Halip Signed-off-by: Borislav Petkov Reviewed-by: Nick Desaulniers Cc: clang-built-linux@googlegroups.com Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Cc: x86-ml Link: https://lore.kernel.org/lkml/20191118175223.GM6363@zn.tnic/ Link: https://github.com/ClangBuiltLinux/linux/issues/760 Link: https://lkml.kernel.org/r/20191126144545.19354-1-ilie.halip@gmail.com --- arch/x86/boot/setup.ld | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld index 96a6c7563538..00fd2d6d91ee 100644 --- a/arch/x86/boot/setup.ld +++ b/arch/x86/boot/setup.ld @@ -51,7 +51,10 @@ SECTIONS . = ALIGN(16); _end = .; - /DISCARD/ : { *(.note*) } + /DISCARD/ : { + *(.eh_frame) + *(.note*) + } /* * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: From ff3cbcd2f57e5eaf3cbed104720c3870aef454b6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 16 Mar 2018 16:37:09 +0900 Subject: [PATCH 363/439] kbuild: clear LDFLAGS in the top Makefile Currently LDFLAGS is not cleared, so same flags are accumulated in LDFLAGS when the top Makefile is recursively invoked. I found unneeded rebuild for ARCH=arm64 when CONFIG_TRIM_UNUSED_KSYMS is enabled. If include/generated/autoksyms.h is updated, the top Makefile is recursively invoked, then arch/arm64/Makefile adds one more '-maarch64linux'. Due to the command line change, modules are rebuilt needlessly. 
Signed-off-by: Masahiro Yamada Acked-by: Nicolas Pitre --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 692bafceb978..6b57cdc316b3 100644 --- a/Makefile +++ b/Makefile @@ -432,6 +432,7 @@ KBUILD_CFLAGS_KERNEL := KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +LDFLAGS := GCC_PLUGINS_CFLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC From ae90f67b0b7c5c68617e427d778ff8150e0730c1 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 21 Feb 2019 13:13:38 +0900 Subject: [PATCH 364/439] kbuild: compute false-positive -Wmaybe-uninitialized cases in Kconfig Since -Wmaybe-uninitialized was introduced by GCC 4.7, we have patched various false positives: - commit e74fc973b6e5 ("Turn off -Wmaybe-uninitialized when building with -Os") turned off this option for -Os. - commit 815eb71e7149 ("Kbuild: disable 'maybe-uninitialized' warning for CONFIG_PROFILE_ALL_BRANCHES") turned off this option for CONFIG_PROFILE_ALL_BRANCHES - commit a76bcf557ef4 ("Kbuild: enable -Wmaybe-uninitialized warning for "make W=1"") turned off this option for GCC < 4.9 Arnd provided more explanation in https://lkml.org/lkml/2017/3/14/903 I think this looks better by shifting the logic from Makefile to Kconfig. 
Link: https://github.com/ClangBuiltLinux/linux/issues/350 Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Tested-by: Nick Desaulniers --- Makefile | 10 +++------- init/Kconfig | 17 +++++++++++++++++ kernel/trace/Kconfig | 1 + 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 6b57cdc316b3..0b447f94a64f 100644 --- a/Makefile +++ b/Makefile @@ -704,17 +704,13 @@ KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) -else -ifdef CONFIG_PROFILE_ALL_BRANCHES -KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) else KBUILD_CFLAGS += -O2 endif -endif -KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ - $(call cc-disable-warning,maybe-uninitialized,)) +ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED +KBUILD_CFLAGS += -Wno-maybe-uninitialized +endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) diff --git a/init/Kconfig b/init/Kconfig index ce912e8924d5..0d2e3e6acc08 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -16,6 +16,22 @@ config DEFCONFIG_LIST default "$ARCH_DEFCONFIG" default "arch/$ARCH/defconfig" +config CC_HAS_WARN_MAYBE_UNINITIALIZED + def_bool $(cc-option,-Wmaybe-uninitialized) + help + GCC >= 4.7 supports this option. + +config CC_DISABLE_WARN_MAYBE_UNINITIALIZED + bool + depends on CC_HAS_WARN_MAYBE_UNINITIALIZED + default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9 + help + GCC's -Wmaybe-uninitialized is not reliable by definition. + Lots of false positive warnings are produced in some cases. + + If this option is enabled, -Wno-maybe-uninitialzed is passed + to the compiler to suppress maybe-uninitialized warnings. 
+ config CONSTRUCTORS bool depends on !UML @@ -1162,6 +1178,7 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size" + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to your compiler resulting in a smaller kernel. diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 3ec4922a2655..a2d799bd3ed7 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -359,6 +359,7 @@ config PROFILE_ANNOTATED_BRANCHES config PROFILE_ALL_BRANCHES bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help This tracer profiles all branch conditions. Every if () taken in the kernel is recorded whether it hit or miss. From dee8476dceaa444c8e9adfbefa92c06099bf2149 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Mon, 18 Mar 2019 17:10:05 -0400 Subject: [PATCH 365/439] Revert "kbuild: use -Oz instead of -Os when using clang" The clang option -Oz enables *aggressive* optimization for size, which doesn't necessarily result in smaller images, but can have negative impact on performance. Switch back to the less aggressive -Os. This reverts commit 6748cb3c299de1ffbe56733647b01dbcc398c419. 
Suggested-by: Peter Zijlstra Signed-off-by: Matthias Kaehlcke Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0b447f94a64f..24d8126d3860 100644 --- a/Makefile +++ b/Makefile @@ -703,7 +703,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) +KBUILD_CFLAGS += -Os else KBUILD_CFLAGS += -O2 endif From 6841dbf410257264abd2f0698d723c98b1dbbadb Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 21 Aug 2019 02:09:40 +0900 Subject: [PATCH 366/439] kbuild,arc: add CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 for ARC arch/arc/Makefile overrides -O2 with -O3. This is the only user of ARCH_CFLAGS. There is no user of ARCH_CPPFLAGS or ARCH_AFLAGS. My plan is to remove ARCH_{CPP,A,C}FLAGS after refactoring the ARC Makefile. Currently, ARC has no way to enable -Wmaybe-uninitialized because both -O3 and -Os disable it. Enabling it will be useful for compile-testing. This commit allows allmodconfig (, which defaults to -O2) to enable it. Add CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y to all the defconfig files in arch/arc/configs/ in order to keep the current config settings. 
Signed-off-by: Masahiro Yamada Acked-by: Vineet Gupta --- Makefile | 10 ++++++---- arch/arc/Makefile | 8 -------- arch/arc/configs/axs101_defconfig | 1 + arch/arc/configs/axs103_defconfig | 1 + arch/arc/configs/axs103_smp_defconfig | 1 + arch/arc/configs/haps_hs_defconfig | 1 + arch/arc/configs/haps_hs_smp_defconfig | 1 + arch/arc/configs/hsdk_defconfig | 1 + arch/arc/configs/nps_defconfig | 1 + arch/arc/configs/nsim_700_defconfig | 1 + arch/arc/configs/nsim_hs_defconfig | 1 + arch/arc/configs/nsim_hs_smp_defconfig | 1 + arch/arc/configs/nsimosci_defconfig | 1 + arch/arc/configs/nsimosci_hs_defconfig | 1 + arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + arch/arc/configs/tb10x_defconfig | 1 + arch/arc/configs/vdk_hs38_defconfig | 1 + arch/arc/configs/vdk_hs38_smp_defconfig | 1 + init/Kconfig | 12 ++++++++++-- 19 files changed, 32 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 24d8126d3860..702a3b3aeee0 100644 --- a/Makefile +++ b/Makefile @@ -702,10 +702,12 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) -ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += -Os -else -KBUILD_CFLAGS += -O2 +ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +KBUILD_CFLAGS += -O2 +else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 +KBUILD_CFLAGS += -O3 +else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +KBUILD_CFLAGS += -Os endif ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 2917f56f0ea4..960635c20d5a 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -44,14 +44,6 @@ endif cfi := $(call as-instr,.cfi_startproc\n.cfi_endproc,-DARC_DW2_UNWIND_AS_CFI) cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi) -ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE -# Generic build system uses -O2, we want -O3 -# Note: No need to add to cflags-y as that happens anyways -# -# Disable the 
false maybe-uninitialized warings gcc spits out at -O3 -ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,) -endif - # small data is default for elf32 tool-chain. If not usable, disable it # This also allows repurposing GP as scratch reg to gcc reg allocator disable_small_data := y diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 5d5ba2104ba7..0ea55ce41966 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 0874db2d48a8..e4088688abb8 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index cf5df0e1cb08..01604b72aa42 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index aa8240a92b60..fb0487b0f63f 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y 
CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index bc5a24ea6cf7..dcd1f70a3f70 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 4dac1169f528..274ad3e51ebf 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_RAM=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 9121c6ba15d0..8c014cb87146 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -6,6 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index cdb06417d3d9..5c67398ec494 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 217d7ea3c956..36af90920caf 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ 
b/arch/arc/configs/nsim_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index e733e4f1a320..f3c8434a1f25 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index c4577bd9196c..13406a9dcaf9 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index b20692c82d3c..b9a1e9971ee5 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 5ad4949af6d0..ba6cf24ea9eb 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -9,6 +9,7 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_PERF_EVENTS=y # 
CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 0130e29eeca1..7433ba017f9b 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -14,6 +14,7 @@ CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio" CONFIG_INITRAMFS_ROOT_UID=2100 CONFIG_INITRAMFS_ROOT_GID=501 # CONFIG_RD_GZIP is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 4587c9af5afe..df4a690ad26b 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 1855aa995bc9..0eca91d14fd6 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/init/Kconfig b/init/Kconfig index 0d2e3e6acc08..13476e15a1d7 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1170,14 +1170,22 @@ choice default CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE - bool "Optimize for performance" + bool "Optimize for performance (-O2)" help This is the default optimization level for the kernel, building with the "-O2" compiler flag for best performance and most helpful compile-time warnings. 
+config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" + depends on ARC + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. + config CC_OPTIMIZE_FOR_SIZE - bool "Optimize for size" + bool "Optimize for size (-Os)" imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to From e00b28d9f80f3f3b5f37073c33c2b3cddaacf078 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 13:57:10 -0700 Subject: [PATCH 367/439] Stop the ad-hoc games with -Wno-maybe-initialized We have some rather random rules about when we accept the "maybe-initialized" warnings, and when we don't. For example, we consider it unreliable for gcc versions < 4.9, but also if -O3 is enabled, or if optimizing for size. And then various kernel config options disabled it, because they know that they trigger that warning by confusing gcc sufficiently (ie PROFILE_ALL_BRANCHES). And now gcc-10 seems to be introducing a lot of those warnings too, so it falls under the same heading as 4.9 did. At the same time, we have a very straightforward way to _enable_ that warning when wanted: use "W=2" to enable more warnings. So stop playing these ad-hoc games, and just disable that warning by default, with the known and straight-forward "if you want to work on the extra compiler warnings, use W=123". Would it be great to have code that is always so obvious that it never confuses the compiler whether a variable is used initialized or not? Yes, it would. In a perfect world, the compilers would be smarter, and our source code would be simpler. That's currently not the world we live in, though. 
Signed-off-by: Linus Torvalds --- Makefile | 7 +++---- init/Kconfig | 18 ------------------ kernel/trace/Kconfig | 1 - 3 files changed, 3 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index 702a3b3aeee0..b45176bb77d9 100644 --- a/Makefile +++ b/Makefile @@ -710,10 +710,6 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif -ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED -KBUILD_CFLAGS += -Wno-maybe-uninitialized -endif - # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) @@ -908,6 +904,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# Enabled with W=2, disabled by default as noisy +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) diff --git a/init/Kconfig b/init/Kconfig index 13476e15a1d7..fa13dc7a973e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -16,22 +16,6 @@ config DEFCONFIG_LIST default "$ARCH_DEFCONFIG" default "arch/$ARCH/defconfig" -config CC_HAS_WARN_MAYBE_UNINITIALIZED - def_bool $(cc-option,-Wmaybe-uninitialized) - help - GCC >= 4.7 supports this option. - -config CC_DISABLE_WARN_MAYBE_UNINITIALIZED - bool - depends on CC_HAS_WARN_MAYBE_UNINITIALIZED - default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9 - help - GCC's -Wmaybe-uninitialized is not reliable by definition. - Lots of false positive warnings are produced in some cases. - - If this option is enabled, -Wno-maybe-uninitialzed is passed - to the compiler to suppress maybe-uninitialized warnings. 
- config CONSTRUCTORS bool depends on !UML @@ -1179,14 +1163,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE_O3 bool "Optimize more for performance (-O3)" depends on ARC - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Choosing this option will pass "-O3" to your compiler to optimize the kernel yet more for performance. config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size (-Os)" - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to your compiler resulting in a smaller kernel. diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a2d799bd3ed7..3ec4922a2655 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -359,7 +359,6 @@ config PROFILE_ANNOTATED_BRANCHES config PROFILE_ALL_BRANCHES bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help This tracer profiles all branch conditions. Every if () taken in the kernel is recorded whether it hit or miss. From fdad8225150301925e589b872fbf03fdd889cb47 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 17 Jun 2020 01:16:39 +0300 Subject: [PATCH 368/439] Kconfig: allow CC_OPTIMIZE_FOR_PERFORMANCE_O3 on all arches Signed-off-by: Denis Efremov --- init/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index fa13dc7a973e..c3c5710a9a47 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1162,7 +1162,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE_O3 bool "Optimize more for performance (-O3)" - depends on ARC help Choosing this option will pass "-O3" to your compiler to optimize the kernel yet more for performance. 
From 2cbde01146668160d8726c0aec6610104b92fd1d Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 14 Jan 2020 18:53:41 +0100 Subject: [PATCH 369/439] scripts/dtc: Remove redundant YYLOC global declaration gcc 10 will default to -fno-common, which causes this error at link time: (.text+0x0): multiple definition of `yylloc'; dtc-lexer.lex.o (symbol from plugin):(.text+0x0): first defined here This is because both dtc-lexer as well as dtc-parser define the same global symbol yyloc. Before with -fcommon those were merged into one defintion. The proper solution would be to to mark this as "extern", however that leads to: dtc-lexer.l:26:16: error: redundant redeclaration of 'yylloc' [-Werror=redundant-decls] 26 | extern YYLTYPE yylloc; | ^~~~~~ In file included from dtc-lexer.l:24: dtc-parser.tab.h:127:16: note: previous declaration of 'yylloc' was here 127 | extern YYLTYPE yylloc; | ^~~~~~ cc1: all warnings being treated as errors which means the declaration is completely redundant and can just be dropped. 
Signed-off-by: Dirk Mueller Signed-off-by: David Gibson [robh: cherry-pick from upstream] Cc: stable@vger.kernel.org Signed-off-by: Rob Herring [evdenis: scripts/dtc/dtc-parser.tab.c_shipped fixed] Signed-off-by: Denis Efremov --- scripts/dtc/dtc-lexer.l | 1 - scripts/dtc/dtc-lexer.lex.c_shipped | 1 - 2 files changed, 2 deletions(-) diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l index c600603044f3..cf7707be43aa 100644 --- a/scripts/dtc/dtc-lexer.l +++ b/scripts/dtc/dtc-lexer.l @@ -38,7 +38,6 @@ LINECOMMENT "//".*\n #include "srcpos.h" #include "dtc-parser.tab.h" -YYLTYPE yylloc; extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped index e0835ad4a848..a2a93b35f961 100644 --- a/scripts/dtc/dtc-lexer.lex.c_shipped +++ b/scripts/dtc/dtc-lexer.lex.c_shipped @@ -646,7 +646,6 @@ char *yytext; #include "srcpos.h" #include "dtc-parser.tab.h" -YYLTYPE yylloc; extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ From edfaf32d1649f92b5395c48da5df75b085c1d623 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 11 Aug 2020 12:12:56 +0300 Subject: [PATCH 370/439] fs/proc: hide magisk mounts for IsolatedService The issue described here: https://darvincitech.wordpress.com/2019/11/04/detecting-magisk-hide/ Signed-off-by: Denis Efremov --- fs/proc/Kconfig | 4 ++++ fs/proc_namespace.c | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 5e14d454c865..17e26e132a36 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -99,6 +99,10 @@ config PROC_CHILDREN Say Y if you are running any user-space software which takes benefit from this interface. For example, rkt is such a piece of software. 
+config PROC_MAGISK_HIDE_MOUNT + bool "Hide magisk mounts for IsolatedService" + default n + config PROC_UID bool "Include /proc/uid/ files" default y diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index b859aaeecb27..6fec2a071ec3 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -94,6 +94,21 @@ static void show_type(struct seq_file *m, struct super_block *sb) } } +static inline int skip_magisk_entry(const char *devname) +{ +#ifdef CONFIG_PROC_MAGISK_HIDE_MOUNT + if (devname && strstr(devname, "magisk")) { + char name[TASK_COMM_LEN]; + get_task_comm(name, current); + if (strstr(name, "Binder") || + strstr(name, "JavaBridge")) { + return SEQ_SKIP; + } + } +#endif + return 0; +} + static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) { struct proc_mounts *p = m->private; @@ -107,6 +122,9 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; mangle(m, r->mnt_devname ? r->mnt_devname : "none"); } seq_putc(m, ' '); @@ -179,6 +197,9 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; mangle(m, r->mnt_devname ? r->mnt_devname : "none"); } seq_puts(m, sb_rdonly(sb) ? " ro" : " rw"); @@ -210,6 +231,10 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) goto out; } else { if (r->mnt_devname) { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; + seq_puts(m, "device "); mangle(m, r->mnt_devname); } else From e4c8183ab7039a28fa1e17c0a54c7832c0d2d594 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 1 May 2019 11:05:41 -0700 Subject: [PATCH 371/439] gcc-9: silence 'address-of-packed-member' warning commit 6f303d60534c46aa1a239f29c321f95c83dda748 upstream. We already did this for clang, but now gcc has that warning too. Yes, yes, the address may be unaligned. And that's kind of the point. 
Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b45176bb77d9..0a0c52b66ca0 100644 --- a/Makefile +++ b/Makefile @@ -700,6 +700,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE @@ -761,7 +762,6 @@ ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) From 2994219aec12fbdd583ceb03f4ded92eaa8d3317 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Fri, 8 Feb 2019 23:51:05 +0100 Subject: [PATCH 372/439] Compiler Attributes: add support for __copy (gcc >= 9) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c0d9782f5b6d7157635ae2fd782a4b27d55a6013 upstream. From the GCC manual: copy copy(function) The copy attribute applies the set of attributes with which function has been declared to the declaration of the function to which the attribute is applied. The attribute is designed for libraries that define aliases or function resolvers that are expected to specify the same set of attributes as their targets. The copy attribute can be used with functions, variables, or types. 
However, the kind of symbol to which the attribute is applied (either function or variable) must match the kind of symbol to which the argument refers. The copy attribute copies only syntactic and semantic attributes but not attributes that affect a symbol’s linkage or visibility such as alias, visibility, or weak. The deprecated attribute is also not copied. https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html The upcoming GCC 9 release extends the -Wmissing-attributes warnings (enabled by -Wall) to C and aliases: it warns when particular function attributes are missing in the aliases but not in their target, e.g.: void __cold f(void) {} void __alias("f") g(void); diagnoses: warning: 'g' specifies less restrictive attribute than its target 'f': 'cold' [-Wmissing-attributes] Using __copy(f) we can copy the __cold attribute from f to g: void __cold f(void) {} void __copy(f) __alias("f") g(void); This attribute is most useful to deal with situations where an alias is declared but we don't know the exact attributes the target has. For instance, in the kernel, the widely used module_init/exit macros define the init/cleanup_module aliases, but those cannot be marked always as __init/__exit since some modules do not have their functions marked as such. 
Suggested-by: Martin Sebor Reviewed-by: Nick Desaulniers Signed-off-by: Miguel Ojeda Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-gcc.h | 4 ++++ include/linux/compiler_types.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 4816355b9875..6d7ead22c1b4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -343,6 +343,10 @@ #define __designated_init __attribute__((designated_init)) #endif +#if GCC_VERSION >= 90100 +#define __copy(symbol) __attribute__((__copy__(symbol))) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e9ce90615869..a207f820d3b0 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -230,6 +230,10 @@ struct ftrace_likely_data { # define __latent_entropy #endif +#ifndef __copy +# define __copy(symbol) +#endif + #ifndef __randomize_layout # define __randomize_layout __designated_init #endif From 34f804130461906292f28e2f46813942c928be35 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Sat, 19 Jan 2019 20:59:34 +0100 Subject: [PATCH 373/439] include/linux/module.h: copy __init/__exit attrs to init/cleanup_module commit a6e60d84989fa0e91db7f236eda40453b0e44afa upstream. The upcoming GCC 9 release extends the -Wmissing-attributes warnings (enabled by -Wall) to C and aliases: it warns when particular function attributes are missing in the aliases but not in their target. In particular, it triggers for all the init/cleanup_module aliases in the kernel (defined by the module_init/exit macros), ending up being very noisy. These aliases point to the __init/__exit functions of a module, which are defined as __cold (among other attributes). However, the aliases themselves do not have the __cold attribute. 
Since the compiler behaves differently when compiling a __cold function as well as when compiling paths leading to calls to __cold functions, the warning is trying to point out the possibly-forgotten attribute in the alias. In order to keep the warning enabled, we decided to silence this case. Ideally, we would mark the aliases directly as __init/__exit. However, there are currently around 132 modules in the kernel which are missing __init/__exit in their init/cleanup functions (either because they are missing, or for other reasons, e.g. the functions being called from somewhere else); and a section mismatch is a hard error. A conservative alternative was to mark the aliases as __cold only. However, since we would like to eventually enforce __init/__exit to be always marked, we chose to use the new __copy function attribute (introduced by GCC 9 as well to deal with this). With it, we copy the attributes used by the target functions into the aliases. This way, functions that were not marked as __init/__exit won't have their aliases marked either, and therefore there won't be a section mismatch. Note that the warning would go away marking either the extern declaration, the definition, or both. However, we only mark the definition of the alias, since we do not want callers (which only see the declaration) to be compiled as if the function was __cold (and therefore the paths leading to those calls would be assumed to be unlikely). 
Link: https://lore.kernel.org/lkml/20190123173707.GA16603@gmail.com/ Link: https://lore.kernel.org/lkml/20190206175627.GA20399@gmail.com/ Suggested-by: Martin Sebor Acked-by: Jessica Yu Signed-off-by: Miguel Ojeda Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- include/linux/module.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/module.h b/include/linux/module.h index 2d01e23c34a9..96c51179d500 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -129,13 +129,13 @@ extern void cleanup_module(void); #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); + int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. */ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); + void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); #endif From 3982af956edcb24f3e0f866f2b54a45fd68685df Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 11 Jun 2019 11:43:31 -0700 Subject: [PATCH 374/439] kbuild: Add -Werror=unknown-warning-option to CLANG_FLAGS [ Upstream commit 589834b3a0097a4908f4112eac0ca2feb486fa32 ] In commit ebcc5928c5d9 ("arm64: Silence gcc warnings about arch ABI drift"), the arm64 Makefile added -Wno-psabi to KBUILD_CFLAGS, which is a GCC only option so clang rightfully complains: warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] https://clang.llvm.org/docs/DiagnosticsReference.html#wunknown-warning-option However, by default, this is merely a warning so the build happily goes on with a slew of these warnings in the process. 
Commit c3f0d0bc5b01 ("kbuild, LLVMLinux: Add -Werror to cc-option to support clang") worked around this behavior in cc-option by adding -Werror so that unknown flags cause an error. However, this all happens silently and when an unknown flag is added to the build unconditionally like -Wno-psabi, cc-option will always fail because there is always an unknown flag in the list of flags. This manifested as link time failures in the arm64 libstub because -fno-stack-protector didn't get added to KBUILD_CFLAGS. To avoid these weird cryptic failures in the future, make clang behave like gcc and immediately error when it encounters an unknown flag by adding -Werror=unknown-warning-option to CLANG_FLAGS. This can be added unconditionally for clang because it is supported by at least 3.0.0, according to godbolt [1] and 4.0.0, according to its documentation [2], which is far earlier than we typically support. [1]: https://godbolt.org/z/7F7rm3 [2]: https://releases.llvm.org/4.0.0/tools/clang/docs/DiagnosticsReference.html#wunknown-warning-option Link: https://github.com/ClangBuiltLinux/linux/issues/511 Link: https://github.com/ClangBuiltLinux/linux/issues/517 Suggested-by: Peter Smith Signed-off-by: Nathan Chancellor Tested-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 0a0c52b66ca0..c581e9fdd4e8 100644 --- a/Makefile +++ b/Makefile @@ -519,6 +519,7 @@ ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif CLANG_FLAGS += -no-integrated-as +CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) export CLANG_FLAGS From cef9d1e62844927d3ad2263dc8aa58e6ae69e693 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 29 Jul 2019 18:15:17 +0900 Subject: [PATCH 375/439] kbuild: initialize CLANG_FLAGS correctly in the top Makefile commit 5241ab4cf42d3a93b933b55d3d53f43049081fa1 upstream. 
CLANG_FLAGS is initialized by the following line: CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) ..., which is run only when CROSS_COMPILE is set. Some build targets (bindeb-pkg etc.) recurse to the top Makefile. When you build the kernel with Clang but without CROSS_COMPILE, the same compiler flags such as -no-integrated-as are accumulated into CLANG_FLAGS. If you run 'make CC=clang' and then 'make CC=clang bindeb-pkg', Kbuild will recompile everything needlessly due to the build command change. Fix this by correctly initializing CLANG_FLAGS. Fixes: 238bcbc4e07f ("kbuild: consolidate Clang compiler flags") Cc: # v5.0+ Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Acked-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c581e9fdd4e8..c51396defc63 100644 --- a/Makefile +++ b/Makefile @@ -434,6 +434,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds LDFLAGS := GCC_PLUGINS_CFLAGS := +CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES @@ -510,7 +511,7 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) CLANG_TRIPLE ?= $(CROSS_COMPILE) -CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%)) +CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 
From a5bcea1d03041c4d4edb1d484399fc864e97a11b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 30 Mar 2018 13:15:26 +0900 Subject: [PATCH 376/439] kbuild: use -fmacro-prefix-map to make __FILE__ a relative path [ Upstream commit a73619a845d5625079cc1b3b820f44c899618388 ] The __FILE__ macro is used everywhere in the kernel to locate the file printing the log message, such as WARN_ON(), etc. If the kernel is built out of tree, this can be a long absolute path, like this: WARNING: CPU: 1 PID: 1 at /path/to/build/directory/arch/arm64/kernel/foo.c:... This is because Kbuild runs in the objtree instead of the srctree, then __FILE__ is expanded to a file path prefixed with $(srctree)/. Commit 9da0763bdd82 ("kbuild: Use relative path when building in a subdir of the source tree") improved this to some extent; $(srctree) becomes ".." if the objtree is a child of the srctree. For other cases of out-of-tree build, __FILE__ is still the absolute path. It also means the kernel image depends on where it was built. A brand-new option from GCC, -fmacro-prefix-map, solves this problem. If your compiler supports it, __FILE__ is the relative path from the srctree regardless of O= option. This provides more readable log and more reproducible builds. Please note __FILE__ is always an absolute path for external modules. 
Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index c51396defc63..eb66e9ee2ce2 100644 --- a/Makefile +++ b/Makefile @@ -942,6 +942,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # Require designated initializers for all marked structures KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) +# change __FILE__ to the relative path from the srctree +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) From a33e8f2ec01da8b804349ac987594d166dd803cc Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Wed, 17 Jul 2019 11:06:26 -0500 Subject: [PATCH 377/439] kbuild: add -fcf-protection=none when using retpoline flags [ Upstream commit 29be86d7f9cb18df4123f309ac7857570513e8bc ] The gcc -fcf-protection=branch option is not compatible with -mindirect-branch=thunk-extern. The latter is used when CONFIG_RETPOLINE is selected, and this will fail to build with a gcc which has -fcf-protection=branch enabled by default. Adding -fcf-protection=none when building with retpoline enabled prevents such build failures. 
Signed-off-by: Seth Forshee Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index eb66e9ee2ce2..94caddcfe41b 100644 --- a/Makefile +++ b/Makefile @@ -945,6 +945,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) # change __FILE__ to the relative path from the srctree KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) +# ensure -fcf-protection is disabled when using retpoline as it is +# incompatible with -mindirect-branch=thunk-extern +ifdef CONFIG_RETPOLINE +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +endif + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) From bbfa585058ee5b088954f0bdc5c1cff7865d28ad Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 22 Nov 2018 08:11:54 +0900 Subject: [PATCH 378/439] kbuild: fix single target build for external module [ Upstream commit e07db28eea38ed4e332b3a89f3995c86b713cb5b ] Building a single target in an external module fails due to missing .tmp_versions directory. For example, $ make -C /lib/modules/$(uname -r)/build M=$PWD foo.o will fail in the following way: CC [M] /home/masahiro/foo/foo.o /bin/sh: 1: cannot create /home/masahiro/foo/.tmp_versions/foo.mod: Directory nonexistent This is because $(cmd_crmodverdir) is executed only before building /, %/, %.ko single targets of external modules. Create .tmp_versions in the 'prepare' target. 
Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- Makefile | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 94caddcfe41b..dd4f8550f8fe 100644 --- a/Makefile +++ b/Makefile @@ -1668,9 +1668,6 @@ else # KBUILD_EXTMOD # We are always building modules KBUILD_MODULES := 1 -PHONY += crmodverdir -crmodverdir: - $(cmd_crmodverdir) PHONY += $(objtree)/Module.symvers $(objtree)/Module.symvers: @@ -1682,7 +1679,7 @@ $(objtree)/Module.symvers: module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) PHONY += $(module-dirs) modules -$(module-dirs): crmodverdir $(objtree)/Module.symvers +$(module-dirs): prepare $(objtree)/Module.symvers $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) modules: $(module-dirs) @@ -1723,7 +1720,8 @@ help: # Dummies... PHONY += prepare scripts -prepare: ; +prepare: + $(cmd_crmodverdir) scripts: ; endif # KBUILD_EXTMOD @@ -1849,17 +1847,14 @@ endif # Modules /: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) # Make sure the latest headers are built for Documentation Documentation/ samples/: headers_install %/: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) %.ko: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) $(@:.ko=.o) $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost From 687e1592f666208724d951edb35d766dedc1f7b8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 15 Jan 2019 16:19:00 +0900 Subject: [PATCH 379/439] kbuild: mark prepare0 as PHONY to fix external module build [ Upstream commit e00d8880481497474792d28c14479a9fb6752046 ] Commit c3ff2a5193fa ("powerpc/32: add stack protector support") caused kernel panic on PowerPC when an external module is used with CONFIG_STACKPROTECTOR because the 'prepare' target was not executed for the external module build. 
Commit e07db28eea38 ("kbuild: fix single target build for external module") turned it into a build error because the 'prepare' target is now executed but the 'prepare0' target is missing for the external module build. External module on arm/arm64 with CONFIG_STACKPROTECTOR_PER_TASK is also broken in the same way. Move 'PHONY += prepare0' to the common place. GNU Make is fine with missing rule for phony targets. I also removed the comment which is wrong irrespective of this commit. I minimize the change so it can be easily backported to 4.20.x To fix v4.20, please backport e07db28eea38 ("kbuild: fix single target build for external module"), and then this commit. Link: https://bugzilla.kernel.org/show_bug.cgi?id=201891 Fixes: e07db28eea38 ("kbuild: fix single target build for external module") Fixes: c3ff2a5193fa ("powerpc/32: add stack protector support") Fixes: 189af4657186 ("ARM: smp: add support for per-task stack canaries") Fixes: 0a1213fa7432 ("arm64: enable per-task stack canaries") Cc: linux-stable # v4.20 Reported-by: Samuel Holland Reported-by: Alexey Kardashevskiy Signed-off-by: Masahiro Yamada Acked-by: Ard Biesheuvel Tested-by: Alexey Kardashevskiy Signed-off-by: Sasha Levin --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index dd4f8550f8fe..93adda2a63d5 100644 --- a/Makefile +++ b/Makefile @@ -1088,6 +1088,7 @@ ifdef CONFIG_STACK_VALIDATION endif endif +PHONY += prepare0 ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1182,8 +1183,7 @@ include/config/kernel.release: include/config/auto.conf FORCE # archprepare is used in arch Makefiles and when processed asm symlink, # version.h and scripts_basic is processed / created. 
-# Listed in dependency order -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 +PHONY += prepare archprepare prepare1 prepare2 prepare3 # prepare3 is used to check if we are building in a separate output directory, # and if so do: From 18118b9da4da2ac71f8d2d57493d547f13742885 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 14:30:29 -0700 Subject: [PATCH 380/439] gcc-10: disable 'zero-length-bounds' warning for now commit 5c45de21a2223fe46cf9488c99a7fbcf01527670 upstream. This is a fine warning, but we still have a number of zero-length arrays in the kernel that come from the traditional gcc extension. Yes, they are getting converted to flexible arrays, but in the meantime the gcc-10 warning about zero-length bounds is very verbose, and is hiding other issues. I missed one actual build failure because it was hidden among hundreds of lines of warning. Thankfully I caught it on the second go before pushing things out, but it convinced me that I really need to disable the new warnings for now. We'll hopefully be all done with our conversion to flexible arrays in the not too distant future, and we can then re-enable this warning. 
Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 93adda2a63d5..72148229fa17 100644 --- a/Makefile +++ b/Makefile @@ -906,6 +906,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# We'll want to enable this eventually, but it's not going away for 5.7 at least +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) + # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 6e556e755fbcbaa9479bdf32a602c783c9fe4c65 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 14:52:44 -0700 Subject: [PATCH 381/439] gcc-10: disable 'array-bounds' warning for now commit 44720996e2d79e47d508b0abe99b931a726a3197 upstream. This is another fine warning, related to the 'zero-length-bounds' one, but hitting the same historical code in the kernel. Because C didn't historically support flexible array members, we have code that instead uses a one-sized array, the same way we have cases of zero-sized arrays. The one-sized arrays come from either not wanting to use the gcc zero-sized array extension, or from a slight convenience-feature, where particularly for strings, the size of the structure now includes the allocation for the final NUL character. So with a "char name[1];" at the end of a structure, you can do things like v = my_malloc(sizeof(struct vendor) + strlen(name)); and avoid the "+1" for the terminator. Yes, the modern way to do that is with a flexible array, and using 'offsetof()' instead of 'sizeof()', and adding the "+1" by hand. That also technically gets the size "more correct" in that it avoids any alignment (and thus padding) issues, but this is another long-term cleanup thing that will not happen for 5.7. 
So disable the warning for now, even though it's potentially quite useful. Having a slew of warnings that then hide more urgent new issues is not an improvement. Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 72148229fa17..f18bbc947921 100644 --- a/Makefile +++ b/Makefile @@ -908,6 +908,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) # We'll want to enable this eventually, but it's not going away for 5.7 at least KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From ae4ff0218274e4a0ab3059931de10f2c875d8e7d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 15:40:52 -0700 Subject: [PATCH 382/439] gcc-10: disable 'stringop-overflow' warning for now commit 5a76021c2eff7fcf2f0918a08fd8a37ce7922921 upstream. This is the final array bounds warning removal for gcc-10 for now. Again, the warning is good, and we should re-enable all these warnings when we have converted all the legacy array declaration cases to flexible arrays. But in the meantime, it's just noise. 
Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index f18bbc947921..51bb24ef3599 100644 --- a/Makefile +++ b/Makefile @@ -909,6 +909,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) # We'll want to enable this eventually, but it's not going away for 5.7 at least KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 399de985cd5bdd42e732b7248044c3ba4d6332b4 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 15:45:21 -0700 Subject: [PATCH 383/439] gcc-10: disable 'restrict' warning for now commit adc71920969870dfa54e8f40dac8616284832d02 upstream. gcc-10 now warns about passing aliasing pointers to functions that take restricted pointers. That's actually a great warning, and if we ever start using 'restrict' in the kernel, it might be quite useful. But right now we don't, and it turns out that the only thing this warns about is an idiom where we have declared a few functions to be "printf-like" (which seems to make gcc pick up the restricted pointer thing), and then we print to the same buffer that we also use as an input. And people do that as an odd concatenation pattern, with code like this: #define sysfs_show_gen_prop(buffer, fmt, ...) \ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__) where we have 'buffer' as both the destination of the final result, and as the initial argument. Yes, it's a bit questionable. And outside of the kernel, people do have standard declarations like int snprintf( char *restrict buffer, size_t bufsz, const char *restrict format, ... ); where that output buffer is marked as a restrict pointer that cannot alias with any other arguments. 
But in the context of the kernel, that 'use snprintf() to concatenate to the end result' does work, and the pattern shows up in multiple places. And we have not marked our own version of snprintf() as taking restrict pointers, so the warning is incorrect for now, and gcc picks it up on its own. If we do start using 'restrict' in the kernel (and it might be a good idea if people find places where it matters), we'll need to figure out how to avoid this issue for snprintf and friends. But in the meantime, this warning is not useful. Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 51bb24ef3599..a817a4e2de53 100644 --- a/Makefile +++ b/Makefile @@ -911,6 +911,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) +# Another good warning that we'll want to enable eventually +KBUILD_CFLAGS += $(call cc-disable-warning, restrict) + # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 2e390dfe022947dbfaed5a3a50421db524608e52 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Tue, 17 Mar 2020 00:07:18 +0000 Subject: [PATCH 384/439] Makefile: disallow data races on gcc-10 as well commit b1112139a103b4b1101d0d2d72931f2d33d8c978 upstream. gcc-10 will rename --param=allow-store-data-races=0 to -fno-allow-store-data-races. The flag change happened at https://gcc.gnu.org/PR92046. 
Signed-off-by: Sergei Trofimovich Acked-by: Jiri Kosina Signed-off-by: Masahiro Yamada Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index a817a4e2de53..3ba2aaaa19e8 100644 --- a/Makefile +++ b/Makefile @@ -715,6 +715,7 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) # check for 'asm goto' ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) From b28a59aec90a942473399eae67285314db43d8c7 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 10 Apr 2020 14:34:10 -0700 Subject: [PATCH 385/439] kernel/gcov/fs.c: gcov_seq_next() should increase position index [ Upstream commit f4d74ef6220c1eda0875da30457bef5c7111ab06 ] If seq_file .next function does not change position index, read after some lseek can generate unexpected output. 
https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: Andrew Morton Acked-by: Peter Oberparleiter Cc: Al Viro Cc: Davidlohr Bueso Cc: Ingo Molnar Cc: Manfred Spraul Cc: NeilBrown Cc: Steven Rostedt Cc: Waiman Long Link: http://lkml.kernel.org/r/f65c6ee7-bd00-f910-2f8a-37cc67e4ff88@virtuozzo.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/gcov/fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 6e40ff6be083..291e0797125b 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -109,9 +109,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; + (*pos)++; if (gcov_iter_next(iter)) return NULL; - (*pos)++; return iter; } From 0012c5cf52fe8052bb9cc60c588c6c5e842290bc Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Fri, 4 Sep 2020 18:58:08 +0300 Subject: [PATCH 386/439] gcov: Disable gcov build with GCC 10 [ Upstream commit cfc905f158eaa099d6258031614d11869e7ef71c ] GCOV built with GCC 10 doesn't initialize n_function variable. This produces different kernel panics as was seen by Colin in Ubuntu and me in FC 32. As a workaround, let's disable GCOV build for broken GCC 10 version. 
Link: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1891288 Link: https://lore.kernel.org/lkml/20200827133932.3338519-1-leon@kernel.org Link: https://lore.kernel.org/lkml/CAHk-=whbijeSdSvx-Xcr0DPMj0BiwhJ+uiNnDSVZcr_h_kg7UA@mail.gmail.com/ Cc: Colin Ian King Signed-off-by: Leon Romanovsky Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/gcov/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 1276aabaab55..1d78ed19a351 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,6 +3,7 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS + depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- From ffd0fd6315d4a1d03874741e8fde67ec1ed43408 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Thu, 10 Sep 2020 14:52:01 +0200 Subject: [PATCH 387/439] gcov: add support for GCC 10.1 [ Upstream commit 40249c6962075c040fd071339acae524f18bfac9 ] Using gcov to collect coverage data for kernels compiled with GCC 10.1 causes random malfunctions and kernel crashes. This is the result of a changed GCOV_COUNTERS value in GCC 10.1 that causes a mismatch between the layout of the gcov_info structure created by GCC profiling code and the related structure used by the kernel. Fix this by updating the in-kernel GCOV_COUNTERS value. Also re-enable config GCOV_KERNEL for use with GCC 10. 
Reported-by: Colin Ian King Reported-by: Leon Romanovsky Signed-off-by: Peter Oberparleiter Tested-by: Leon Romanovsky Tested-and-Acked-by: Colin Ian King Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/gcov/Kconfig | 1 - kernel/gcov/gcc_4_7.c | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 1d78ed19a351..1276aabaab55 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,7 +3,6 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS - depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ca5e5c0ef853..5b9e76117ded 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 From 08d429cbec4e679c25ebf98dc9cd76950b1b6617 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 31 May 2020 17:47:06 +0900 Subject: [PATCH 388/439] kbuild: force to build vmlinux if CONFIG_MODVERSION=y commit 4b50c8c4eaf06a825d1c005c0b1b4a8307087b83 upstream. This code does not work as stated in the comment. $(CONFIG_MODVERSIONS) is always empty because it is expanded before include/config/auto.conf is included. Hence, 'make modules' with CONFIG_MODVERSION=y cannot record the version CRCs. This has been broken since 2003, commit ("kbuild: Enable modules to be build using the "make dir/" syntax"). 
[1] [1]: https://git.kernel.org/pub/scm/linux/kernel/git/history/history.git/commit/?id=15c6240cdc44bbeef3c4797ec860f9765ef4f1a7 Cc: linux-stable # v2.5.71+ Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- Makefile | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 3ba2aaaa19e8..10ce0697fb9c 100644 --- a/Makefile +++ b/Makefile @@ -573,12 +573,8 @@ KBUILD_MODULES := KBUILD_BUILTIN := 1 # If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. - ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) + KBUILD_BUILTIN := endif # If we have "make modules", compile modules @@ -1391,6 +1387,13 @@ ifdef CONFIG_MODULES all: modules +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. +ifdef CONFIG_MODVERSIONS + KBUILD_BUILTIN := 1 +endif + # Build modules # # A module can be listed more than once in obj-m resulting in From 204fa39b7b5aaa334ab3f6992965cd3226ccc161 Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Tue, 21 Jul 2020 10:31:23 -0700 Subject: [PATCH 389/439] Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation commit ca9b31f6bb9c6aa9b4e5f0792f39a97bbffb8c51 upstream. When CROSS_COMPILE is set (e.g. aarch64-linux-gnu-), if $(CROSS_COMPILE)elfedit is found at /usr/bin/aarch64-linux-gnu-elfedit, GCC_TOOLCHAIN_DIR will be set to /usr/bin/. --prefix= will be set to /usr/bin/ and Clang as of 11 will search for both $(prefix)aarch64-linux-gnu-$needle and $(prefix)$needle. GCC searchs for $(prefix)aarch64-linux-gnu/$version/$needle, $(prefix)aarch64-linux-gnu/$needle and $(prefix)$needle. 
In practice, $(prefix)aarch64-linux-gnu/$needle rarely contains executables. To better model how GCC's -B/--prefix takes in effect in practice, newer Clang (since https://github.com/llvm/llvm-project/commit/3452a0d8c17f7166f479706b293caf6ac76ffd90) only searches for $(prefix)$needle. Currently it will find /usr/bin/as instead of /usr/bin/aarch64-linux-gnu-as. Set --prefix= to $(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) (/usr/bin/aarch64-linux-gnu-) so that newer Clang can find the appropriate cross compiling GNU as (when -no-integrated-as is in effect). Cc: stable@vger.kernel.org Reported-by: Nathan Chancellor Signed-off-by: Fangrui Song Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://github.com/ClangBuiltLinux/linux/issues/1099 Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 10ce0697fb9c..b19ec32d5933 100644 --- a/Makefile +++ b/Makefile @@ -513,7 +513,7 @@ ifneq ($(CROSS_COMPILE),) CLANG_TRIPLE ?= $(CROSS_COMPILE) CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) From 02fe4e89bfae22c06fbe07def0a4d3478f67567c Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 27 Jun 2019 12:14:48 -0700 Subject: [PATCH 390/439] kbuild: Add ability to test Clang's integrated assembler There are some people interested in experimenting with Clang's integrated assembler. To make it easy to do so without source modification, allow the user to specify 'AS=clang' as part of the make command to avoid adding '-no-integrated-as' to the {A,C}FLAGS. 
Link: https://github.com/ClangBuiltLinux/linux/issues/577 Suggested-by: Dmitry Golovin Signed-off-by: Nathan Chancellor Reviewed-by: Nick Desaulniers Tested-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index b19ec32d5933..1110b035853e 100644 --- a/Makefile +++ b/Makefile @@ -519,7 +519,9 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif +ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as +endif CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) From e7b0ce3fb59c85dad87405cd43b603b71f9fb10a Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 9 May 2019 04:48:25 -0700 Subject: [PATCH 391/439] kbuild: Don't try to add '-fcatch-undefined-behavior' flag This is no longer a valid option in clang, it was removed in 3.5, which we don't support. https://github.com/llvm/llvm-project/commit/cb3f812b6b9fab8f3b41414f24e90222170417b4 Signed-off-by: Nathan Chancellor Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 1110b035853e..a07fbe5dc830 100644 --- a/Makefile +++ b/Makefile @@ -770,7 +770,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # source of a reference will be _MergedGlobals and not on of the whitelisted names. # See modpost pattern 2 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) -KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) else # These warnings generated too much noise in a regular build. From 9846e1c50395f0f03ebbb2cca6e8f236d35beda6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 12 Apr 2018 14:21:54 -0500 Subject: [PATCH 392/439] objtool: Support HOSTCFLAGS and HOSTLDFLAGS It may be useful to compile host programs with different flags (e.g. hardening). 
Ensure that objtool picks up the appropriate flags. Signed-off-by: Laura Abbott Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Masahiro Yamada Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/05a360681176f1423cb2fde8faae3a0a0261afc5.1523560825.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- tools/objtool/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ -I$(srctree)/tools/objtool/arch/$(ARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) -LDFLAGS += -lelf $(LIBSUBCMD) +CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) +LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) # Allow old libelf to be used: elfshdr := $(shell echo '$(pound)include ' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) From 0762c2b402bb00ea4e934dec4e74898e4df2f518 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 9 Jul 2018 17:45:56 -0700 Subject: [PATCH 393/439] tools: build: Fixup host c flags Commit 0c3b7e42616f ("tools build: Add support for host programs format") introduced host_c_flags which referenced CHOSTFLAGS. The actual name of the variable is HOSTCFLAGS. Fix this up. 
Fixes: 0c3b7e42616f ("tools build: Add support for host programs format") Signed-off-by: Laura Abbott Acked-by: Jiri Olsa Signed-off-by: Masahiro Yamada --- tools/build/Build.include | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build/Build.include b/tools/build/Build.include index d9048f145f97..950c1504ca37 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX ### ## HOSTCC C flags -host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj)) +host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) From 33ec6e64098dc99d672b92f0942e14c23eaaff35 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 14:57:18 +0900 Subject: [PATCH 394/439] kbuild: remove AS variable As commit 5ef872636ca7 ("kbuild: get rid of misleading $(AS) from documents") noted, we rarely use $(AS) directly in the kernel build. Now that the only/last user of $(AS) in drivers/net/wan/Makefile was converted to $(CC), $(AS) is no longer used in the build process. You can still pass in AS=clang, which is just a switch to turn on the LLVM integrated assembler. Signed-off-by: Masahiro Yamada Reviewed-by: Nick Desaulniers Tested-by: Nick Desaulniers Reviewed-by: Nathan Chancellor --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index a07fbe5dc830..039812fbb763 100644 --- a/Makefile +++ b/Makefile @@ -372,7 +372,6 @@ HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) # Make variables (CC, etc...) 
-AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold CC = $(CROSS_COMPILE)gcc @@ -436,7 +435,7 @@ LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC +export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS @@ -519,7 +518,7 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),) +ifeq ($(if $(AS),$(shell $(AS) --version 2>&1 | head -n 1 | grep clang)),) CLANG_FLAGS += -no-integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option From 5099425f0e3342738afc6aa540f3f65556732525 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Apr 2020 10:36:22 +0900 Subject: [PATCH 395/439] kbuild: replace AS=clang with LLVM_IAS=1 The 'AS' variable is unused for building the kernel. Only the remaining usage is to turn on the integrated assembler. A boolean flag is a better fit for this purpose. AS=clang was added for experts. So, I replaced it with LLVM_IAS=1, breaking the backward compatibility. 
Suggested-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Reviewed-by: Nick Desaulniers --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 039812fbb763..823f489131b8 100644 --- a/Makefile +++ b/Makefile @@ -518,7 +518,7 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -ifeq ($(if $(AS),$(shell $(AS) --version 2>&1 | head -n 1 | grep clang)),) +ifneq ($(LLVM_IAS),1) CLANG_FLAGS += -no-integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option From 6c0c1d6b0a24fbc546eb8fa44936d28190f43774 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 16 May 2019 12:49:42 -0500 Subject: [PATCH 396/439] objtool: Allow AR to be overridden with HOSTAR Currently, this Makefile hardcodes GNU ar, meaning that if it is not available, there is no way to supply a different one and the build will fail. $ make AR=llvm-ar CC=clang LD=ld.lld HOSTAR=llvm-ar HOSTCC=clang \ HOSTLD=ld.lld HOSTLDFLAGS=-fuse-ld=lld defconfig modules_prepare ... AR /out/tools/objtool/libsubcmd.a /bin/sh: 1: ar: not found ... Follow the logic of HOST{CC,LD} and allow the user to specify a different ar tool via HOSTAR (which is used elsewhere in other tools/ Makefiles). 
Signed-off-by: Nathan Chancellor Signed-off-by: Josh Poimboeuf Reviewed-by: Nick Desaulniers Reviewed-by: Mukesh Ojha Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/80822a9353926c38fd7a152991c6292491a9d0e8.1558028966.git.jpoimboe@redhat.com Link: https://github.com/ClangBuiltLinux/linux/issues/481 Signed-off-by: Ingo Molnar --- tools/objtool/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index f76d9914686a..29c8c358451a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,11 +7,12 @@ ARCH := x86 endif # always use the host compiler +HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) -AR = ar ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) From e05d517d962a07accfeb3bf77d4ee76a4d734edc Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Apr 2020 10:36:23 +0900 Subject: [PATCH 397/439] kbuild: support LLVM=1 to switch the default tools to Clang/LLVM As Documentation/kbuild/llvm.rst implies, building the kernel with a full set of LLVM tools gets very verbose and unwieldy. Provide a single switch LLVM=1 to use Clang and LLVM tools instead of GCC and Binutils. You can pass it from the command line or as an environment variable. Please note LLVM=1 does not turn on the integrated assembler. You need to pass LLVM_IAS=1 to use it. When the upstream kernel is ready for the integrated assembler, I think we can make it default. We discussed what we need, and we agreed to go with a simple boolean flag that switches both target and host tools: https://lkml.org/lkml/2020/3/28/494 https://lkml.org/lkml/2020/4/3/43 Some items discussed, but not adopted: - LLVM_DIR When multiple versions of LLVM are installed, I just thought supporting LLVM_DIR=/path/to/my/llvm/bin/ might be useful. CC = $(LLVM_DIR)clang LD = $(LLVM_DIR)ld.lld ... However, we can handle this by modifying PATH. 
So, we decided to not do this. - LLVM_SUFFIX Some distributions (e.g. Debian) package specific versions of LLVM with naming conventions that use the version as a suffix. CC = clang$(LLVM_SUFFIX) LD = ld.lld(LLVM_SUFFIX) ... will allow a user to pass LLVM_SUFFIX=-11 to use clang-11 etc., but the suffixed versions in /usr/bin/ are symlinks to binaries in /usr/lib/llvm-#/bin/, so this can also be handled by PATH. Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor # build Tested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers --- Makefile | 29 ++++++++++++++++++++++++----- tools/objtool/Makefile | 6 ++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 823f489131b8..d43f80de1f34 100644 --- a/Makefile +++ b/Makefile @@ -363,8 +363,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) -HOSTCC = gcc -HOSTCXX = g++ +ifneq ($(LLVM),) +HOSTCC = clang +HOSTCXX = clang++ +else +HOSTCC = gcc +HOSTCXX = g++ +endif HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) @@ -372,16 +377,30 @@ HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) # Make variables (CC, etc...) 
+CPP = $(CC) -E +ifneq ($(LLVM),) +CC = clang +LD = ld.lld +LDGOLD = ld.gold +AR = llvm-ar +NM = llvm-nm +OBJCOPY = llvm-objcopy +OBJDUMP = llvm-objdump +READELF = llvm-readelf +OBJSIZE = llvm-size +STRIP = llvm-strip +else +CC = $(CROSS_COMPILE)gcc LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold -CC = $(CROSS_COMPILE)gcc -CPP = $(CC) -E AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump READELF = $(CROSS_COMPILE)readelf +OBJSIZE = $(CROSS_COMPILE)size +STRIP = $(CROSS_COMPILE)strip +endif AWK = awk GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 29c8c358451a..0b9c598a8eb8 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,15 @@ ARCH := x86 endif # always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +endif AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) From 7c8772451c3e114520183567bb0811abf091a7e1 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Tue, 25 Aug 2020 07:00:00 -0700 Subject: [PATCH 398/439] lib/string.c: implement stpcpy LLVM implemented a recent "libcall optimization" that lowers calls to `sprintf(dest, "%s", str)` where the return value is used to `stpcpy(dest, str) - dest`. This generally avoids the machinery involved in parsing format strings. `stpcpy` is just like `strcpy` except it returns the pointer to the new tail of `dest`. This optimization was introduced into clang-12. Implement this so that we don't observe linkage failures due to missing symbol definitions for `stpcpy`. 
Similar to last year's fire drill with: commit 5f074f3e192f ("lib/string.c: implement a basic bcmp") The kernel is somewhere between a "freestanding" environment (no full libc) and "hosted" environment (many symbols from libc exist with the same type, function signature, and semantics). As H. Peter Anvin notes, there's not really a great way to inform the compiler that you're targeting a freestanding environment but would like to opt-in to some libcall optimizations (see pr/47280 below), rather than opt-out. Arvind notes, -fno-builtin-* behaves slightly differently between GCC and Clang, and Clang is missing many __builtin_* definitions, which I consider a bug in Clang and am working on fixing. Masahiro summarizes the subtle distinction between compilers justly: To prevent transformation from foo() into bar(), there are two ways in Clang to do that; -fno-builtin-foo, and -fno-builtin-bar. There is only one in GCC; -fno-buitin-foo. (Any difference in that behavior in Clang is likely a bug from a missing __builtin_* definition.) Masahiro also notes: We want to disable optimization from foo() to bar(), but we may still benefit from the optimization from foo() into something else. If GCC implements the same transform, we would run into a problem because it is not -fno-builtin-bar, but -fno-builtin-foo that disables that optimization. In this regard, -fno-builtin-foo would be more future-proof than -fno-built-bar, but -fno-builtin-foo is still potentially overkill. We may want to prevent calls from foo() being optimized into calls to bar(), but we still may want other optimization on calls to foo(). It seems that compilers today don't quite provide the fine grain control over which libcall optimizations pseudo-freestanding environments would prefer. Finally, Kees notes that this interface is unsafe, so we should not encourage its use. As such, I've removed the declaration from any header, but it still needs to be exported to avoid linkage errors in modules. 
Reported-by: Sami Tolvanen Suggested-by: Andy Lavr Suggested-by: Arvind Sankar Suggested-by: Joe Perches Suggested-by: Masahiro Yamada Suggested-by: Rasmus Villemoes Signed-off-by: Nick Desaulniers Acked-by: Kees Cook Cc: stable@vger.kernel.org Link: https://bugs.llvm.org/show_bug.cgi?id=47162 Link: https://bugs.llvm.org/show_bug.cgi?id=47280 Link: https://github.com/ClangBuiltLinux/linux/issues/1126 Link: https://man7.org/linux/man-pages/man3/stpcpy.3.html Link: https://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html Link: https://reviews.llvm.org/D85963 --- lib/string.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/string.c b/lib/string.c index 33befc6ba3fa..38450d042444 100644 --- a/lib/string.c +++ b/lib/string.c @@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count) EXPORT_SYMBOL(strscpy); #endif +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is the new + * %NUL-terminated character. (for strcpy, the return value is a pointer to + * src. This interface is considered unsafe as it doesn't perform bounds + * checking of the inputs. As such it's not recommended for usage. Instead, + * its definition is provided in case the compiler lowers other libcalls to + * stpcpy. 
+ */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another From 4496fb8f1da13076a364e7334298f04d53e3d220 Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Mon, 2 Jul 2018 11:16:59 +0200 Subject: [PATCH 399/439] arm64: Use aarch64elf and aarch64elfb emulation mode variants The aarch64linux and aarch64linuxb emulation modes are not supported by bare-metal toolchains and Linux using them forbids building the kernel with these toolchains. Since there is apparently no reason to target these emulation modes, the more generic elf modes are used instead, allowing to build on bare-metal toolchains as well as the already-supported ones. Fixes: 3d6a7b99e3fa ("arm64: ensure the kernel is compiled for LP64") Cc: stable@vger.kernel.org Acked-by: Will Deacon Signed-off-by: Paul Kocialkowski Signed-off-by: Catalin Marinas --- arch/arm64/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index a221b9f8f98c..615cc6620d50 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -88,7 +88,7 @@ LD += -EB ifeq ($(ld-name),gold) LDFLAGS += -maarch64_elf64_be_vec else -LDFLAGS += -maarch64linuxb +LDFLAGS += -maarch64elfb endif UTS_MACHINE := aarch64_be else @@ -99,7 +99,7 @@ LD += -EL ifeq ($(ld-name),gold) LDFLAGS += -maarch64_elf64_le_vec else -LDFLAGS += -maarch64linux +LDFLAGS += -maarch64elf endif UTS_MACHINE := aarch64 endif From 9b437082f9a9c37dfdf9f2e8cda5891b63c3e89c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 3 Jul 2018 10:22:00 +0900 Subject: [PATCH 400/439] arm64: add endianness option to LDFLAGS instead of LD With the recent syntax extension, Kconfig is now able to evaluate the compiler / toolchain capability. 
However, accumulating flags to 'LD' is not compatible with the way it works; 'LD' must be passed to Kconfig to call $(ld-option,...) from Kconfig files. If you tweak 'LD' in arch Makefile depending on CONFIG_CPU_BIG_ENDIAN, this would end up with circular dependency between Makefile and Kconfig. Acked-by: Will Deacon Signed-off-by: Masahiro Yamada Signed-off-by: Catalin Marinas --- arch/arm64/Makefile | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 615cc6620d50..0c630482f80f 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -84,22 +84,20 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB -LD += -EB ifeq ($(ld-name),gold) -LDFLAGS += -maarch64_elf64_be_vec +LDFLAGS += -EB -maarch64_elf64_be_vec else -LDFLAGS += -maarch64elfb +LDFLAGS += -EB -maarch64elfb endif UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL -LD += -EL ifeq ($(ld-name),gold) -LDFLAGS += -maarch64_elf64_le_vec +LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -maarch64elf +LDFLAGS += -EL -maarch64elf endif UTS_MACHINE := aarch64 endif From 28e478e00b733d1cdd8cf8526490bc5db219fb3a Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 9 Jul 2018 13:09:56 -0700 Subject: [PATCH 401/439] Revert "arm64: Use aarch64elf and aarch64elfb emulation mode variants" This reverts commit 38fc4248677552ce35efc09902fdcb06b61d7ef9. 
Distributions such as Fedora and Debian do not package the ELF linker scripts with their toolchains, resulting in kernel build failures such as: | CHK include/generated/compile.h | LD [M] arch/arm64/crypto/sha512-ce.o | aarch64-linux-gnu-ld: cannot open linker script file ldscripts/aarch64elf.xr: No such file or directory | make[1]: *** [scripts/Makefile.build:530: arch/arm64/crypto/sha512-ce.o] Error 1 | make: *** [Makefile:1029: arch/arm64/crypto] Error 2 Revert back to the linux targets for now, adding a comment to the Makefile so we don't accidentally break this in the future. Cc: Paul Kocialkowski Cc: Fixes: 38fc42486775 ("arm64: Use aarch64elf and aarch64elfb emulation mode variants") Tested-by: Kevin Hilman Signed-off-by: Laura Abbott Signed-off-by: Will Deacon --- arch/arm64/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 0c630482f80f..f78f77ce12ed 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -87,7 +87,9 @@ AS += -EB ifeq ($(ld-name),gold) LDFLAGS += -EB -maarch64_elf64_be_vec else -LDFLAGS += -EB -maarch64elfb +# We must use the linux target here, since distributions don't tend to package +# the ELF linker scripts with binutils, and this results in a build failure. +LDFLAGS += -EB -maarch64linuxb endif UTS_MACHINE := aarch64_be else @@ -97,7 +99,7 @@ AS += -EL ifeq ($(ld-name),gold) LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -EL -maarch64elf +LDFLAGS += -EL -maarch64linux # See comment above endif UTS_MACHINE := aarch64 endif From 9809bca77dd0e5eaa7163c86488c33bdc2740135 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 23 Feb 2018 13:56:53 +0900 Subject: [PATCH 402/439] kbuild: simplify ld-option implementation commit 0294e6f4a0006856e1f36b8cd8fa088d9e499e98 upstream. Currently, linker options are tested by the coordination of $(CC) and $(LD) because $(LD) needs some object to link. 
As commit 86a9df597cdd ("kbuild: fix linker feature test macros when cross compiling with Clang") addressed, we need to make sure $(CC) and $(LD) agree the underlying architecture of the passed object. This could be a bit complex when we combine tools from different groups. For example, we can use clang for $(CC), but we still need to rely on GCC toolchain for $(LD). So, I was searching for a way of standalone testing of linker options. A trick I found is to use '-v'; this not only prints the version string, but also tests if the given option is recognized. If a given option is supported, $ aarch64-linux-gnu-ld -v --fix-cortex-a53-843419 GNU ld (Linaro_Binutils-2017.11) 2.28.2.20170706 $ echo $? 0 If unsupported, $ aarch64-linux-gnu-ld -v --fix-cortex-a53-843419 GNU ld (crosstool-NG linaro-1.13.1-4.7-2013.04-20130415 - Linaro GCC 2013.04) 2.23.1 aarch64-linux-gnu-ld: unrecognized option '--fix-cortex-a53-843419' aarch64-linux-gnu-ld: use the --help option for usage information $ echo $? 1 Gold works likewise. $ aarch64-linux-gnu-ld.gold -v --fix-cortex-a53-843419 GNU gold (Linaro_Binutils-2017.11 2.28.2.20170706) 1.14 masahiro@pug:~/ref/linux$ echo $? 0 $ aarch64-linux-gnu-ld.gold -v --fix-cortex-a53-999999 GNU gold (Linaro_Binutils-2017.11 2.28.2.20170706) 1.14 aarch64-linux-gnu-ld.gold: --fix-cortex-a53-999999: unknown option aarch64-linux-gnu-ld.gold: use the --help option for usage information $ echo $? 1 LLD too. $ ld.lld -v --gc-sections LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 0 $ ld.lld -v --fix-cortex-a53-843419 LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 0 $ ld.lld -v --fix-cortex-a53-999999 ld.lld: error: unknown argument: --fix-cortex-a53-999999 LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 
1 Signed-off-by: Masahiro Yamada Tested-by: Nick Desaulniers [nc: try-run-cached was added later, just use try-run, which is the current mainline state] Signed-off-by: Nathan Chancellor Signed-off-by: Greg Kroah-Hartman --- scripts/Kbuild.include | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 1f2f528b7f23..ae30702c1f15 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -196,9 +196,7 @@ cc-ldoption = $(call try-run,\ # ld-option # Usage: LDFLAGS += $(call ld-option, -X) -ld-option = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \ - $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2)) +ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2)) # ar-option # Usage: KBUILD_ARFLAGS := $(call ar-option,D) From f821b47594a57a3f5434172d9292dc4d7cee8bd8 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Fri, 13 Jul 2018 08:30:33 -0700 Subject: [PATCH 403/439] arm64: build with baremetal linker target instead of Linux when available Not all toolchains have the baremetal elf targets, RedHat/Fedora ones in particular. So, probe for whether it's available and use the previous (linux) targets if it isn't. Reported-by: Laura Abbott Tested-by: Laura Abbott Acked-by: Masahiro Yamada Cc: Paul Kocialkowski Signed-off-by: Olof Johansson Signed-off-by: Will Deacon --- arch/arm64/Makefile | 9 +++++---- scripts/Kbuild.include | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f78f77ce12ed..8ed3708a6838 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -87,9 +87,9 @@ AS += -EB ifeq ($(ld-name),gold) LDFLAGS += -EB -maarch64_elf64_be_vec else -# We must use the linux target here, since distributions don't tend to package -# the ELF linker scripts with binutils, and this results in a build failure. 
-LDFLAGS += -EB -maarch64linuxb +# Prefer the baremetal ELF build target, but not all toolchains include +# it so fall back to the standard linux version if needed. +LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb) endif UTS_MACHINE := aarch64_be else @@ -99,7 +99,8 @@ AS += -EL ifeq ($(ld-name),gold) LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -EL -maarch64linux # See comment above +# Same as above, prefer ELF but fall back to linux target if needed. +LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux) endif UTS_MACHINE := aarch64 endif diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ae30702c1f15..99cb231a93bf 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -195,8 +195,8 @@ cc-ldoption = $(call try-run,\ $(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) # ld-option -# Usage: LDFLAGS += $(call ld-option, -X) -ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2)) +# Usage: LDFLAGS += $(call ld-option, -X, -Y) +ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2),$(3)) # ar-option # Usage: KBUILD_ARFLAGS := $(call ar-option,D) From 50193a205c650b23a125890d0fb9ffcf68a64716 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 5 Jun 2020 10:39:55 +0300 Subject: [PATCH 404/439] kbuild: add variables for compression tools commit 8dfb61dcbaceb19a5ded5e9c9dcf8d05acc32294 upstream. Allow user to use alternative implementations of compression tools, such as pigz, pbzip2, pxz. For example, multi-threaded tools to speed up the build: $ make KGZIP=pigz KBZIP2=pbzip2 Variables KGZIP, KBZIP2, KLZOP are used internally because original env vars are reserved by the tools. The use of GZIP in gzip tool is obsolete since 2015. However, alternative implementations (e.g., pigz) still rely on it. BZIP2, BZIP, LZOP vars are not obsolescent. The credit goes to @grsecurity. 
As a sidenote, for multi-threaded lzma, xz compression one can use: $ export XZ_OPT="--threads=0" Signed-off-by: Denis Efremov Signed-off-by: Masahiro Yamada Signed-off-by: Matthias Maennich Signed-off-by: Greg Kroah-Hartman --- Makefile | 11 +++++++++-- arch/m68k/Makefile | 8 ++++---- arch/parisc/Makefile | 2 +- scripts/Makefile.lib | 12 ++++++------ scripts/package/Makefile | 8 ++++---- scripts/package/buildtar | 6 +++--- scripts/xz_wrap.sh | 2 +- 7 files changed, 28 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index d43f80de1f34..b95984938d47 100644 --- a/Makefile +++ b/Makefile @@ -408,6 +408,12 @@ DEPMOD = /sbin/depmod PERL = perl PYTHON = python CHECK = sparse +KGZIP = gzip +KBZIP2 = bzip2 +KLZOP = lzop +LZMA = lzma +LZ4 = lz4c +XZ = xz CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void $(CF) @@ -457,6 +463,7 @@ CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS @@ -1071,10 +1078,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n -f + mod_compress_cmd = $(KGZIP) -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz -f + mod_compress_cmd = $(XZ) -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index a229d28e14cc..c44a53b743e4 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -135,10 +135,10 @@ vmlinux.gz: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - gzip -9c vmlinux.tmp >vmlinux.gz + $(KGZIP) -9c 
vmlinux.tmp >vmlinux.gz rm vmlinux.tmp else - gzip -9c vmlinux >vmlinux.gz + $(KGZIP) -9c vmlinux >vmlinux.gz endif bzImage: vmlinux.bz2 @@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - bzip2 -1c vmlinux.tmp >vmlinux.bz2 + $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2 rm vmlinux.tmp else - bzip2 -1c vmlinux >vmlinux.bz2 + $(KBZIP2) -1c vmlinux >vmlinux.bz2 endif archclean: diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01946ebaff72..efbc6e36df3e 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -134,7 +134,7 @@ vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ else vmlinuz: vmlinux - @gzip -cf -9 $< > $@ + @$(KGZIP) -cf -9 $< > $@ endif install: diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 2d4087a75c78..2976b68d01a8 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -269,7 +269,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ -cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ +cmd_gzip = (cat $(filter-out FORCE,$^) | $(KGZIP) -n -f -9 > $@) || \ (rm -f $@ ; false) # DTC @@ -347,7 +347,7 @@ printf "%08x\n" $$dec_size | \ quiet_cmd_bzip2 = BZIP2 $@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ - bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(KBZIP2) -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) # Lzma @@ -355,17 +355,17 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ quiet_cmd_lzma = LZMA $@ cmd_lzma = (cat $(filter-out FORCE,$^) | \ - lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(LZMA) -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) quiet_cmd_lzo = LZO $@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ - lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(KLZOP) -9 && $(call size_append, 
$(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) quiet_cmd_lz4 = LZ4 $@ cmd_lz4 = (cat $(filter-out FORCE,$^) | \ - lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(LZ4) -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) # U-Boot mkimage @@ -417,7 +417,7 @@ cmd_xzkern = (cat $(filter-out FORCE,$^) | \ quiet_cmd_xzmisc = XZMISC $@ cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ - xz --check=crc32 --lzma2=dict=1MiB) > $@ || \ + $(XZ) --check=crc32 --lzma2=dict=1MiB) > $@ || \ (rm -f $@ ; false) # ASM offsets diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 348af5b20618..3567749d6be4 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -39,7 +39,7 @@ if test "$(objtree)" != "$(srctree)"; then \ false; \ fi ; \ $(srctree)/scripts/setlocalversion --save-scmversion; \ -tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \ +tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \ rm -f $(objtree)/.scmversion @@ -121,9 +121,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \ tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \ rm -r $(perf-tar); \ $(if $(findstring tar-src,$@),, \ -$(if $(findstring bz2,$@),bzip2, \ -$(if $(findstring gz,$@),gzip, \ -$(if $(findstring xz,$@),xz, \ +$(if $(findstring bz2,$@),$(KBZIP2), \ +$(if $(findstring gz,$@),$(KGZIP), \ +$(if $(findstring xz,$@),$(XZ), \ $(error unknown target $@)))) \ -f -9 $(perf-tar).tar) diff --git a/scripts/package/buildtar b/scripts/package/buildtar index e8cc72a51b32..d6c0fc3ac004 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -28,15 +28,15 @@ case "${1}" in opts= ;; targz-pkg) - opts=--gzip + opts="-I ${KGZIP}" tarball=${tarball}.gz ;; tarbz2-pkg) - opts=--bzip2 + opts="-I ${KBZIP2}" tarball=${tarball}.bz2 ;; tarxz-pkg) - opts=--xz + opts="-I ${XZ}" tarball=${tarball}.xz ;; *) diff --git 
a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh index 7a2d372f4885..76e9cbcfbeab 100755 --- a/scripts/xz_wrap.sh +++ b/scripts/xz_wrap.sh @@ -20,4 +20,4 @@ case $SRCARCH in sparc) BCJ=--sparc ;; esac -exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB +exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB From 36096ff49a2d3d44e83cf4baaeee6a8d118544e5 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 6 Jun 2019 11:33:43 +0100 Subject: [PATCH 405/439] arm64: Silence gcc warnings about arch ABI drift Since GCC 9, the compiler warns about evolution of the platform-specific ABI, in particular relating for the marshaling of certain structures involving bitfields. The kernel is a standalone binary, and of course nobody would be so stupid as to expose structs containing bitfields as function arguments in ABI. (Passing a pointer to such a struct, however inadvisable, should be unaffected by this change. perf and various drivers rely on that.) So these warnings do more harm than good: turn them off. We may miss warnings about future ABI drift, but that's too bad. Future ABI breaks of this class will have to be debugged and fixed the traditional way unless the compiler evolves finer-grained diagnostics. 
Signed-off-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm64/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 8ed3708a6838..f953a6c24fe7 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -75,6 +75,7 @@ KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_CFLAGS += -fno-pic +KBUILD_CFLAGS += -Wno-psabi KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) From 3403ab7d3f3b5d11e71f4dfa78ab512d48dc8a62 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 11 Jun 2019 10:19:32 -0700 Subject: [PATCH 406/439] arm64: Don't unconditionally add -Wno-psabi to KBUILD_CFLAGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a GCC only option, which warns about ABI changes within GCC, so unconditionally adding it breaks Clang with tons of: warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] and link time failures: ld.lld: error: undefined symbol: __efistub___stack_chk_guard >>> referenced by arm-stub.c:73 (/home/nathan/cbl/linux/drivers/firmware/efi/libstub/arm-stub.c:73) >>> arm-stub.stub.o:(__efistub_install_memreserve_table) in archive ./drivers/firmware/efi/libstub/lib.a These failures come from the lack of -fno-stack-protector, which is added via cc-option in drivers/firmware/efi/libstub/Makefile. When an unknown flag is added to KBUILD_CFLAGS, clang will noisily warn that it is ignoring the option like above, unlike gcc, who will just error. $ echo "int main() { return 0; }" > tmp.c $ clang -Wno-psabi tmp.c; echo $? warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] 1 warning generated. 0 $ gcc -Wsometimes-uninitialized tmp.c; echo $? gcc: error: unrecognized command line option ‘-Wsometimes-uninitialized’; did you mean ‘-Wmaybe-uninitialized’? 
1 For cc-option to work properly with clang and behave like gcc, -Werror is needed, which was done in commit c3f0d0bc5b01 ("kbuild, LLVMLinux: Add -Werror to cc-option to support clang"). $ clang -Werror -Wno-psabi tmp.c; echo $? error: unknown warning option '-Wno-psabi' [-Werror,-Wunknown-warning-option] 1 As a consequence of this, when an unknown flag is unconditionally added to KBUILD_CFLAGS, it will cause cc-option to always fail and those flags will never get added: $ clang -Werror -Wno-psabi -fno-stack-protector tmp.c; echo $? error: unknown warning option '-Wno-psabi' [-Werror,-Wunknown-warning-option] 1 This can be seen when compiling the whole kernel as some warnings that are normally disabled (see below) show up. The full list of flags missing from drivers/firmware/efi/libstub are the following (gathered from diffing .arm64-stub.o.cmd): -fno-delete-null-pointer-checks -Wno-address-of-packed-member -Wframe-larger-than=2048 -Wno-unused-const-variable -fno-strict-overflow -fno-merge-all-constants -fno-stack-check -Werror=date-time -Werror=incompatible-pointer-types -ffreestanding -fno-stack-protector Use cc-disable-warning so that it gets disabled for GCC and does nothing for Clang. 
Fixes: ebcc5928c5d9 ("arm64: Silence gcc warnings about arch ABI drift") Link: https://github.com/ClangBuiltLinux/linux/issues/511 Reported-by: Qian Cai Acked-by: Dave Martin Reviewed-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Signed-off-by: Will Deacon --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f953a6c24fe7..75e36c1de54d 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -75,7 +75,7 @@ KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_CFLAGS += -fno-pic -KBUILD_CFLAGS += -Wno-psabi +KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) From 26b3c5132d5ed973558d5fbb159209c27aca654d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 9 May 2020 15:58:04 -0700 Subject: [PATCH 407/439] gcc-10: avoid shadowing standard library 'free()' in crypto MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1a263ae60b04de959d9ce9caea4889385eefcc7b upstream. gcc-10 has started warning about conflicting types for a few new built-in functions, particularly 'free()'. This results in warnings like: crypto/xts.c:325:13: warning: conflicting types for built-in function ‘free’; expected ‘void(void *)’ [-Wbuiltin-declaration-mismatch] because the crypto layer had its local freeing functions called 'free()'. Gcc-10 is in the wrong here, since that function is marked 'static', and thus there is no chance of confusion with any standard library function namespace. But the simplest thing to do is to just use a different name here, and avoid this gcc mis-feature. 
[ Side note: gcc knowing about 'free()' is in itself not the mis-feature: the semantics of 'free()' are special enough that a compiler can validly do special things when seeing it. So the mis-feature here is that gcc thinks that 'free()' is some restricted name, and you can't shadow it as a local static function. Making the special 'free()' semantics be a function attribute rather than tied to the name would be the much better model ] Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- crypto/lrw.c | 4 ++-- crypto/xts.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index 886f91f2426c..1b73fec817cf 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -531,7 +531,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -642,7 +642,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/crypto/xts.c b/crypto/xts.c index e31828ed0046..f5fba941d6f6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -469,7 +469,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_cipher(ctx->tweak); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -580,7 +580,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) From dcc4e61dbb49b004bcbe48adb1cb0f7f117c545d Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 20 Nov 2020 
17:53:09 +0300 Subject: [PATCH 408/439] dpu20: make vendor check of cables for dex mode optional Add CONFIG_DISPLAYPORT_DEX_FORCE_WQHD config. Thanks, @fart1-git. Signed-off-by: Denis Efremov --- drivers/video/fbdev/exynos/dpu20/Kconfig | 7 +++++++ drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/video/fbdev/exynos/dpu20/Kconfig b/drivers/video/fbdev/exynos/dpu20/Kconfig index 020b81473176..2ad5e91f1c1c 100644 --- a/drivers/video/fbdev/exynos/dpu20/Kconfig +++ b/drivers/video/fbdev/exynos/dpu20/Kconfig @@ -23,6 +23,13 @@ config EXYNOS_DISPLAYPORT help Enable DISPLAYPORT driver. +config DISPLAYPORT_DEX_FORCE_WQHD + bool "Force WQHD in DEX" + depends on EXYNOS_DISPLAYPORT + default n + help + Force WQHD mode in dex_adapter_type. + config EXYNOS_WINDOW_UPDATE bool "Support Window Update Mode" depends on EXYNOS_DPU20 diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index 111bbc9a4535..e6e8d8263e5c 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -2668,6 +2668,12 @@ static void displayport_aux_sel(struct displayport_device *displayport) } } +#ifdef CONFIG_DISPLAYPORT_DEX_FORCE_WQHD +static void displayport_check_adapter_type(struct displayport_device *displayport) +{ + displayport->dex_adapter_type = DEX_WQHD_SUPPORT; +} +#else static void displayport_check_adapter_type(struct displayport_device *displayport) { #ifdef FEATURE_DEX_ADAPTER_TWEAK @@ -2688,6 +2694,7 @@ static void displayport_check_adapter_type(struct displayport_device *displaypor break; }; } +#endif static int usb_typec_displayport_notification(struct notifier_block *nb, unsigned long action, void *data) From 9b024a341eed922a10fd97982a064db41ad48335 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 22 Nov 2020 00:49:44 +0300 Subject: [PATCH 409/439] Makefile: don't use -fconserve-stack with 
CC_OPTIMIZE_FOR_PERFORMANCE* Signed-off-by: Denis Efremov --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index b95984938d47..077587bbcf68 100644 --- a/Makefile +++ b/Makefile @@ -956,7 +956,9 @@ KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) # conserve stack if available +ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) +endif # disallow errors like 'EXPORT_GPL(foo);' with missing header KBUILD_CFLAGS += $(call cc-option,-Werror=implicit-int) From 01390af4f9c2bef0ee6bfe7c18973f3b6150b7e5 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 15 Apr 2019 09:49:56 -0700 Subject: [PATCH 410/439] x86/build/lto: Fix truncated .bss with -fdata-sections [ Upstream commit 6a03469a1edc94da52b65478f1e00837add869a3 ] With CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y, we compile the kernel with -fdata-sections, which also splits the .bss section. The new section, with a new .bss.* name, which pattern gets missed by the main x86 linker script which only expects the '.bss' name. This results in the discarding of the second part and a too small, truncated .bss section and an unhappy, non-working kernel. Use the common BSS_MAIN macro in the linker script to properly capture and merge all the generated BSS sections. Signed-off-by: Sami Tolvanen Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Cc: Borislav Petkov Cc: Kees Cook Cc: Linus Torvalds Cc: Nicholas Piggin Cc: Nick Desaulniers Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190415164956.124067-1-samitolvanen@google.com [ Extended the changelog. 
] Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin --- arch/x86/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2384a2ae5ec3..b2f6e4f3e927 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -352,7 +352,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) - *(.bss) + *(BSS_MAIN) . = ALIGN(PAGE_SIZE); __bss_stop = .; } From 4769c7a6fc5e030a222c282f98072b46446b2146 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 9 May 2018 22:59:59 +1000 Subject: [PATCH 411/439] kbuild: LD_DEAD_CODE_DATA_ELIMINATION no -ffunction-sections/-fdata-sections for module build Modules do not tend to cope with -ffunction-sections, even though they do not link with -gc-sections. It may be possible for unused symbols to be trimmed from modules, but in general that would take much more work in architecture module linker scripts. For now, enable these only for kernel build. 
Signed-off-by: Nicholas Piggin Signed-off-by: Masahiro Yamada --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 077587bbcf68..c5465a239641 100644 --- a/Makefile +++ b/Makefile @@ -865,8 +865,8 @@ KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once) endif ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION -KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) -KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,) endif ifdef CONFIG_LTO_CLANG From 15069a83e00a8a80a1b4457951d886feab4a1cfe Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 9 May 2018 23:00:00 +1000 Subject: [PATCH 412/439] kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled Architectures that are capable can select HAVE_LD_DEAD_CODE_DATA_ELIMINATION to enable selection of that option (as an EXPERT kernel option). Signed-off-by: Nicholas Piggin Signed-off-by: Masahiro Yamada --- arch/Kconfig | 15 --------------- init/Kconfig | 27 +++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index eb9975a67673..c6caaeffc124 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -596,21 +596,6 @@ config THIN_ARCHIVES Select this if the architecture wants to use thin archives instead of ld -r to create the built-in.o files. -config LD_DEAD_CODE_DATA_ELIMINATION - bool - help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections and linking with - --gc-sections. - - This requires that the arch annotates or otherwise protects - its external entry points from being discarded. Linker scripts - must also merge .text.*, .data.*, and .bss.* correctly into - output sections. Care must be taken not to pull in unrelated - sections (e.g., '.text.init'). 
Typically '.' in section names - is used to distinguish them from label names / C identifiers. - config LTO def_bool n diff --git a/init/Kconfig b/init/Kconfig index c3c5710a9a47..6258b47e72b8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1176,6 +1176,33 @@ config CC_OPTIMIZE_FOR_SIZE endchoice +config HAVE_LD_DEAD_CODE_DATA_ELIMINATION + bool + help + This requires that the arch annotates or otherwise protects + its external entry points from being discarded. Linker scripts + must also merge .text.*, .data.*, and .bss.* correctly into + output sections. Care must be taken not to pull in unrelated + sections (e.g., '.text.init'). Typically '.' in section names + is used to distinguish them from label names / C identifiers. + +config LD_DEAD_CODE_DATA_ELIMINATION + bool "Dead code and data elimination (EXPERIMENTAL)" + depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION + depends on EXPERT + help + Select this if the architecture wants to do dead code and + data elimination with the linker by compiling with + -ffunction-sections -fdata-sections, and linking with + --gc-sections. + + This can reduce on disk and in-memory size of the kernel + code and static data, particularly for small configs and + on small systems. This has the possibility of introducing + silently broken kernel if the required annotations are not + present. This option is not well tested yet, so use at your + own risk. + config SYSCTL bool From ddc901ad499bdbec9d0a5ad95eeed5dc2c832c06 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 24 Jun 2018 01:41:51 +0900 Subject: [PATCH 413/439] kbuild: reword help of LD_DEAD_CODE_DATA_ELIMINATION Since commit 5d20ee3192a5 ("kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled"), HAVE_LD_DEAD_CODE_DATA_ELIMINATION is supposed to be selected by architectures that are capable of this functionality. LD_DEAD_CODE_DATA_ELIMINATION is now users' selection. Update the help message. 
Signed-off-by: Masahiro Yamada --- init/Kconfig | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 6258b47e72b8..e8ca1428f6b8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1191,10 +1191,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections, and linking with - --gc-sections. + Enable this if you want to do dead code and data elimination with + the linker by compiling with -ffunction-sections -fdata-sections, + and linking with --gc-sections. This can reduce on disk and in-memory size of the kernel code and static data, particularly for small configs and From 8c5347f9b3580f0f06ad701b2bc860218d7dcda1 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sat, 21 Nov 2020 21:30:04 +0300 Subject: [PATCH 414/439] arm64: allow LD_DEAD_CODE_DATA_ELIMINATION Signed-off-by: Denis Efremov --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 84166a9b7c05..94683e02421d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -98,6 +98,7 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP if NUMA select HAVE_NMI if ACPI_APEI_SEA From 5efe26c5faae1581a1b3f2d45782b8b76458e758 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 9 May 2018 22:59:58 +1000 Subject: [PATCH 415/439] kbuild: Fix asm-generic/vmlinux.lds.h for LD_DEAD_CODE_DATA_ELIMINATION KEEP more tables, and add the function/data section wildcard to more section selections. 
This is a little ad-hoc at the moment, but kernel code should be moved to consistently use .text..x (note: double dots) for explicit sections and all references to it in the linker script can be made with TEXT_MAIN, and similarly for other sections. For now, let's see if major architectures move to enabling this option then we can do some refactoring passes. Otherwise if it remains unused or superseded by LTO, this may not be required. Signed-off-by: Nicholas Piggin Signed-off-by: Masahiro Yamada --- include/asm-generic/vmlinux.lds.h | 39 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 1489c85241f6..12b7fcca75f9 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -64,17 +64,26 @@ * generates .data.identifier sections, which need to be pulled in with * .data. We don't want to pull in .data..other sections, which Linux * has defined. Same for text and bss. + * + * RODATA_MAIN is not used because existing code already defines .rodata.x + * sections to be brought in with rodata. 
*/ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi #define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* +#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text #define TEXT_CFI_MAIN .text.cfi #define DATA_MAIN .data +#define SDATA_MAIN .sdata +#define RODATA_MAIN .rodata #define BSS_MAIN .bss +#define SBSS_MAIN .sbss #endif /* @@ -115,7 +124,7 @@ #ifdef CONFIG_TRACE_BRANCH_PROFILING #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \ - *(_ftrace_annotated_branch) \ + KEEP(*(_ftrace_annotated_branch)) \ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .; #else #define LIKELY_PROFILE() @@ -123,7 +132,7 @@ #ifdef CONFIG_PROFILE_ALL_BRANCHES #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \ - *(_ftrace_branch) \ + KEEP(*(_ftrace_branch)) \ VMLINUX_SYMBOL(__stop_branch_profile) = .; #else #define BRANCH_PROFILE() @@ -222,8 +231,8 @@ *(DATA_MAIN) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ - MEM_KEEP(init.data) \ - MEM_KEEP(exit.data) \ + MEM_KEEP(init.data*) \ + MEM_KEEP(exit.data*) \ *(.data.unlikely) \ STRUCT_ALIGN(); \ *(__tracepoints) \ @@ -267,7 +276,7 @@ #define INIT_TASK_DATA(align) \ . 
= ALIGN(align); \ VMLINUX_SYMBOL(__start_init_task) = .; \ - *(.data..init_task) \ + KEEP(*(.data..init_task)) \ VMLINUX_SYMBOL(__end_init_task) = .; /* @@ -526,8 +535,8 @@ *(.text..ftrace) \ *(TEXT_CFI_MAIN) \ *(.ref.text) \ - MEM_KEEP(init.text) \ - MEM_KEEP(exit.text) \ + MEM_KEEP(init.text*) \ + MEM_KEEP(exit.text*) \ /* sched.text is aling to function alignment to secure we have same @@ -618,8 +627,8 @@ /* init and exit section handling */ #define INIT_DATA \ KEEP(*(SORT(___kentry+*))) \ - *(.init.data) \ - MEM_DISCARD(init.data) \ + *(.init.data init.data.*) \ + MEM_DISCARD(init.data*) \ KERNEL_CTORS() \ MCOUNT_REC() \ *(.init.rodata .init.rodata.*) \ @@ -643,14 +652,14 @@ #define INIT_TEXT \ *(.init.text .init.text.*) \ *(.text.startup) \ - MEM_DISCARD(init.text) + MEM_DISCARD(init.text*) #define EXIT_DATA \ - *(.exit.data) \ + *(.exit.data .exit.data.*) \ *(.fini_array) \ *(.dtors) \ - MEM_DISCARD(exit.data) \ - MEM_DISCARD(exit.rodata) + MEM_DISCARD(exit.data*) \ + MEM_DISCARD(exit.rodata*) #define EXIT_TEXT \ *(.exit.text) \ @@ -668,7 +677,7 @@ . = ALIGN(sbss_align); \ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ *(.dynsbss) \ - *(.sbss) \ + *(SBSS_MAIN) \ *(.scommon) \ } @@ -806,7 +815,7 @@ #define NOTES \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_notes) = .; \ - *(.note.*) \ + KEEP(*(.note.*)) \ VMLINUX_SYMBOL(__stop_notes) = .; \ } From c5c9e36e0fce45328d7f338195cb29a36c2b4472 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Thu, 13 Sep 2018 13:00:00 +0200 Subject: [PATCH 416/439] vmlinux.lds.h: Fix linker warnings about orphan .LPBX sections Enabling both CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y and CONFIG_GCOV_PROFILE_ALL=y results in linker warnings: warning: orphan section `.data..LPBX1' being placed in section `.data..LPBX1'. LD_DEAD_CODE_DATA_ELIMINATION adds compiler flag -fdata-sections. 
This option causes GCC to create separate data sections for data objects, including those generated by GCC internally for gcov profiling. The names of these objects start with a dot (.LPBX0, .LPBX1), resulting in section names starting with 'data..'. As section names starting with 'data..' are used for specific purposes in the Linux kernel, the linker script does not automatically include them in the output data section, resulting in the "orphan section" linker warnings. Fix this by specifically including sections named "data..LPBX*" in the data section. Reported-by: Stephen Rothwell Tested-by: Stephen Rothwell Tested-by: Arnd Bergmann Acked-by: Arnd Bergmann Signed-off-by: Peter Oberparleiter Signed-off-by: Stephen Rothwell --- include/asm-generic/vmlinux.lds.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 12b7fcca75f9..803d16157b42 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -71,7 +71,7 @@ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* From 72d89c0a2030b6322e4723139ad8593241a004ba Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Thu, 2 Jul 2020 16:27:13 -0700 Subject: [PATCH 417/439] vmlinux.lds.h: Coalesce transient LLVM dead code elimination sections A recent LLVM 11 commit [1] made LLD stop implicitly coalescing some temporary LLVM sections, namely .{data,bss}..compoundliteral.XXX: [30] .data..compoundli PROGBITS ffffffff9ac9a000 19e9a000 000000000000cea0 0000000000000000 WA 0 0 32 [31] .rela.data..compo RELA 0000000000000000 40965440 0000000000001d88 0000000000000018 I 2238 30 8 [32] 
.data..compoundli PROGBITS ffffffff9aca6ea0 19ea6ea0 00000000000033c0 0000000000000000 WA 0 0 32 [33] .rela.data..compo RELA 0000000000000000 409671c8 0000000000000948 0000000000000018 I 2238 32 8 [...] [2213] .bss..compoundlit NOBITS ffffffffa3000000 1d85c000 00000000000000a0 0000000000000000 WA 0 0 32 [2214] .bss..compoundlit NOBITS ffffffffa30000a0 1d85c000 0000000000000040 0000000000000000 WA 0 0 32 [...] .{data,bss}..L sections are also created in some cases. While there aren't any in this example, they should also be coalesced to be safe in case some config or future LLVM change makes it start creating more of those sections in the future. For example, enabling global merging causes ..L_MergedGlobals sections to be created, but it's likely that other changes will result in such sections as well. While these extra sections don't typically cause any breakage, they do inflate the vmlinux size due to the overhead of storing metadata for thousands of extra sections. It's also worth noting that for some reason, some downstream Android kernels can't boot at all if these sections aren't coalesced. This issue isn't limited to any specific architecture; it affects arm64 and x86 if CONFIG_LD_DEAD_CODE_DATA_ELIMINATION is forced on. 
Example on x86 allyesconfig: Before: 2241 sections, 1171169 KiB After: 56 sections, 1170972 KiB [1] https://github.com/llvm/llvm-project/commit/9e33c096476ab5e02ab1c8442cc3cb4e32e29f17 Suggested-by: Fangrui Song Signed-off-by: Danny Lin Reviewed-by: Nathan Chancellor Cc: stable@vger.kernel.org # v4.4+ Link: https://github.com/ClangBuiltLinux/linux/issues/958 --- include/asm-generic/vmlinux.lds.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 803d16157b42..ebb89377e870 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -72,9 +72,12 @@ #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* \ + .data..compoundliteral* .data..L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* .bss..L* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text From 70b79b3c6f86de63edf6a3164a2c8ae92d6e3bfb Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Tue, 17 Dec 2019 11:53:52 -0800 Subject: [PATCH 418/439] ANDROID: kbuild: disable clang-specific configs with other compilers cuttlefish_defconfig explicitly enables options that fail to compile with compilers other than clang. This change detects when a different compiler is used and disables clang-specific features after printing a warning. 
Bug: 145297810 Change-Id: I3371576b45c9715a63c5668ab58e996cab612f53 Signed-off-by: Sami Tolvanen --- Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Makefile b/Makefile index c5465a239641..857dc4f00308 100644 --- a/Makefile +++ b/Makefile @@ -641,6 +641,8 @@ ifeq ($(dot-config),1) -include include/config/auto.conf ifeq ($(KBUILD_EXTMOD),) +include/config/auto.conf.cmd: check-clang-specific-options + # Read in dependencies to all Kconfig* files, make sure to run # oldconfig if changes are detected. -include include/config/auto.conf.cmd @@ -1268,6 +1270,22 @@ else endif endif +# Disable clang-specific config options when using a different compiler +clang-specific-configs := LTO_CLANG CFI_CLANG SHADOW_CALL_STACK + +PHONY += check-clang-specific-options +check-clang-specific-options: $(KCONFIG_CONFIG) FORCE +ifneq ($(cc-name),clang) +ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ + $(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(clang-specific-configs),-s $(c)))),) + @echo WARNING: Disabling clang-specific options with $(cc-name) >&2 + $(Q)$(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(clang-specific-configs),-d $(c)) && \ + $(MAKE) -f $(srctree)/Makefile olddefconfig +endif +endif + # Check for CONFIG flags that require compiler support. Abort the build # after .config has been processed, but before the kernel build starts. # From 5075d93dfecba681ae468fe159c7127072f6911a Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Wed, 31 Jul 2019 22:26:08 -0700 Subject: [PATCH 419/439] kbuild: Add support for LLVM's Polly optimizer This adds support for compiling the kernel with optimizations offered by LLVM's polyhedral loop optimizer known as Polly, which can improve performance by improving cache locality in loops. Note that LLVM is not compiled with Polly by default -- it must be enabled explicitly. 
Signed-off-by: Danny Lin --- Makefile | 13 +++++++++++++ arch/Kconfig | 8 ++++++++ 2 files changed, 21 insertions(+) diff --git a/Makefile b/Makefile index 857dc4f00308..b9c251489405 100644 --- a/Makefile +++ b/Makefile @@ -738,6 +738,19 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif +ifeq ($(cc-name),clang) +ifdef CONFIG_LLVM_POLLY +KBUILD_CFLAGS += -mllvm -polly \ + -mllvm -polly-run-dce \ + -mllvm -polly-run-inliner \ + -mllvm -polly-opt-fusion=max \ + -mllvm -polly-ast-use-context \ + -mllvm -polly-detect-keep-going \ + -mllvm -polly-vectorizer=stripmine \ + -mllvm -polly-invariant-load-hoisting +endif +endif + # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) diff --git a/arch/Kconfig b/arch/Kconfig index c6caaeffc124..3db1e7a13335 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -664,6 +664,14 @@ config CFI_CLANG_SHADOW If you select this option, the kernel builds a fast look-up table of CFI check functions in loaded modules to reduce overhead. +config LLVM_POLLY + bool "Enable LLVM's polyhedral loop optimizer (Polly)" + help + This option enables LLVM's polyhedral loop optimizer known as Polly. + Polly is able to optimize various loops throughout the kernel for + maximum cache locality. This requires an LLVM toolchain explicitly + compiled with Polly support. 
+ config HAVE_ARCH_WITHIN_STACK_FRAMES bool help From c933680fdb02352ae0b88ed167195b84c7f32e53 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 25 Nov 2020 16:53:16 +0300 Subject: [PATCH 420/439] kbuild: Add support for GCC's graphite optimizer Signed-off-by: Denis Efremov --- Makefile | 4 ++++ arch/Kconfig | 3 +++ 2 files changed, 7 insertions(+) diff --git a/Makefile b/Makefile index b9c251489405..3c1f63666619 100644 --- a/Makefile +++ b/Makefile @@ -749,6 +749,10 @@ KBUILD_CFLAGS += -mllvm -polly \ -mllvm -polly-vectorizer=stripmine \ -mllvm -polly-invariant-load-hoisting endif +else ifeq ($(cc-name),gcc) +ifdef CONFIG_GCC_GRAPHITE +KBUILD_CFLAGS += -fgraphite-identity +endif endif # Tell gcc to never replace conditional load with a non-conditional one diff --git a/arch/Kconfig b/arch/Kconfig index 3db1e7a13335..94802f909d95 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -672,6 +672,9 @@ config LLVM_POLLY maximum cache locality. This requires an LLVM toolchain explicitly compiled with Polly support. +config GCC_GRAPHITE + bool "Enable GCC's optimizations using the polyhedral model (Graphite)" + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help From 83f537d8fff421edb802a8b29c9209035538a30c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 29 May 2020 10:52:09 -0700 Subject: [PATCH 421/439] BACKPORT: arm64: vdso: use $(LD) instead of $(CC) to link VDSO We use $(LD) to link vmlinux, modules, decompressors, etc. VDSO is the only exceptional case where $(CC) is used as the linker driver, but I do not know why we need to do so. VDSO uses a special linker script, and does not link standard libraries at all. I changed the Makefile to use $(LD) rather than $(CC). I tested this, and VDSO worked for me. Users will be able to use their favorite linker (e.g. lld instead of bfd) by passing LD= from the command line. My plan is to rewrite all VDSO Makefiles to use $(LD), then delete cc-ldoption. 
Signed-off-by: Masahiro Yamada Signed-off-by: Will Deacon Change-Id: I8a14d6dd51d46b6942e68720e24217d1564b7869 [nd: conflicts due to ANDROID patches for LTO and SCS] (cherry picked from commit 691efbedc60d2a7364a90e38882fc762f06f52c4) Bug: 153418016 Bug: 157279372 Signed-off-by: Nick Desaulniers (cherry picked from commit 64ea9b4b072b37bd624dd98b963161fd22c1be34) --- arch/arm64/kernel/vdso/Makefile | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index ef3f9d9d4062..83cf80d64add 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,18 +12,13 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ccflags-y := -shared -fno-common -fno-builtin -ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \ + $(call ld-option, --hash-style=sysv) -n -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code GCOV_PROFILE := n -# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared -# down to collect2, resulting in silent corruption of the vDSO image. 
-ccflags-y += -Wl,-shared - obj-y += vdso.o extra-y += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) @@ -33,7 +28,7 @@ $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) - $(call if_changed,vdsold) + $(call if_changed,ld) # Strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -55,8 +50,6 @@ $(obj-vdso): %.o: %.S FORCE $(call if_changed_dep,vdsoas) # Actual build commands -quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< From d8f26291957a060c09190f3fff0e15144afa920f Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 29 May 2020 10:54:25 -0700 Subject: [PATCH 422/439] BACKPORT: arm64: vdso: Explicitly add build-id option Commit 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link VDSO") switched to using LD explicitly. The --build-id option needs to be passed explicitly, similar to x86. Add this option. 
Fixes: 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link VDSO") Reviewed-by: Masahiro Yamada Signed-off-by: Laura Abbott [will: drop redundant use of 'call ld-option' as requested by Masahiro] Signed-off-by: Will Deacon Change-Id: I4a0f5c1bb60bda682221a7ff96a783bf8731cc00 [nd: conflict due to ANDROID LTO and CFI] (cherry picked from commit 7a0a93c51799edc45ee57c6cc1679aa94f1e03d5) Bug: 153418016 Bug: 157279372 Signed-off-by: Nick Desaulniers (cherry picked from commit a9ee8bba814d956404c12b1c2e2c24cf4b710f08) --- arch/arm64/kernel/vdso/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 83cf80d64add..01d6f496d1eb 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \ - $(call ld-option, --hash-style=sysv) -n -T +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ + --build-id -n -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code From ad1e90188fff8bb951f7c66fae1d05ece9267dd0 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 4 Jun 2020 11:49:55 -0700 Subject: [PATCH 423/439] ANDROID: arm64: vdso: wrap -n in ld-option ld.lld distributed in clang-r353983c AOSP LLVM release (the final AOSP LLVM release for Android Q) did not support `-n` linker flag. It was eventually added to clang-r360593. Android OEM's may wish to still use ld.lld to link their kernels for Q. This flag was disabled for Pixel 4 in go/pag/1258086. This patch is equivalent, but rebased on upstream changes that removed cc-ldoption in favor of ld-option. For Android R, the final AOSP LLVM release, clang-r383902 has long supported `-n` for ld.lld. 
Change-Id: Iab41c9e1039e163113b428fc487a4a0708822faa Bug: 63740206 Bug: 157279372 Link: https://github.com/ClangBuiltLinux/linux/issues/340 Link: https://bugs.llvm.org/show_bug.cgi?id=40542 Signed-off-by: Nick Desaulniers --- arch/arm64/kernel/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 01d6f496d1eb..b2c1695c4d4c 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -13,7 +13,7 @@ targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ - --build-id -n -T + --build-id $(call ld-option,-n) -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code From 8189c16749c250ed4a75c5b2dfdb615deba8c310 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 16 Mar 2018 11:25:08 -0700 Subject: [PATCH 424/439] mm: add config for readahead window Change VM_MAX_READAHEAD value from the default 128KB to a configurable value. This will allow the readahead window to grow to a maximum size bigger than 128KB during boot, which could benefit to sequential read throughput and thus boot performance. 
Signed-off-by: Wei Wang --- include/linux/mm.h | 2 +- mm/Kconfig | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 1f3df994812f..cf7774a0d808 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2247,7 +2247,7 @@ int __must_check write_one_page(struct page *page); void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ -#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MAX_READAHEAD CONFIG_VM_MAX_READAHEAD_KB #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ extern int mmap_readaround_limit; diff --git a/mm/Kconfig b/mm/Kconfig index c5dd9c30fd6f..29e60847251b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -764,6 +764,14 @@ config PERCPU_STATS information includes global and per chunk statistics, which can be used to help understand percpu memory usage. +config VM_MAX_READAHEAD_KB + int "Default max readahead window size in Kilobytes" + default 128 + help + This sets the VM_MAX_READAHEAD value to allow the readahead window + to grow to a maximum size of configured. Increasing this value will + benefit sequential read throughput. + config INCREASE_MAXIMUM_SWAPPINESS bool "Allow swappiness to be set up to 200" depends on SWAP From 1aed3b24202fc1d96934fc73d012dec5860b634b Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Wed, 29 Jan 2020 16:04:42 -0800 Subject: [PATCH 425/439] ANDROID: kallsyms: strip hashes from function names with ThinLTO With CONFIG_THINLTO and CFI both enabled, LLVM appends a hash to the names of all static functions. This breaks userspace tools, so strip out the hash from output. 
Bug: 147422318 Change-Id: Ibea6be089d530e92dcd191481cb02549041203f6 Signed-off-by: Sami Tolvanen --- kernel/kallsyms.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index f8674e39e770..ea06361d6bfc 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -352,6 +352,12 @@ static inline void cleanup_symbol_name(char *s) { char *res; +#ifdef CONFIG_THINLTO + /* Filter out hashes from static functions */ + res = strrchr(s, '$'); + if (res) + *res = '\0'; +#endif res = strrchr(s, '.'); if (res && !strcmp(res, ".cfi")) *res = '\0'; From 3bd1fe46347e3c4564e73c0e419d439fc3f6ca66 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 7 Oct 2020 01:48:39 +0300 Subject: [PATCH 426/439] arm64/boot/dts: remove verify,avb fsmgr_flags Signed-off-by: Denis Efremov --- arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi | 6 +++--- arch/arm64/boot/dts/exynos/exynos9820.dts | 6 +++--- arch/arm64/boot/dts/exynos/exynos9820_evt0.dts | 6 +++--- arch/arm64/boot/dts/exynos/exynos9825.dts | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi b/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi index d42ad40e1083..034e24a0da28 100644 --- a/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi @@ -66,21 +66,21 @@ dev = "/dev/block/platform/11120000.ufs/by-name/SYSTEM"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; vendor { compatible = "android,vendor"; dev = "/dev/block/platform/11120000.ufs/by-name/VENDOR"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; odm { compatible = "android,odm"; dev = "/dev/block/platform/11120000.ufs/by-name/ODM"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9820.dts 
b/arch/arm64/boot/dts/exynos/exynos9820.dts index ea6d8b8087ae..dd71b2d8e253 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820.dts @@ -5318,7 +5318,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; status = "disabled"; }; @@ -5327,7 +5327,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; @@ -5336,7 +5336,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts b/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts index 138af210d6d2..c9bec066f2e1 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts @@ -251,7 +251,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; status = "disabled"; }; vendor { @@ -259,7 +259,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; product { @@ -267,7 +267,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9825.dts b/arch/arm64/boot/dts/exynos/exynos9825.dts index 1ee08b451abb..edfc49362474 100644 --- a/arch/arm64/boot/dts/exynos/exynos9825.dts +++ b/arch/arm64/boot/dts/exynos/exynos9825.dts @@ -5318,7 +5318,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; 
status = "disabled"; }; @@ -5327,7 +5327,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; @@ -5336,7 +5336,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; From 9c80166e469a94a358def8863c195ac511c482d3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 Nov 2020 13:56:54 +0300 Subject: [PATCH 427/439] arm64/boot/dts: disable vbmeta Signed-off-by: Denis Efremov --- arch/arm64/boot/dts/exynos/exynos9820.dts | 5 ----- arch/arm64/boot/dts/exynos/exynos9825.dts | 5 ----- 2 files changed, 10 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9820.dts b/arch/arm64/boot/dts/exynos/exynos9820.dts index dd71b2d8e253..51114b10a57d 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820.dts @@ -5305,11 +5305,6 @@ android { compatible = "android,firmware"; - vbmeta { - compatible = "android,vbmeta"; - parts = "vbmeta,boot,recovery,system,vendor,product,dtb,dtbo,keystorage"; - }; - fstab { compatible = "android,fstab"; diff --git a/arch/arm64/boot/dts/exynos/exynos9825.dts b/arch/arm64/boot/dts/exynos/exynos9825.dts index edfc49362474..eefc4a0890da 100644 --- a/arch/arm64/boot/dts/exynos/exynos9825.dts +++ b/arch/arm64/boot/dts/exynos/exynos9825.dts @@ -5305,11 +5305,6 @@ android { compatible = "android,firmware"; - vbmeta { - compatible = "android,vbmeta"; - parts = "vbmeta,boot,recovery,system,vendor,product,dtb,dtbo,keystorage"; - }; - fstab { compatible = "android,fstab"; From 458d0adba9d4bef8d3d1bbaeef631b42195ee798 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 8 Apr 2017 17:37:26 -0700 Subject: [PATCH 428/439] afs: Fix const confusion in AFS A trace point string cannot be const because the underlying special section is not marked const. 
An LTO build complains about the section attribute mismatch. Fix it by not marking the trace point string in afs const. Cc: dhowells@redhat.com Signed-off-by: Andi Kleen --- fs/afs/cmservice.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index c7475867a52b..fdcda968d4b6 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -31,7 +31,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *); static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); #define CM_NAME(name) \ - const char afs_SRXCB##name##_name[] __tracepoint_string = \ + char afs_SRXCB##name##_name[] __tracepoint_string = \ "CB." #name /* From 31133031198f155a2dcb0859ff49a91369b322b2 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 10 Aug 2012 03:11:20 +0200 Subject: [PATCH 429/439] locking/spinlocks: Mark spinlocks noinline when inline spinlocks are disabled Otherwise LTO will inline them anyways and cause a large kernel text increase. Since the explicit intention here is to not inline them marking them noinline is good documentation even for the non LTO case. 
Signed-off-by: Andi Kleen --- kernel/locking/spinlock.c | 56 +++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 6e40fdfba326..f32ba58fcd70 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -131,7 +131,7 @@ BUILD_LOCK_OPS(write, rwlock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK -int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) +noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { return __raw_spin_trylock(lock); } @@ -139,7 +139,7 @@ EXPORT_SYMBOL(_raw_spin_trylock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH -int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) +noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) { return __raw_spin_trylock_bh(lock); } @@ -147,7 +147,7 @@ EXPORT_SYMBOL(_raw_spin_trylock_bh); #endif #ifndef CONFIG_INLINE_SPIN_LOCK -void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { __raw_spin_lock(lock); } @@ -155,7 +155,7 @@ EXPORT_SYMBOL(_raw_spin_lock); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) +noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) { return __raw_spin_lock_irqsave(lock); } @@ -163,7 +163,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irqsave); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ -void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) { __raw_spin_lock_irq(lock); } @@ -171,7 +171,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irq); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_BH -void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) { __raw_spin_lock_bh(lock); } @@ -179,7 +179,7 @@ EXPORT_SYMBOL(_raw_spin_lock_bh); #endif #ifdef CONFIG_UNINLINE_SPIN_UNLOCK -void __lockfunc 
_raw_spin_unlock(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { __raw_spin_unlock(lock); } @@ -187,7 +187,7 @@ EXPORT_SYMBOL(_raw_spin_unlock); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { __raw_spin_unlock_irqrestore(lock, flags); } @@ -195,7 +195,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ -void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) { __raw_spin_unlock_irq(lock); } @@ -203,7 +203,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irq); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH -void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) { __raw_spin_unlock_bh(lock); } @@ -211,7 +211,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif #ifndef CONFIG_INLINE_READ_TRYLOCK -int __lockfunc _raw_read_trylock(rwlock_t *lock) +noinline int __lockfunc _raw_read_trylock(rwlock_t *lock) { return __raw_read_trylock(lock); } @@ -219,7 +219,7 @@ EXPORT_SYMBOL(_raw_read_trylock); #endif #ifndef CONFIG_INLINE_READ_LOCK -void __lockfunc _raw_read_lock(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock(rwlock_t *lock) { __raw_read_lock(lock); } @@ -227,7 +227,7 @@ EXPORT_SYMBOL(_raw_read_lock); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE -unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) +noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) { return __raw_read_lock_irqsave(lock); } @@ -235,7 +235,7 @@ EXPORT_SYMBOL(_raw_read_lock_irqsave); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQ -void __lockfunc _raw_read_lock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock) { __raw_read_lock_irq(lock); } @@ 
-243,7 +243,7 @@ EXPORT_SYMBOL(_raw_read_lock_irq); #endif #ifndef CONFIG_INLINE_READ_LOCK_BH -void __lockfunc _raw_read_lock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock) { __raw_read_lock_bh(lock); } @@ -251,7 +251,7 @@ EXPORT_SYMBOL(_raw_read_lock_bh); #endif #ifndef CONFIG_INLINE_READ_UNLOCK -void __lockfunc _raw_read_unlock(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock(rwlock_t *lock) { __raw_read_unlock(lock); } @@ -259,7 +259,7 @@ EXPORT_SYMBOL(_raw_read_unlock); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { __raw_read_unlock_irqrestore(lock, flags); } @@ -267,7 +267,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ -void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) { __raw_read_unlock_irq(lock); } @@ -275,7 +275,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irq); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_BH -void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) { __raw_read_unlock_bh(lock); } @@ -283,7 +283,7 @@ EXPORT_SYMBOL(_raw_read_unlock_bh); #endif #ifndef CONFIG_INLINE_WRITE_TRYLOCK -int __lockfunc _raw_write_trylock(rwlock_t *lock) +noinline int __lockfunc _raw_write_trylock(rwlock_t *lock) { return __raw_write_trylock(lock); } @@ -291,7 +291,7 @@ EXPORT_SYMBOL(_raw_write_trylock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK -void __lockfunc _raw_write_lock(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock(rwlock_t *lock) { __raw_write_lock(lock); } @@ -299,7 +299,7 @@ EXPORT_SYMBOL(_raw_write_lock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) +noinline unsigned long __lockfunc 
_raw_write_lock_irqsave(rwlock_t *lock) { return __raw_write_lock_irqsave(lock); } @@ -307,7 +307,7 @@ EXPORT_SYMBOL(_raw_write_lock_irqsave); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ -void __lockfunc _raw_write_lock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock) { __raw_write_lock_irq(lock); } @@ -315,7 +315,7 @@ EXPORT_SYMBOL(_raw_write_lock_irq); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_BH -void __lockfunc _raw_write_lock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock) { __raw_write_lock_bh(lock); } @@ -323,7 +323,7 @@ EXPORT_SYMBOL(_raw_write_lock_bh); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK -void __lockfunc _raw_write_unlock(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock(rwlock_t *lock) { __raw_write_unlock(lock); } @@ -331,7 +331,7 @@ EXPORT_SYMBOL(_raw_write_unlock); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { __raw_write_unlock_irqrestore(lock, flags); } @@ -339,7 +339,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ -void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) { __raw_write_unlock_irq(lock); } @@ -347,7 +347,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irq); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH -void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) { __raw_write_unlock_bh(lock); } From 205192522739189cfae93272c8d42707bde8149b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 23 Nov 2017 16:06:26 -0800 Subject: [PATCH 430/439] Fix read buffer overflow in delta-ipc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The single caller passes a string to delta_ipc_open, which 
copies with a fixed size larger than the string. So it copies some random data after the original string in the ro segment. If the string was at the end of a page it may fault. Just copy the string with a normal strcpy after clearing the field. Found by a LTO build (which errors out) because the compiler inlines the functions and can resolve the string sizes and triggers the compile time checks in memcpy. In function ‘memcpy’, inlined from ‘delta_ipc_open.constprop’ at linux/drivers/media/platform/sti/delta/delta-ipc.c:178:0, inlined from ‘delta_mjpeg_ipc_open’ at linux/drivers/media/platform/sti/delta/delta-mjpeg-dec.c:227:0, inlined from ‘delta_mjpeg_decode’ at linux/drivers/media/platform/sti/delta/delta-mjpeg-dec.c:403:0: /home/andi/lsrc/linux/include/linux/string.h:337:0: error: call to ‘__read_overflow2’ declared with attribute error: detected read beyond size of object passed as 2nd parameter __read_overflow2(); Cc: hugues.fruchet@st.com Cc: mchehab@s-opensource.com Signed-off-by: Andi Kleen --- drivers/media/platform/sti/delta/delta-ipc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c index 41e4a4c259b3..b6c256e3ceb6 100644 --- a/drivers/media/platform/sti/delta/delta-ipc.c +++ b/drivers/media/platform/sti/delta/delta-ipc.c @@ -175,8 +175,8 @@ int delta_ipc_open(struct delta_ctx *pctx, const char *name, msg.ipc_buf_size = ipc_buf_size; msg.ipc_buf_paddr = ctx->ipc_buf->paddr; - memcpy(msg.name, name, sizeof(msg.name)); - msg.name[sizeof(msg.name) - 1] = 0; + memset(msg.name, 0, sizeof(msg.name)); + strcpy(msg.name, name); msg.param_size = param->size; memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size); From 8f2a75a328c1628527b29e9b9455f75b18df32b7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 31 Mar 2015 03:45:39 -0700 Subject: [PATCH 431/439] lto: Add __noreorder and mark initcalls __noreorder gcc 5 has a new no_reorder attribute 
that prevents top level reordering only for that symbol. Kernels don't like any reordering of initcalls between files, as several initcalls depend on each other. LTO previously needed to use -fno-toplevel-reordering to prevent boot failures. Add a __noreorder wrapper for the no_reorder attribute and use it for initcalls. Signed-off-by: Andi Kleen --- include/linux/compiler-gcc.h | 5 +++++ include/linux/compiler_types.h | 3 +++ include/linux/init.h | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 6d7ead22c1b4..4a8308c74bfc 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -357,6 +357,11 @@ #define __no_sanitize_address #endif +#if __GNUC__ >= 5 +/* Avoid reordering a top level statement */ +#define __noreorder __attribute__((no_reorder)) +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index a207f820d3b0..2da2ba2fd954 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -284,6 +284,9 @@ struct ftrace_likely_data { #define __assume_aligned(a, ...) #endif +#ifndef __noreorder +#define __noreorder +#endif /* Are two types/vars the same type (ignoring qualifiers)? 
*/ #ifndef __same_type diff --git a/include/linux/init.h b/include/linux/init.h index f5fd059a6e76..6a8fa1e0cadf 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -179,7 +179,7 @@ extern bool initcall_debug; */ #define __define_initcall(fn, id) \ - static initcall_t __initcall_name(fn, id) __used \ + static initcall_t __initcall_name(fn, id) __used __noreorder \ __attribute__((__section__(".initcall" #id ".init"))) = fn; /* From 0d3db18c92572b620814ca1b8d80ba9dcc04d913 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 27 Jun 2014 23:11:22 +0200 Subject: [PATCH 432/439] lto, workaround: Disable LTO for BPF Disable LTO for the BPF interpreter. This works around a gcc bug in the LTO partitioner that partitions the jumptable used by the BPF interpreter into a different LTO unit. This in turn causes assembler errors because the jump table contains references to the code labels in the original file. gcc problem tracked in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=50676 Signed-off-by: Andi Kleen --- kernel/bpf/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index af3ab6164ff5..63ac9f5320b1 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -13,3 +13,8 @@ ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif obj-$(CONFIG_CGROUP_BPF) += cgroup.o + +# various version of gcc have a LTO bug where the &&labels used in the +# BPF interpreter can cause linker errors when spread incorrectly over +# partitions. 
Disable LTO for BPF for now +CFLAGS_core.o = $(DISABLE_LTO) From 6d282653645b609239032a0902d100391dfef0f1 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Wed, 29 Nov 2017 17:03:03 +0300 Subject: [PATCH 433/439] arm64: cpu_ops: Add missing 'const' qualifiers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building the kernel with an LTO-enabled GCC spits out the following "const" warning for the cpu_ops code: mm/percpu.c:2168:20: error: pcpu_fc_names causes a section type conflict with dt_supported_cpu_ops const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { ^ arch/arm64/kernel/cpu_ops.c:34:37: note: ‘dt_supported_cpu_ops’ was declared here static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { Fix it by adding missed const qualifiers. Signed-off-by: Yury Norov Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_ops.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index d16978213c5b..ea001241bdd4 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; -static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { +static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = { &smp_spin_table_ops, &cpu_psci_ops, NULL, }; -static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = { +static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = { #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL &acpi_parking_protocol_ops, #endif @@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = { static const struct cpu_operations * __init cpu_get_ops(const char *name) { - const struct cpu_operations **ops; + const struct cpu_operations *const *ops; ops = 
acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops; From be11908c2de2f3b6b416ebc71a39b5a780521cb8 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 25 Nov 2020 12:55:50 +0300 Subject: [PATCH 434/439] Makefile: add mcpu option Signed-off-by: Denis Efremov --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 3c1f63666619..d49b5b07be71 100644 --- a/Makefile +++ b/Makefile @@ -738,6 +738,12 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif +ifdef CONFIG_SOC_EXYNOS9820 +ifeq ($(cc-name),gcc) +KBUILD_CFLAGS += $(call cc-option,-mcpu=cortex-a75.cortex-a55,$(call cc-option,-mcpu=cortex-a55)) +endif +endif + ifeq ($(cc-name),clang) ifdef CONFIG_LLVM_POLLY KBUILD_CFLAGS += -mllvm -polly \ From 9f60875185906abd83ef5b609f4ea45239bfcb6e Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 9 Dec 2020 13:34:10 +0300 Subject: [PATCH 435/439] Kbuild: Add Link Time Optimization (LTO) support Based on Andi Kleen work. Signed-off-by: Denis Efremov --- Makefile | 65 ++++++++++++++++++++++++++++++++++++++-- arch/Kconfig | 60 +++++++++++++++++++++++++++++++++++++ arch/arm64/Kconfig | 1 + crypto/Makefile | 1 + lib/Kconfig.debug | 2 +- scripts/Makefile.modpost | 3 +- scripts/gcc-ld | 9 ++++-- scripts/link-vmlinux.sh | 6 ++++ 8 files changed, 141 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index d49b5b07be71..0555de65344a 100644 --- a/Makefile +++ b/Makefile @@ -713,6 +713,26 @@ LDFLAGS += -plugin LLVMgold.so LLVM_AR := llvm-ar LLVM_DIS := llvm-dis export LLVM_AR LLVM_DIS +else ifdef CONFIG_LTO_GCC +LDFLAGS_FINAL_vmlinux := -flto=jobserver -fuse-linker-plugin +LDFLAGS_FINAL_vmlinux += $(filter -g%, $(KBUILD_CFLAGS)) +LDFLAGS_FINAL_vmlinux += -fno-fat-lto-objects +LDFLAGS_FINAL_vmlinux += $(call cc-disable-warning,attribute-alias,) +LDFLAGS_FINAL_vmlinux += -Xassembler -Idrivers/misc/tzdev +ifdef CONFIG_LTO_DEBUG + LDFLAGS_FINAL_vmlinux += -fdump-ipa-cgraph -fdump-ipa-inline-details + # add 
for debugging compiler crashes: + # LDFLAGS_FINAL_vmlinux += -dH -save-temps +endif +ifdef CONFIG_LTO_CP_CLONE + LDFLAGS_FINAL_vmlinux += -fipa-cp-clone +endif +LDFLAGS_FINAL_vmlinux += -Wno-lto-type-mismatch -Wno-psabi +LDFLAGS_FINAL_vmlinux += -Wno-stringop-overflow -flinker-output=nolto-rel + +LDFINAL_vmlinux := ${CONFIG_SHELL} ${srctree}/scripts/gcc-ld +AR := $(CROSS_COMPILE)gcc-ar +NM := $(CROSS_COMPILE)gcc-nm endif # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default @@ -900,13 +920,23 @@ lto-clang-flags := -flto -fvisibility=hidden # allow disabling only clang LTO where needed DISABLE_LTO_CLANG := -fno-lto -fvisibility=default export DISABLE_LTO_CLANG +else ifdef CONFIG_LTO_GCC +lto-gcc-flags := -flto -fno-fat-lto-objects +lto-gcc-flags += $(call cc-disable-warning,attribute-alias,) + +ifdef CONFIG_LTO_CP_CLONE +lto-gcc-flags += -fipa-cp-clone +endif + +DISABLE_LTO_GCC := -fno-lto +export DISABLE_LTO_GCC endif ifdef CONFIG_LTO -lto-flags := $(lto-clang-flags) +lto-flags := $(lto-clang-flags) $(lto-gcc-flags) KBUILD_CFLAGS += $(lto-flags) -DISABLE_LTO := $(DISABLE_LTO_CLANG) +DISABLE_LTO := $(DISABLE_LTO_CLANG) $(DISABLE_LTO_GCC) export DISABLE_LTO # LDFINAL_vmlinux and LDFLAGS_FINAL_vmlinux can be set to override @@ -1309,6 +1339,22 @@ ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ endif endif +# Disable gcc-specific config options when using a different compiler +gcc-specific-configs := LTO_GCC + +PHONY += check-gcc-specific-options +check-gcc-specific-options: $(KCONFIG_CONFIG) FORCE +ifneq ($(cc-name),gcc) +ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ + $(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(gcc-specific-configs),-s $(c)))),) + @echo WARNING: Disabling gcc-specific options with $(cc-name) >&2 + $(Q)$(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(gcc-specific-configs),-d $(c)) && \ + $(MAKE) -f $(srctree)/Makefile olddefconfig +endif +endif + # Check for CONFIG flags that require 
compiler support. Abort the build # after .config has been processed, but before the kernel build starts. # @@ -1327,6 +1373,21 @@ ifdef CONFIG_LTO_CLANG ifneq ($(call gold-ifversion, -ge, 112000000, y), y) @echo Cannot use CONFIG_LTO_CLANG: requires GNU gold 1.12 or later >&2 && exit 1 endif +else ifdef CONFIG_LTO_GCC + ifdef CONFIG_UBSAN + ifeq ($(call gcc-ifversion,-lt,0600,y),y) + @echo Cannot use CONFIG_LTO_GCC with UBSAN: >= gcc 6.x required >&2 && exit 1 + endif + endif + ifeq ($(shell if test `ulimit -n` -lt 4000 ; then echo yes ; fi),yes) + @echo File descriptor limit too low. Increase with ulimit -n >&2 && exit 1 + endif + ifeq ($(call gcc-ifversion, -lt, 0500,y),y) + @echo Cannot use CONFIG_LTO_GCC: requires gcc 5.0 or later >&2 && exit 1 + endif + ifeq ($(call ld-ifversion,-lt,227000000,y),y) + @echo Cannot use CONFIG_LTO_GCC: requires binutils 2.27 or later >&2 && exit 1 + endif endif # Make sure compiler supports LTO flags ifdef lto-flags diff --git a/arch/Kconfig b/arch/Kconfig index 94802f909d95..3857689caf10 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -607,6 +607,9 @@ config ARCH_SUPPORTS_LTO_CLANG - compiling inline assembly with clang's integrated assembler, - and linking with either lld or GNU gold w/ LLVMgold. +config ARCH_SUPPORTS_LTO_GCC + bool + choice prompt "Link-Time Optimization (LTO) (EXPERIMENTAL)" default LTO_NONE @@ -634,8 +637,65 @@ config LTO_CLANG 5.0 (make CC=clang) and GNU gold from binutils >= 2.27, and have the LLVMgold plug-in in LD_LIBRARY_PATH. +config LTO_GCC + bool "Enable gcc link time optimization (LTO)" + depends on ARCH_SUPPORTS_LTO_GCC + depends on !GCOV_KERNEL + depends on !MODVERSIONS + select LTO + select THIN_ARCHIVES + select LD_DEAD_CODE_DATA_ELIMINATION + # lto does not support excluding flags for specific files + # right now. Can be removed if that is fixed. + #depends on !FUNCTION_TRACER + help + With this option gcc will do whole program optimizations for + the whole kernel and module. 
This increases compile time, but can + lead to better code. It allows gcc to inline functions between + different files and do other optimization. It might also trigger + bugs due to more aggressive optimization. It allows gcc to drop unused + code. On smaller monolithic kernel configurations + it usually leads to smaller kernels, especially when modules + are disabled. + + With this option gcc will also do some global checking over + different source files. It also disables a number of kernel + features. + + This option is recommended for release builds. With LTO + the kernel always has to be re-optimized (but not re-parsed) + on each build. + + This requires a gcc 6.0 or later compiler. + + On larger configurations this may need more than 4GB of RAM. + It will likely not work on those with a 32bit compiler. + + When the toolchain support is not available this will (hopefully) + be automatically disabled. + + For more information see Documentation/lto-build endchoice +config LTO_DEBUG + bool "Enable LTO compile time debugging" + depends on LTO_GCC + help + Enable LTO debugging in the compiler. The compiler dumps + some log files that make it easier to figure out LTO + behavior. The log files also allow to reconstruct + the global inlining and a global callgraph. + They however add some (single threaded) cost to the + compilation. When in doubt do not enable. + +config LTO_CP_CLONE + bool "Allow aggressive cloning for function specialization" + depends on LTO_GCC + help + Allow the compiler to clone and specialize functions for specific + arguments when it determines these arguments are very commonly + called. Experimential. Will increase text size. 
+ config CFI bool diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 94683e02421d..7455241ac0b4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -25,6 +25,7 @@ config ARM64 select ARCH_USE_CMPXCHG_LOCKREF select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_LTO_CLANG + select ARCH_SUPPORTS_LTO_GCC select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_WANT_COMPAT_IPC_PARSE_VERSION diff --git a/crypto/Makefile b/crypto/Makefile index 038ed7b25dfa..afdb6e88c274 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -126,6 +126,7 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o +CFLAGS_cast_common.o += $(DISABLE_LTO) obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o obj-$(CONFIG_CRYPTO_CAST6) += cast6_generic.o diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ccd0e952f182..aaa7c5d4ae61 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -245,7 +245,7 @@ config STRIP_ASM_SYMS config READABLE_ASM bool "Generate readable assembler code" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !LTO help Disable some compiler optimizations that tend to generate human unreadable assembler output. 
This may make the kernel slightly slower, but it helps diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index acd7ea0b5f26..2d1b4cceb290 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -79,7 +79,8 @@ modpost = scripts/mod/modpost \ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ - $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) + $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ + $(if $(CONFIG_LTO),-w) MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS))) diff --git a/scripts/gcc-ld b/scripts/gcc-ld index 997b818c3962..e552259bbe07 100755 --- a/scripts/gcc-ld +++ b/scripts/gcc-ld @@ -8,17 +8,20 @@ ARGS="-nostdlib" while [ "$1" != "" ] ; do case "$1" in - -save-temps|-m32|-m64) N="$1" ;; + -save-temps*|-m32|-m64) N="$1" ;; -r) N="$1" ;; + -flinker-output*) N="$1" ;; -[Wg]*) N="$1" ;; -[olv]|-[Ofd]*|-nostdlib) N="$1" ;; - --end-group|--start-group) + --end-group|--start-group|--whole-archive|--no-whole-archive) N="-Wl,$1" ;; -[RTFGhIezcbyYu]*|\ --script|--defsym|-init|-Map|--oformat|-rpath|\ -rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\ --version-script|--dynamic-list|--version-exports-symbol|--wrap|-m) A="$1" ; shift ; N="-Wl,$A,$1" ;; + -maarch64elf) N="-Wl,$1" ;; + -Xassembler) shift ; N="-Xassembler $1" ;; -[m]*) N="$1" ;; -*) N="-Wl,$1" ;; *) N="$1" ;; @@ -27,4 +30,6 @@ while [ "$1" != "" ] ; do shift done +[ -n "$V" ] && echo >&2 $CC $ARGS + exec $CC $ARGS diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index ba25de7025fc..6591496fe845 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -120,6 +120,12 @@ modpost_link() # This might take a while, so indicate that we're doing # an LTO link info LTO vmlinux.o + elif [ -n "${CONFIG_LTO_GCC}" ]; then + if [ -n "${LDFINAL_vmlinux}" ]; then + LD=${LDFINAL_vmlinux} + LDFLAGS="${LDFLAGS_FINAL_vmlinux} ${LDFLAGS}" + fi + info LDFINAL 
vmlinux.o else info LD vmlinux.o fi From 1e3ff2b1bce56f7130ca6b7a4c09564f71007548 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 May 2022 21:35:57 +0400 Subject: [PATCH 436/439] drivers/net/wireless/broadcom: drop -Wno-sometimes-uninitialized Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 7 ------- 10 files changed, 25 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile b/drivers/net/wireless/broadcom/bcmdhd/Makefile index 2b1c18d4fa02..f0e0f6240315 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile @@ -1366,11 +1366,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk index ac9d6438e6bb..a1f8644798dc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk @@ -1331,11 +1331,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += 
-Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp index 5ffbdc0aa5de..c9cbecb8b477 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp @@ -1354,11 +1354,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile index 97534f4483b6..0256c044264d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile @@ -1345,11 +1345,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk index d0ebd62b9cd0..d175529f8f46 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk @@ -1310,11 +1310,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp index 89271234981f..16e7d0623472 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp +++ 
b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp @@ -1333,11 +1333,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile index 2660b26bfc8e..cd6983d5df6a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile @@ -1555,11 +1555,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk index 4e689aadee3f..02c7fff2a748 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk @@ -1520,11 +1520,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp index 5354514c6c4f..5d6a89cf451a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp @@ -1543,11 +1543,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq 
($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index c311e7a849c8..84bcbef79a5b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -1836,31 +1836,24 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS9830),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS2100),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS1000),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_KONA),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_LAHAINA),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS9110),y) DHDCFLAGS += -Wno-unused-const-variable From d939fd7b5febbea82d2ad375f91fa49990321cb0 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 24 May 2022 21:36:53 +0400 Subject: [PATCH 437/439] drivers/net/wireless/broadcom/bcmdhd_101_16: drop -Werror Signed-off-by: Denis Efremov --- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index 84bcbef79a5b..c9fb91b814a4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -25,7 +25,7 @@ DHDCFLAGS += -DBCMUTILS_ERR_CODES 
-DUSE_NEW_RSPEC_DEFS DHDCFLAGS += -Dlinux -D__linux__ -DLINUX -DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -DBCMDRIVER \ +DHDCFLAGS += -Wall -Wstrict-prototypes -DBCMDRIVER \ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ -DDHDTHREAD -DDHD_BCMEVENTS -DSHOW_EVENTS -DWLP2P \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DKEEP_ALIVE -DCSCAN \ From 90bd6d466fb1a9cc17628590e06fcba3b3c7e335 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 19 Apr 2023 22:50:29 +0400 Subject: [PATCH 438/439] fixup! initramfs: integrate magiskinit64 Signed-off-by: Denis Efremov --- usr/magisk/initramfs_list | 1 + usr/magisk/update_magisk.sh | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/usr/magisk/initramfs_list b/usr/magisk/initramfs_list index 6fc1b73bfd2d..a63387d9e8cb 100644 --- a/usr/magisk/initramfs_list +++ b/usr/magisk/initramfs_list @@ -5,3 +5,4 @@ dir /overlay.d 0750 0 0 dir /overlay.d/sbin 0750 0 0 file /overlay.d/sbin/magisk32.xz usr/magisk/magisk32.xz 0644 0 0 file /overlay.d/sbin/magisk64.xz usr/magisk/magisk64.xz 0644 0 0 +file /overlay.d/sbin/stub.xz usr/magisk/stub.xz 0644 0 0 diff --git a/usr/magisk/update_magisk.sh b/usr/magisk/update_magisk.sh index 7bf57a36b086..305fb68245b1 100755 --- a/usr/magisk/update_magisk.sh +++ b/usr/magisk/update_magisk.sh @@ -39,6 +39,12 @@ then mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" mv -f "$DIR/lib/armeabi-v7a/libmagisk64.so" "$DIR/magisk64" xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + elif unzip -o "$DIR/magisk.zip" lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so assets/stub.apk -d "$DIR"; then + mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/arm64-v8a/libmagisk64.so" "$DIR/magisk64" + mv -f "$DIR/assets/stub.apk" "$DIR/stub" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" "$DIR/stub" else unzip -o "$DIR/magisk.zip" 
lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so -d "$DIR" mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" From d8f85eda536ddfb89b1f93fad39640243767e261 Mon Sep 17 00:00:00 2001 From: Nswa <49425472+Nswa@users.noreply.github.com> Date: Thu, 20 Apr 2023 05:13:15 +0800 Subject: [PATCH 439/439] default toolchain --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ffee8352331d..37ce02a6e106 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -3,7 +3,7 @@ name: Build on: [push] env: - TOOLCHAIN: cruel + TOOLCHAIN: default INSTALLER: yes jobs: @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - model: [ "G970F,G973F,G975F", "N975F" ] + model: [ "G975F"] steps: - uses: actions/checkout@v3