efDataPreprocessing preliminaryDataProcessing.c 韩正义 commit at 2021-03-29

韩正义, 4 years ago
Parent commit: 2bfe700df5
1 changed file with 195 additions and 0 deletions:
  efDataPreprocessing/monitoringDataProcessing/preliminaryDataProcessing.c

+ 195 - 0   efDataPreprocessing/monitoringDataProcessing/preliminaryDataProcessing.c

@@ -615,3 +615,198 @@ smp_send_reschedule(int cpu)
 
 void
 smp_send_stop(void)
+{
+	cpumask_t to_whom;
+	cpumask_copy(&to_whom, cpu_possible_mask);
+	cpumask_clear_cpu(smp_processor_id(), &to_whom);
+#ifdef DEBUG_IPI_MSG
+	if (hard_smp_processor_id() != boot_cpu_id)
+		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
+#endif
+	send_ipi_message(&to_whom, IPI_CPU_STOP);
+}
+
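+/*
+ * Hooks used by the generic kernel/smp.c cross-call code to kick one
+ * or more remote CPUs into running their queued call-function requests.
+ */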
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
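+/* IPI handler: issue an instruction-memory barrier (imb) on this CPU. */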
+static void
+ipi_imb(void *ignored)
+{
+	imb();
+}
+
+void
+smp_imb(void)
+{
+	/* Wait for the other processors to finish flushing their icaches
+	   before continuing. */
+	if (on_each_cpu(ipi_imb, NULL, 1))
+		printk(KERN_CRIT "smp_imb: timed out\n");
+}
+EXPORT_SYMBOL(smp_imb);
+
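+/* IPI handler: invalidate this CPU's entire TLB via tbia ("TB invalidate all"). */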
+static void
+ipi_flush_tlb_all(void *ignored)
+{
+	tbia();
+}
+
+void
+flush_tlb_all(void)
+{
+	/* Although we don't have any data to pass, we do want to
+	   synchronize with the other processors.  */
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
+		printk(KERN_CRIT "flush_tlb_all: timed out\n");
+	}
+}
+
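+/* Nonzero while this CPU's address space number (ASN) bookkeeping is being
+   updated; the IPI handlers below then fall back to flush_tlb_other(). */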
+#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
+
+static void
+ipi_flush_tlb_mm(void *x)
+{
+	struct mm_struct *mm = (struct mm_struct *) x;
+	if (mm == current->active_mm && !asn_locked())
+		flush_tlb_current(mm);
+	else
+		flush_tlb_other(mm);
+}
+
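+/*
+ * Flush all TLB entries belonging to mm.  When no other thread can hold
+ * the mm (mm_users <= 1), the cross-call is skipped: clearing the
+ * per-CPU context numbers forces a fresh context on the next activation.
+ */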
+void
+flush_tlb_mm(struct mm_struct *mm)
+{
+	preempt_disable();
+
+	if (mm == current->active_mm) {
+		flush_tlb_current(mm);
+		if (atomic_read(&mm->mm_users) <= 1) {
+			int cpu, this_cpu = smp_processor_id();
+			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+				if (!cpu_online(cpu) || cpu == this_cpu)
+					continue;
+				if (mm->context[cpu])
+					mm->context[cpu] = 0;
+			}
+			preempt_enable();
+			return;
+		}
+	}
+
+	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
+		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
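+/* Argument block passed through smp_call_function(), which only carries
+   a single void pointer, for the per-page flush below. */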
+struct flush_tlb_page_struct {
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	unsigned long addr;
+};
+
+static void
+ipi_flush_tlb_page(void *x)
+{
+	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
+	struct mm_struct * mm = data->mm;
+
+	if (mm == current->active_mm && !asn_locked())
+		flush_tlb_current_page(mm, data->vma, data->addr);
+	else
+		flush_tlb_other(mm);
+}
+
+void
+flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct flush_tlb_page_struct data;
+	struct mm_struct *mm = vma->vm_mm;
+
+	preempt_disable();
+
+	if (mm == current->active_mm) {
+		flush_tlb_current_page(mm, vma, addr);
+		if (atomic_read(&mm->mm_users) <= 1) {
+			int cpu, this_cpu = smp_processor_id();
+			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+				if (!cpu_online(cpu) || cpu == this_cpu)
+					continue;
+				if (mm->context[cpu])
+					mm->context[cpu] = 0;
+			}
+			preempt_enable();
+			return;
+		}
+	}
+
+	data.vma = vma;
+	data.mm = mm;
+	data.addr = addr;
+
+	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
+		printk(KERN_CRIT "flush_tlb_page: timed out\n");
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	/* On the Alpha we always flush the whole user tlb.  */
+	flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
+
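+/* IPI handler for icache flushes: loading a fresh mm context (and with it
+   a new ASN) retires stale icache entries for that address space. */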
+static void
+ipi_flush_icache_page(void *x)
+{
+	struct mm_struct *mm = (struct mm_struct *) x;
+	if (mm == current->active_mm && !asn_locked())
+		__load_new_mm_context(mm);
+	else
+		flush_tlb_other(mm);
+}
+
+void
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			unsigned long addr, int len)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if ((vma->vm_flags & VM_EXEC) == 0)
+		return;
+
+	preempt_disable();
+
+	if (mm == current->active_mm) {
+		__load_new_mm_context(mm);
+		if (atomic_read(&mm->mm_users) <= 1) {
+			int cpu, this_cpu = smp_processor_id();
+			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+				if (!cpu_online(cpu) || cpu == this_cpu)
+					continue;
+				if (mm->context[cpu])
+					mm->context[cpu] = 0;
+			}
+			preempt_enable();
+			return;
+		}
+	}
+
+	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
+		printk(KERN_CRIT "flush_icache_page: timed out\n");
+	}
+
+	preempt_enable();
+}
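
For context, a minimal sketch of how a caller might drive these primitives (illustrative only; demo_invalidate_page is a hypothetical helper, while flush_tlb_page() and smp_imb() are the functions added above):

	#include <linux/mm.h>

	/* Hypothetical caller: drop one user mapping on every CPU, then make
	   sure all CPUs discard stale instructions from their icaches. */
	static void demo_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
	{
		flush_tlb_page(vma, addr);	/* local flush plus IPI to the other CPUs */
		smp_imb();			/* runs imb() on every online CPU */
	}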