Diffstat (limited to 'Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c')
-rw-r--r--  Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c  107
1 file changed, 107 insertions, 0 deletions
diff --git a/Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c b/Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c
new file mode 100644
index 0000000..58eb990
--- /dev/null
+++ b/Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_softmax_q7.c
+ * Description: Q7 softmax function
+ *
+ * $Date: 09. October 2020
+ * $Revision: V.1.0.2
+ *
+ * Target Processor: Cortex-M cores
+ *
+ * -------------------------------------------------------------------- */
+
+#include "arm_nnfunctions.h"
+
+/**
+ * @ingroup groupNN
+ */
+
+/**
+ * @addtogroup Softmax
+ * @{
+ */
+
+/**
+ * @brief Q7 softmax function
+ * @param[in]       vec_in      pointer to input vector
+ * @param[in]       dim_vec     input vector dimension
+ * @param[out]      p_out       pointer to output vector
+ *
+ * @details
+ *
+ * Instead of the typical natural-logarithm (base-e) softmax, this
+ * function uses a base-2 softmax, i.e.:
+ *
+ *  y_i = 2^(x_i) / sum(2^x_j)
+ *
+ * The output values differ from those of the base-e softmax,
+ * but mathematically the gradient is the same up to a
+ * log(2) scaling factor.
+ *
+ */
+
+void arm_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out)
+{
+    q31_t sum;
+    int16_t i;
+    uint8_t shift;
+    q15_t base;
+    base = -128;
+
+    /* We first search for the maximum */
+    for (i = 0; i < dim_vec; i++)
+    {
+        if (vec_in[i] > base)
+        {
+            base = vec_in[i];
+        }
+    }
+
+    /*
+     * The base is set to max - 8, meaning that
+     * really small input values are ignored; they would be
+     * 0 after shrinking to q7_t anyway.
+     */
+    base = base - (1 << 3);
+
+    sum = 0;
+
+    for (i = 0; i < dim_vec; i++)
+    {
+        shift = (uint8_t)__USAT(vec_in[i] - base, 3); /* clamp exponent to [0, 7] */
+        sum += 0x1 << shift;
+    }
+
+    /* Reciprocal of the sum, scaled up by 2^20 */
+    int output_base = (1 << 20) / sum;
+
+    for (i = 0; i < dim_vec; i++)
+    {
+
+        /* Since vec_in[i] <= base + 8, the minimum value of 13 + base - vec_in[i] is 5 */
+        shift = (uint8_t)__USAT(13 + base - vec_in[i], 5); /* clamp shift to [0, 31] */
+        p_out[i] = (q7_t)__SSAT((output_base >> shift), 8); /* saturate to q7 range */
+    }
+}
+
+/**
+ * @} end of Softmax group
+ */
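
For reference, below is a minimal host-side sketch (not part of CMSIS-NN) of how this kernel might be exercised. It compares arm_softmax_q7() against a plain floating-point model of the base-2 softmax described in the comment above; the helper softmax_base2_ref(), the sample logits, and the printout are illustrative assumptions, and the build is assumed to link against the CMSIS-NN library and the C math library. Ignoring the saturation of the shifts, output_base >> shift equals (2^20 / sum) >> (13 + base - x_i), which works out to roughly 2^7 * 2^(x_i) / sum(2^(x_j)), so the q7_t result can be read as the base-2 softmax in Q0.7 format (divide by 128 to recover a probability).

/*
 * Hypothetical usage sketch, not part of CMSIS-NN: feeds a small q7_t
 * vector through arm_softmax_q7() and compares the result against a
 * floating-point model of the base-2 softmax. softmax_base2_ref() and
 * the sample data are assumptions made for illustration only.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#include "arm_nnfunctions.h"

/* Floating-point model of the base-2 softmax: y_i = 2^(x_i) / sum_j 2^(x_j) */
static void softmax_base2_ref(const q7_t *vec_in, uint16_t dim_vec, float *p_out)
{
    float sum = 0.0f;
    for (uint16_t i = 0; i < dim_vec; i++)
    {
        p_out[i] = exp2f((float)vec_in[i]);
        sum += p_out[i];
    }
    for (uint16_t i = 0; i < dim_vec; i++)
    {
        p_out[i] /= sum;
    }
}

int main(void)
{
    /* Example logits; values are arbitrary and chosen only for illustration */
    const q7_t logits[4] = {-20, 5, 8, 2};
    q7_t out_q7[4];
    float out_ref[4];

    arm_softmax_q7(logits, 4, out_q7);
    softmax_base2_ref(logits, 4, out_ref);

    for (int i = 0; i < 4; i++)
    {
        /* out_q7 is interpreted as Q0.7, so dividing by 128 gives a probability */
        printf("out_q7[%d] = %4d (%.4f)  ref = %.4f\n", i, (int)out_q7[i], out_q7[i] / 128.0f, out_ref[i]);
    }
    return 0;
}

The base-2 formulation is what lets the kernel stay in integer arithmetic: exponentiation becomes a left shift (0x1 << shift) when accumulating the sum, and the per-element division becomes a right shift of the precomputed output_base, so no exponential approximation and no per-element division are needed.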