/* ----------------------------------------------------------------------
* Copyright (C) 2010 ARM Limited. All rights reserved.
*
* $Date: 15. July 2011
* $Revision: V1.0.10
*
* Project: CMSIS DSP Library
* Title: arm_correlate_q15.c
*
* Description: Correlation of Q15 sequences.
*
* Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
*
* Version 1.0.10 2011/7/15
* Big Endian support added and Merged M0 and M3/M4 Source code.
*
* Version 1.0.3 2010/11/29
* Re-organized the CMSIS folders and updated documentation.
*
* Version 1.0.2 2010/11/11
* Documentation updated.
*
* Version 1.0.1 2010/10/05
* Production release and review comments incorporated.
*
* Version 1.0.0 2010/09/20
* Production release and review comments incorporated.
*
* Version 0.0.7 2010/06/10
* Misra-C changes done
*
* -------------------------------------------------------------------- */
#include "arm_math.h"
/**
* @ingroup groupFilters
*/
/**
* @addtogroup Corr
* @{
*/
/**
* @brief Correlation of Q15 sequences.
* @param[in] *pSrcA points to the first input sequence.
* @param[in] srcALen length of the first input sequence.
* @param[in] *pSrcB points to the second input sequence.
* @param[in] srcBLen length of the second input sequence.
* @param[out] *pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1.
* @return none.
*
* @details
* <b>Scaling and Overflow Behavior:</b>
*
* \par
* The function is implemented using a 64-bit internal accumulator.
* Both inputs are in 1.15 format and multiplications yield a 2.30 result.
* The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format.
* This approach provides 33 guard bits and there is no risk of overflow.
* The 34.30 result is then truncated to 34.15 format by discarding the low 15 bits and then saturated to 1.15 format.
*
* \par
* Refer to <code>arm_correlate_fast_q15()</code> for a faster but less precise version of this function for Cortex-M3 and Cortex-M4.
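*
* \par Example
* A minimal usage sketch (the buffer names and lengths below are illustrative assumptions,
* not part of the library):
* <pre>
*   #define SRCA_LEN   64
*   #define SRCB_LEN   16
*
*   q15_t srcA[SRCA_LEN];
*   q15_t srcB[SRCB_LEN];
*   q15_t result[2 * SRCA_LEN - 1];   // 2 * max(srcALen, srcBLen) - 1 output samples
*
*   arm_correlate_q15(srcA, SRCA_LEN, srcB, SRCB_LEN, result);
* </pre>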
*/
void arm_correlate_q15(
q15_t * pSrcA,
uint32_t srcALen,
q15_t * pSrcB,
uint32_t srcBLen,
q15_t * pDst)
{
#ifndef ARM_MATH_CM0
/* Run the below code for Cortex-M4 and Cortex-M3 */
q15_t *pIn1; /* inputA pointer */
q15_t *pIn2; /* inputB pointer */
q15_t *pOut = pDst; /* output pointer */
q63_t sum, acc0, acc1, acc2, acc3; /* Accumulators */
q15_t *px; /* Intermediate inputA pointer */
q15_t *py; /* Intermediate inputB pointer */
q15_t *pSrc1; /* Intermediate pointers */
q31_t x0, x1, x2, x3, c0; /* temporary variables for holding input and coefficient values */
uint32_t j, k = 0u, count, blkCnt, outBlockSize, blockSize1, blockSize2, blockSize3; /* loop counter */
int32_t inc = 1; /* Destination address modifier */
q31_t *pb; /* 32 bit pointer for inputB buffer */
/* The algorithm implementation is based on the lengths of the inputs. */
/* srcB is always made to slide across srcA. */
/* So srcBLen is always considered to be shorter than or equal to srcALen */
/* But CORR(x, y) is the reverse of CORR(y, x) */
/* So, when srcBLen > srcALen, output pointer is made to point to the end of the output buffer */
/* and the destination pointer modifier, inc is set to -1 */
/* If srcALen > srcBLen, srcB has to be zero padded to make the two inputs the same length */
/* But to improve the performance,
 * we include zeros in the output instead of zero padding either of the inputs */
/* If srcALen > srcBLen,
 * (srcALen - srcBLen) zeros have to be included at the start of the output buffer */
/* If srcALen < srcBLen,
 * (srcBLen - srcALen) zeros have to be included at the end of the output buffer */
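/* Illustrative example (lengths assumed for clarity): with srcALen = 5 and srcBLen = 3,
 * the output buffer holds 2 * max(5, 3) - 1 = 9 samples, of which srcALen + srcBLen - 1 = 7
 * are computed below; the remaining 9 - 7 = 2 leading positions are the implied zeros and
 * are skipped by advancing the output pointer. */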
if(srcALen >= srcBLen)
{
/* Initialization of inputA pointer */
pIn1 = (pSrcA);
/* Initialization of inputB pointer */
pIn2 = (pSrcB);
/* Number of output samples is calculated */
outBlockSize = (2u * srcALen) - 1u;
/* When srcALen > srcBLen, zero padding is done to srcB
* to make their lengths equal.
* Instead, (outBlockSize - (srcALen + srcBLen - 1))
* number of output samples are made zero */
j = outBlockSize - (srcALen + (srcBLen - 1u));
/* Updating the pointer position to non zero value */
pOut += j;
}
else
{
/* Initialization of inputA pointer */
pIn1 = (pSrcB);
/* Initialization of inputB pointer */
pIn2 = (pSrcA);
/* srcBLen is always considered as shorter or equal to srcALen */
j = srcBLen;
srcBLen = srcALen;
srcALen = j;
/* CORR(x, y) = Reverse order(CORR(y, x)) */
/* Hence set the destination pointer to point to the last output sample */
pOut = pDst + ((srcALen + srcBLen) - 2u);
/* Destination address modifier is set to -1 */
inc = -1;
}
/* The function is internally
 * divided into three parts according to the number of multiplications that have to
 * take place between the inputA samples and the inputB samples. In the first part of the
 * algorithm, the multiplications increase by one for every iteration.
 * In the second part of the algorithm, srcBLen multiplications are performed in every iteration.
 * In the third part of the algorithm, the multiplications decrease by one
 * for every iteration. */
/* The algorithm is implemented in three stages.
 * The loop counter of each stage is initialized here. */
blockSize1 = srcBLen - 1u;
blockSize2 = srcALen - (srcBLen - 1u);
blockSize3 = blockSize1;
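/* For example (lengths assumed as above), with srcALen = 5 and srcBLen = 3:
 * blockSize1 = 2 (partial overlaps while srcB slides into srcA),
 * blockSize2 = 3 (full overlaps of srcBLen MACs each),
 * blockSize3 = 2 (partial overlaps while srcB slides out of srcA). */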
/* --------------------------
* Initializations of stage1
* -------------------------*/
/* sum = x[0] * y[srcBLen - 1]
 * sum = x[0] * y[srcBLen - 2] + x[1] * y[srcBLen - 1]
* ....
* sum = x[0] * y[0] + x[1] * y[1] +...+ x[srcBLen - 1] * y[srcBLen - 1]
*/
/* In this stage the MAC operations are increased by 1 for every iteration.
The count variable holds the number of MAC operations performed */
count = 1u;
/* Working pointer of inputA */
px = pIn1;
/* Working pointer of inputB */
pSrc1 = pIn2 + (srcBLen - 1u);
py = pSrc1;
/* ------------------------
* Stage1 process
* ----------------------*/
/* The first loop starts here */
while(blockSize1 > 0u)
{
/* Accumulator is made zero for every iteration */
sum = 0;
/* Apply loop unrolling and compute 4 MACs simultaneously. */
k = count >> 2;
/* First part of the processing with loop unrolling. Compute 4 MACs at a time.
** a second loop below computes MACs for the remaining 1 to 3 samples. */
while(k > 0u)
{
/* x[0] * y[srcBLen - 4] , x[1] * y[srcBLen - 3] */
sum = __SMLALD(*__SIMD32(px)++, *__SIMD32(py)++, sum);
/* x[3] * y[srcBLen - 1] , x[2] * y[srcBLen - 2] */
sum = __SMLALD(*__SIMD32(px)++, *__SIMD32(py)++, sum);
/* Decrement the loop counter */
k--;
}
/* If the count is not a multiple of 4, compute any remaining MACs here.
** No loop unrolling is used. */
k = count % 0x4u;
while(k > 0u)
{
/* Perform the multiply-accumulates */
/* x[0] * y[srcBLen - 1] */
sum = __SMLALD(*px++, *py++, sum);
/* Decrement the loop counter */
k--;
}
/* Store the result in the accumulator in the destination buffer. */
*pOut = (q15_t) (__SSAT((sum >> 15), 16));
/* Destination pointer is updated according to the address modifier, inc */
pOut += inc;
/* Update the inputA and inputB pointers for next MAC calculation */
py = pSrc1 - count;
px = pIn1;
/* Increment the MAC count */
count++;
/* Decrement the loop counter */
blockSize1--;
}
/* --------------------------
* Initializations of stage2
* ------------------------*/
/* sum = x[0] * y[0] + x[1] * y[1] +...+ x[srcBLen-1] * y[srcBLen-1]
* sum = x[1] * y[0] + x[2] * y[1] +...+ x[srcBLen] * y[srcBLen-1]
* ....
* sum = x[srcALen-srcBLen-2] * y[0] + x[srcALen-srcBLen-1] * y[1] +...+ x[srcALen-1] * y[srcBLen-1]
*/
/* Working pointer of inputA */
px = pIn1;
/* Working pointer of inputB */
py = pIn2;
/* Initialize inputB pointer of type q31 */
pb = (q31_t *) (py);
/* count is the index by which the pointer pIn1 is to be incremented */
count = 0u;
/* -------------------
* Stage2 process
* ------------------*/
/* Stage2 depends on srcBLen, as srcBLen MACs are performed for each output in this stage.
 * So, to unroll the loop over blockSize2,
 * srcBLen should be greater than or equal to 4 */
if(srcBLen >= 4u)
{
/* Loop unroll over blockSize2, by 4 */
blkCnt = blockSize2 >> 2u;
while(blkCnt > 0u)
{
/* Set all accumulators to zero */
acc0 = 0;
acc1 = 0;
acc2 = 0;
acc3 = 0;
/* read x[0], x[1] samples */
x0 = *(q31_t *) (px++);
/* read x[1], x[2] samples */
x1 = *(q31_t *) (px++);
/* Apply loop unrolling and compute 4 MACs simultaneously. */
k = srcBLen >> 2u;
/* First part of the processing with loop unrolling. Compute 4 MACs at a time.
** a second loop below computes MACs for the remaining 1 to 3 samples. */
do
{
/* Read the first two inputB samples using SIMD:
* y[0] and y[1] */
c0 = *(pb++);
/* acc0 += x[0] * y[0] + x[1] * y[1] */
acc0 = __SMLALD(x0, c0, acc0);
/* acc1 += x[1] * y[0] + x[2] * y[1] */
acc1 = __SMLALD(x1, c0, acc1);
/* Read x[2], x[3] */
x2 = *(q31_t *) (px++);
/* Read x[3], x[4] */
x3 = *(q31_t *) (px++);
/* acc2 += x[2] * y[0] + x[3] * y[1] */
acc2 = __SMLALD(x2, c0, acc2);
/* acc3 += x[3] * y[0] + x[4] * y[1] */
acc3 = __SMLALD(x3, c0, acc3);
/* Read y[2] and y[3] */
c0 = *(pb++);
/* acc0 += x[2] * y[2] + x[3] * y[3] */
acc0 = __SMLALD(x2, c0, acc0);
/* acc1 += x[3] * y[2] + x[4] * y[3] */
acc1 = __SMLALD(x3, c0, acc1);
/* Read x[4], x[5] */
x0 = *(q31_t *) (px++);
/* Read x[5], x[6] */
x1 = *(q31_t *) (px++);
/* acc2 += x[4] * y[2] + x[5] * y[3] */
acc2 = __SMLALD(x0, c0, acc2);
/* acc3 += x[5] * y[2] + x[6] * y[3] */
acc3 = __SMLALD(x1, c0, acc3);
} while(--k);
/* For the next MAC operations, SIMD is not used.
 * So, the 16-bit pointer of inputB, py, is updated */
py = (q15_t *) (pb);
/* If the srcBLen is not a multiple of 4, compute any remaining MACs here.
** No loop unrolling is used. */
k = srcBLen % 0x4u;
if(k == 1u)
{
/* Read y[4] */
c0 = *py;
#ifdef ARM_MATH_BIG_ENDIAN
c0 = c0 << 16u;
#else
c0 = c0 & 0x0000FFFF;
#endif /* #ifdef ARM_MATH_BIG_ENDIAN */
/* Read x[7] */
x3 = *(q31_t *) px++;
/* Perform the multiply-accumulates */
acc0 = __SMLALD(x0, c0, acc0);
acc1 = __SMLALD(x1, c0, acc1);
acc2 = __SMLALDX(x1, c0, acc2);
acc3 = __SMLALDX(x3, c0, acc3);
}
if(k == 2u)
{
/* Read y[4], y[5] */
c0 = *(pb);
/* Read x[7], x[8] */
x3 = *(q31_t *) px++;
/* Read x[9] */
x2 = *(q31_t *) px++;
/* Perform the multiply-accumulates */
acc0 = __SMLALD(x0, c0, acc0);
acc1 = __SMLALD(x1, c0, acc1);
acc2 = __SMLALD(x3, c0, acc2);
acc3 = __SMLALD(x2, c0, acc3);
}
if(k == 3u)
{
/* Read y[4], y[5] */
c0 = *pb++;
/* Read x[7], x[8] */
x3 = *(q31_t *) px++;
/* Read x[9] */
x2 = *(q31_t *) px++;
/* Perform the multiply-accumulates */
acc0 = __SMLALD(x0, c0, acc0);
acc1 = __SMLALD(x1, c0, acc1);
acc2 = __SMLALD(x3, c0, acc2);
acc3 = __SMLALD(x2, c0, acc3);
/* Read y[6] */
#ifdef ARM_MATH_BIG_ENDIAN
c0 = (*pb);
c0 = c0 & 0xFFFF0000;
#else
c0 = (q15_t) (*pb);
c0 = c0 & 0x0000FFFF;
#endif /* #ifdef ARM_MATH_BIG_ENDIAN */
/* Read x[10] */
x3 = *(q31_t *) px++;
/* Perform the multiply-accumulates */
acc0 = __SMLALDX(x1, c0, acc0);
acc1 = __SMLALD(x2, c0, acc1);
acc2 = __SMLALDX(x2, c0, acc2);
acc3 = __SMLALDX(x3, c0, acc3);
}
/* Store the result in the accumulator in the destination buffer. */
*pOut = (q15_t) (__SSAT(acc0 >> 15, 16));
/* Destination pointer is updated according to the address modifier, inc */
pOut += inc;
*pOut = (q15_t) (__SSAT(acc1 >> 15, 16));
pOut += inc;
*pOut = (q15_t) (__SSAT(acc2 >> 15, 16));
pOut += inc;
*pOut = (q15_t) (__SSAT(acc3 >> 15, 16));
pOut += inc;
/* Increment the count by 4 as 4 output values are computed */
count += 4u;
/* Update the inputA and inputB pointers for next MAC calculation */
px = pIn1 + count;
py = pIn2;
pb = (q31_t *) (py);
/* Decrement the loop counter */
blkCnt--;
}
/* If the blockSize2 is not a multiple of 4, compute any remaining output samples here.
** No loop unrolling is used. */
blkCnt = blockSize2 % 0x4u;
while(blkCnt > 0u)
{
/* Accumulator is made zero for every iteration */
sum = 0;
/* Apply loop unrolling and compute 4 MACs simultaneously. */
k = srcBLen >> 2u;
/* First part of the processing with loop unrolling. Compute 4 MACs at a time.
** a second loop below computes MACs for the remaining 1 to 3 samples. */
while(k > 0u)
{
/* Perform the multiply-accumulates */
sum += ((q63_t) * px++ * *py++);
sum += ((q63_t) * px++ * *py++);
sum += ((q63_t) * px++ * *py++);
sum += ((q63_t) * px++ * *py++);
/* Decrement the loop counter */
k--;
}
/* If the srcBLen is not a multiple of 4, compute any remaining MACs here.
** No loop unrolling is used. */
k = srcBLen % 0x4u;
while(k > 0u)
{
/* Perform the multiply-accumulates */
sum += ((q63_t) * px++ * *py++);
/* Decrement the loop counter */
k--;
}
/* Store the result in the accumulator in the destination buffer. */
*pOut = (q15_t) (__SSAT(sum >> 15, 16));
/* Destination pointer is updated according to the address modifier, inc */
pOut += inc;
/* Increment count by 1, as one output value is computed */
count++;
/* Update the inputA and inputB pointers for next MAC calculation */
px = pIn1 + count;
py = pIn2;
/* Decrement the loop counter */
blkCnt--;
}
}
else
{
/* If srcBLen is less than 4,
 * the blockSize2 loop cannot be unrolled by 4 */
blkCnt = blockSize2;
while(blkCnt > 0u)
{
/* Accumulator is made zero for every iteration */
sum = 0;
/* Loop over srcBLen */
k = srcBLen;
while(k > 0u)
{
/* Perform the multiply-accumulate */
sum += ((q63_t) * px++ * *py++);
/* Decrement the loop counter */
k--;
}
/* Store the result in the accumulator in the destination buffer. */
*pOut = (q15_t) (__SSAT(sum >> 15, 16));
/* Destination pointer is updated according to the address modifier, inc */
pOut += inc;
/* Increment the MAC count */
count++;
/* Update the inputA and inputB pointers for next MAC calculation */
px = pIn1 + count;
py = pIn2;
/* Decrement the loop counter */
blkCnt--;
}
}
/* --------------------------
* Initializations of stage3
* -------------------------*/
/* sum += x[srcALen-srcBLen+1] * y[0] + x[srcALen-srcBLen+2] * y[1] +...+ x[srcALen-1] * y[srcBLen-1]
* sum += x[srcALen-srcBLen+2] * y[0] + x[srcALen-srcBLen+3] * y[1] +...+ x[srcALen-1] * y[srcBLen-1]
* ....
* sum += x[srcALen-2] * y[0] + x[srcALen-1] * y[1]
* sum += x[srcALen-1] * y[0]
*/
/* In this stage the MAC operations are decreased by 1 for every iteration.
The count variable holds the number of MAC operations performed */
count = srcBLen - 1u;
/* Working pointer of inputA */
pSrc1 = (pIn1 + srcALen) - (srcBLen - 1u);
px = pSrc1;
/* Working pointer of inputB */
py = pIn2;
/* -------------------
* Stage3 process
* ------------------*/
while(blockSize3 > 0u)
{
/* Accumulator is made zero for every iteration */
sum = 0;
/* Apply loop unrolling and compute 4 MACs simultaneously. */
k = count >> 2u;
/* First part of the processing with loop unrolling. Compute 4 MACs at a time.
** a second loop below computes MACs for the remaining 1 to 3 samples. */
while(k > 0u)
{
/* Perform the multiply-accumulates */
/* sum += x[srcALen - srcBLen + 4] * y[3] , sum += x[srcALen - srcBLen + 3] * y[2] */
sum = __SMLALD(*__SIMD32(px)++, *__SIMD32(py)++, sum);
/* sum += x[srcALen - srcBLen + 2] * y[1] , sum += x[srcALen - srcBLen + 1] * y[0] */
sum = __SMLALD(*__SIMD32(px)++, *__SIMD32(py)++, sum);
/* Decrement the loop counter */
k--;
}
/* If the count is not a multiple of 4, compute any remaining MACs here.
** No loop unrolling is used. */
k = count % 0x4u;
while(k > 0u)
{
/* Perform the multiply-accumulates */
sum = __SMLALD(*px++, *py++, sum);
/* Decrement the loop counter */
k--;
}
/* Store the result in the accumulator in the destination buffer. */
*pOut = (q15_t) (__SSAT((sum >> 15), 16));
/* Destination pointer is updated according to the address modifier, inc */
pOut += inc;
/* Update the inputA and inputB pointers for next MAC calculation */
px = ++pSrc1;
py = pIn2;
/* Decrement the MAC count */
count--;
/* Decrement the loop counter */
blockSize3--;
}
#else
/* Run the below code for Cortex-M0 */
q15_t *pIn1 = pSrcA; /* inputA pointer */
q15_t *pIn2 = pSrcB + (srcBLen - 1u); /* inputB pointer */
q63_t sum; /* Accumulators */
uint32_t i = 0u, j; /* loop counters */
uint32_t inv = 0u; /* Reverse order flag */
uint32_t tot = 0u; /* Length */
/* The algorithm implementation is based on the lengths of the inputs. */
/* srcB is always made to slide across srcA. */
/* So srcBLen is always considered to be shorter than or equal to srcALen */
/* But CORR(x, y) is the reverse of CORR(y, x) */
/* So, when srcBLen > srcALen, the output pointer is made to point to the end of the output buffer */
/* and a variable, inv, is set to 1 */
/* If the lengths are not equal, then zero padding has to be done to make the two
 * inputs the same length. But to improve the performance, we include zeros
 * in the output instead of zero padding either of the inputs */
/* If srcALen > srcBLen, (srcALen - srcBLen) zeros have to be included at the
 * start of the output buffer */
/* If srcALen < srcBLen, (srcBLen - srcALen) zeros have to be included at the
 * end of the output buffer */
/* Once the zero padding is done, the remainder of the output is calculated
 * using convolution but with the shorter signal time shifted. */
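/* Illustrative example (lengths assumed for clarity): with srcALen = 5 and srcBLen = 3,
 * tot = 5 + 3 - 2 = 6, so the loop below produces tot + 1 = 7 output samples; the
 * 2 remaining positions of the 9-sample output are the implied zeros handled by the
 * pointer adjustment below. */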
/* Calculate the length of the remaining sequence */
tot = ((srcALen + srcBLen) - 2u);
if(srcALen > srcBLen)
{
/* Calculating the number of zeros to be padded to the output */
j = srcALen - srcBLen;
/* Initialise the pointer after zero padding */
pDst += j;
}
else if(srcALen < srcBLen)
{
/* Initialization to inputB pointer */
pIn1 = pSrcB;
/* Initialization to the end of inputA pointer */
pIn2 = pSrcA + (srcALen - 1u);
/* Initialisation of the pointer after zero padding */
pDst = pDst + tot;
/* Swapping the lengths */
j = srcALen;
srcALen = srcBLen;
srcBLen = j;
/* Setting the reverse flag */
inv = 1;
}
/* Loop to calculate the convolution for each of the output samples */
for (i = 0u; i <= tot; i++)
{
/* Initialize sum with zero to carry on MAC operations */
sum = 0;
/* Loop to perform MAC operations according to convolution equation */
for (j = 0u; j <= i; j++)
{
/* Check the array limitations */
if((((i - j) < srcBLen) && (j < srcALen)))
{
/* z[i] += x[i-j] * y[j] */
sum += ((q31_t) pIn1[j] * pIn2[-((int32_t) i - j)]);
}
}
/* Store the output in the destination buffer */
if(inv == 1)
*pDst-- = (q15_t) __SSAT((sum >> 15u), 16u);
else
*pDst++ = (q15_t) __SSAT((sum >> 15u), 16u);
}
#endif /* #ifndef ARM_MATH_CM0 */
}
/**
* @} end of Corr group
*/