ppisa commented on code in PR #12081:
URL: https://github.com/apache/nuttx/pull/12081#discussion_r1567871099
##########
arch/arm/src/samv7/sam_qencoder.c:
##########
@@ -270,11 +316,139 @@ static int sam_reset(struct qe_lowerhalf_s *lower)
static int sam_ioctl(struct qe_lowerhalf_s *lower, int cmd,
unsigned long arg)
{
- /* No ioctl commands supported */
-
+#ifdef CONFIG_SAMV7_QENCODER_ENABLE_GETINDEX
+ switch (cmd)
+ {
+ case QEIOC_GETINDEX:
+ {
+ /* Call the qeindex function */
+
+ sam_qeindex(lower, (struct qe_index_s *) arg);
+ return OK;
+ }
+
+ default:
+ {
+ return -ENOTTY;
+ }
+ }
+#else
return -ENOTTY;
+#endif
}
+#ifdef CONFIG_SAMV7_QENCODER_ENABLE_GETINDEX
+/****************************************************************************
+ * Name: sam_qe_pos_16to32b
+ *
+ * Description:
+ * An inline function performing the extension of current position.
+ * Last reading is saved to priv->last_pos.
+ *
+ ****************************************************************************/
+
+static inline int32_t sam_qe_pos_16to32b(struct qe_lowerhalf_s *lower,
+ uint32_t current_pos)
+{
+ struct sam_lowerhalf_s *priv = (struct sam_lowerhalf_s *) lower;
+
+ uint32_t new_pos = *(volatile uint32_t *) &priv->last_pos;
+ new_pos += (int16_t) (current_pos - new_pos);
+ *(volatile uint32_t *) &priv->last_pos = new_pos;
+
+ return (int32_t) new_pos;
+}
+#endif
+
+#ifdef CONFIG_SAMV7_QENCODER_ENABLE_GETINDEX
+/****************************************************************************
+ * Name: sam_qe_indx_pos_16to32b
+ *
+ * Description:
+ * An inline function performing the extension of the last index position.
+ * Last reading is saved to priv->last_index.
+ *
+ ****************************************************************************/
+
+static inline int32_t sam_qe_indx_pos_16to32b(struct qe_lowerhalf_s *lower,
+ uint32_t current_indx_pos)
+{
+ struct sam_lowerhalf_s *priv = (struct sam_lowerhalf_s *) lower;
+
+ uint32_t new_index = *(volatile uint32_t *) &priv->last_pos;
Review Comment:
It is not a problem if the CPU reorders operations; there is only a single
situation which needs to be prevented, and that is when the subtraction is done
against one snapshot of last_pos and the following addition is done against
another value updated by some other thread, an IRQ or anything else. The
volatile modifier on the pointer prevents the compiler from loading the value
of last_pos a second time between the subtraction and the following addition.
It is not probable on ARM, where both operands have to reside in registers
before the add machine instruction, but for example on x86 the sub and add can
be done against the last_pos value accessed by offset addressing against some
base register, so memory is accessed twice.
Volatile ensures that the value is read exactly once from memory and stored in
a local variable (register), and all is fine whatever value is read. There can
be a concern that the value is read by halves or by individual bytes. But that
is something which is silently considered not to happen for the
*(volatile uint32_t *) & combination on a 32-bit target. It is right that for
a 16-bit CPU this could be a problem. If that is a problem for 32-bit ones,
then you cannot use *(volatile uint32_t *) to access registers which can change
their value independently of CPU action, because you cannot get consistent
32-bit data in such a case. If you drop volatile in the discussed case, then
the probability that the value in memory is accessed twice on ARM is low —
zero for GCC with some optimization level set, and low even in the unoptimized
case when a local variable is used.
Combined with the probability that some other parallel thread reads and extends
the value at exactly the same time instant, with its update landing directly
between the subtraction and the addition, which is again extremely low, the
code will work without volatile but is theoretically susceptible to misbehave.
With volatile it should be OK even if multiple cores are used and the CPU does
not keep the value in reorder registers for a time longer than 32k increment
changes take. But yes, on a multi-core PowerPC or Apple M1, when some core
loops in some tight busy loop (for a time around 1 millisecond and more, at
32 mega IRC events/s) and does not read the value from the counter again, I can
imagine that a barrier instruction could be necessary. But on such
architectures you often need to add barrier instructions even to basic reads
and writes into I/O space to prevent reordering of such accesses. So that is a
totally different category than Cortex-M based cores.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]