On 31.10.24 12:16, Jan Beulich wrote:
On 23.10.2024 15:10, Juergen Gross wrote:
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -154,6 +154,57 @@ void domain_reset_states(void)
      rcu_read_unlock(&domlist_read_lock);
  }
+static void set_domain_state_info(struct xen_domctl_get_domain_state *info,
+                                  const struct domain *d)
+{
+    info->state = XEN_DOMCTL_GETDOMSTATE_STATE_EXIST;
+    if ( d->is_shut_down )
+        info->state |= XEN_DOMCTL_GETDOMSTATE_STATE_SHUTDOWN;
+    if ( d->is_dying == DOMDYING_dead )
+        info->state |= XEN_DOMCTL_GETDOMSTATE_STATE_DYING;
+    info->unique_id = d->unique_id;
+}
+
+int get_domain_state(struct xen_domctl_get_domain_state *info, struct domain *d)
+{
+    unsigned int dom;
+
+    memset(info, 0, sizeof(*info));

Would this better go into set_domain_state_info()? Ah, no, you ...

+    if ( d )
+    {
+        set_domain_state_info(info, d);
+
+        return 0;
+    }
+
+    while ( (dom = find_first_bit(dom_state_changed, DOMID_MASK + 1)) <
+            DOMID_FIRST_RESERVED )
+    {
+        d = rcu_lock_domain_by_id(dom);

... acquiring the lock early and then ...

+        if ( test_and_clear_bit(dom, dom_state_changed) )
+        {
+            info->domid = dom;
+            if ( d )
+            {
+                set_domain_state_info(info, d);

... potentially bypassing the call (with just the domid set) requires it
that way.

As to the point in time when the lock is acquired: Why is that, seeing that
it complicates the unlocking a little, by requiring a 2nd unlock a few
lines down?

I agree this can be simplified.


+                rcu_unlock_domain(d);
+            }
+
+            return 0;
+        }
+
+        if ( d )
+        {
+            rcu_unlock_domain(d);
+        }

Nit: No need for the braces.
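
A possible shape of the simplification (just a sketch on my side, not a
concrete follow-up patch): take the RCU lock only once the change indicator
has actually been consumed, which leaves a single unlock path:

    while ( (dom = find_first_bit(dom_state_changed, DOMID_MASK + 1)) <
            DOMID_FIRST_RESERVED )
    {
        if ( !test_and_clear_bit(dom, dom_state_changed) )
            continue;

        info->domid = dom;

        /* Lock the domain only after claiming its state change. */
        d = rcu_lock_domain_by_id(dom);
        if ( d )
        {
            set_domain_state_info(info, d);
            rcu_unlock_domain(d);
        }

        return 0;
    }

If the domain is already gone by the time the bit is claimed, only the domid
is reported, as before.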

--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -969,11 +969,18 @@ static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
  static DEFINE_SPINLOCK(global_virq_handlers_lock);

+struct domain *get_global_virq_handler(uint32_t virq)
+{
+    ASSERT(virq_is_global(virq));
+
+    return global_virq_handlers[virq] ?: hardware_domain;
+}
+
  void send_global_virq(uint32_t virq)
  {
      ASSERT(virq_is_global(virq));
-    send_guest_global_virq(global_virq_handlers[virq] ?: hardware_domain, virq);
+    send_guest_global_virq(get_global_virq_handler(virq), virq);
  }

Is this a stale leftover from an earlier version? There's no other caller of
get_global_virq_handler() here, hence the change looks unmotivated here.

I think it is indeed stale now.


@@ -1236,7 +1237,37 @@ struct xen_domctl_dt_overlay {
  };
  #endif
+/*
+ * XEN_DOMCTL_get_domain_state (stable interface)
+ *
+ * Get state information of a domain.
+ *
+ * In case domain is DOMID_INVALID, return information about a domain having
+ * changed state and reset the state change indicator for that domain. This
+ * function is usable only by a domain having registered the VIRQ_DOM_EXC
+ * event (normally Xenstore).
+ *
+ * Supported interface versions: 0x00000000
+ */
+#define XEN_DOMCTL_GETDOMSTATE_VERS_MAX    0
+struct xen_domctl_get_domain_state {
+    domid_t domid;

Despite the DOMID_INVALID special case the redundant domid here is odd.
You actually add the new sub-op to the special casing of op->domain at the
top of do_domctl(), so the sole difference to most other sub-ops would be
that this then is an IN/OUT (rather than the field here being an output
only when DOMID_INVALID was passed in via the common domid field).

The main idea was to have all data returned by get_domain_state() in struct
xen_domctl_get_domain_state. I'm fine either way.

But I think this discussion is moot now, as it seems we are switching to a
new hypercall anyway, where we probably will have all data in per sub-op
structs.


+    uint16_t state;
+#define XEN_DOMCTL_GETDOMSTATE_STATE_EXIST     0x0001  /* Domain is existing. */
+#define XEN_DOMCTL_GETDOMSTATE_STATE_SHUTDOWN  0x0002  /* Shutdown finished. */
+#define XEN_DOMCTL_GETDOMSTATE_STATE_DYING     0x0004  /* Domain dying. */
+    uint32_t pad1;           /* Returned as 0. */
+    uint64_t unique_id;      /* Unique domain identifier. */
+    uint64_t pad2[6];        /* Returned as 0. */
+};

What are the intentions with this padding array?

The idea was to allow returning additional domain data in the future without
having to extend the struct.
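
For illustration, a rough caller-side sketch of how the VIRQ_DOM_EXC handler
(normally Xenstore) could drain pending state changes via this sub-op. The
libxc-style xc_domctl() wrapper, the u.get_domain_state union member name and
the "fails once nothing is pending" loop-exit convention are assumptions here,
not something this patch defines:

    #include <xenctrl.h>

    /* Illustrative only: call after a VIRQ_DOM_EXC notification. */
    static void drain_domain_state_changes(xc_interface *xch)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_get_domain_state,
            .domain = DOMID_INVALID,      /* report any changed domain */
        };

        while ( xc_domctl(xch, &domctl) == 0 )
        {
            const struct xen_domctl_get_domain_state *st =
                &domctl.u.get_domain_state;

            if ( !(st->state & XEN_DOMCTL_GETDOMSTATE_STATE_EXIST) )
            {
                /* Domain vanished: clean up its Xenstore state. */
            }
            else if ( st->state & XEN_DOMCTL_GETDOMSTATE_STATE_SHUTDOWN )
            {
                /* Domain finished shutdown: trigger the usual handling. */
            }

            /* st->domid plus st->unique_id identify the exact instance. */
        }
    }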


Juergen
