Hops and data from Webcam

Hello, I tried some code to detect hands and fingers in real time with a webcam; it works fine in Python, but through Hops, Grasshopper only receives the first data.
Is that possible with Hops?

from flask import Flask
import ghhops_server as hs
import rhino3dm
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands


# register hops app as middleware
app = Flask(__name__)
hops = hs.Hops(app)

@hops.component(
    "/hand",
    name="hand_motion",
    description="detect hand motion",
    #icon="",
    inputs=[
        hs.HopsString("Image", "Image", "image", hs.HopsParamAccess.LIST),
        hs.HopsNumber("Zscale", "Zscale")
    ],
    outputs=[
        hs.HopsPoint("Points", "Points"),
        hs.HopsNumber("width","Width"),
        hs.HopsNumber("height","Height"),
        hs.HopsString("Index","Index"),
        #hs.HopsString("Score","s"),
        hs.HopsString("Label","Label")
    ]
)
def hand(img, n):
    file_list = img
    with mp_hands.Hands(
            static_image_mode=True,
            max_num_hands=2,
            min_detection_confidence=0.5) as hands:
        for idx, file in enumerate(file_list):
            # Read an image, flip it around y-axis for correct handedness output (see
            # above).
            image = cv2.flip(cv2.imread(file), 1)
            # Convert the BGR image to RGB before processing.
            results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            ind = []
            lab = []
            # Skip images with no detected hands before reading the handedness
            # results (multi_handedness is None in that case).
            if not results.multi_hand_landmarks:
                continue
            for classifications in results.multi_handedness:
                index = [classification.index for classification in classifications.classification]
                #score = [classification.score for classification in classifications.classification]
                label = [classification.label for classification in classifications.classification]
                ind.append(index)
                #scr.append(score)
                lab.append(label)
            image_height, image_width, _ = image.shape
            w = image_width
            h = image_height
            point = []
            for hand_landmarks in results.multi_hand_landmarks:
                pt = [rhino3dm.Point3d(-landmark.x*w, -landmark.y*h, -landmark.z*n) for landmark in hand_landmarks.landmark]
                point.append(pt)
            if len(point) == 2:
                points = point[0]+point[1]
                indexes = ind[0]+ind[1]
                labels = lab[0]+lab[1]
            elif len(point) == 1:
                points = point[0]
                indexes = ind[0]
                labels = lab[0]
        return points, w, h, indexes, labels

@hops.component(
    "/hands",
    name="hands_motion",
    description="hand motion video",
    #icon="",
    inputs=[
        hs.HopsNumber("Zscale","Zscale")
    ],
    outputs=[
        hs.HopsPoint("Points", "Points"),
        hs.HopsNumber("width","Width"),
        hs.HopsNumber("height","Height"),
    ]
)
def hands(n):
    # For webcam input:
    cap = cv2.VideoCapture(0)
    with mp_hands.Hands(
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                continue

            # Flip the image horizontally for a later selfie-view display, and convert
            # the BGR image to RGB.
            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            results = hands.process(image)

            # Draw the hand annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            image_height, image_width, _ = image.shape
            w = image_width
            h = image_height
            point = []
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    pt = [rhino3dm.Point3d(-landmark.x * w, -landmark.y * h, -landmark.z * n) for landmark in
                          hand_landmarks.landmark]
                    point.append(pt)
                if len(point) == 2:
                    points = point[0] + point[1]
                elif len(point) == 1:
                    points = point[0]
                # Return as soon as a hand is found; the component only ever
                # delivers this one frame to Hops per solve.
                return points, w, h
            if cv2.waitKey(5) & 0xFF == 27:
                break

    cap.release()

if __name__ == "__main__":
    app.run()

It’s a little hard to tell what you mean by “only receives the first data”, but have you tried attaching a Timer, nowadays called Trigger? The Hops component usually only recomputes when one of its inputs changes (and caching is turned off).
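
If that is the issue, one workaround is to grab a single webcam frame per solve instead of looping inside the component, and let a Trigger re-solve the Hops component at an interval (with caching turned off, so each tick actually reaches the server). Here is a minimal sketch of that idea, assuming the imports from your script; the /hand_frame route, the module-level cap and detector objects, and the single Zscale input are my own illustrative choices, not part of your original code:

# Sketch: return one webcam frame's landmarks per Hops solve, so a Trigger in
# Grasshopper can drive continuous updates. Route name "/hand_frame" and the
# module-level capture/detector objects are assumptions for illustration.
cap = cv2.VideoCapture(0)
detector = mp_hands.Hands(
    max_num_hands=2,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)

@hops.component(
    "/hand_frame",
    name="hand_frame",
    description="detect hand landmarks in a single webcam frame",
    inputs=[
        hs.HopsNumber("Zscale", "Zscale")
    ],
    outputs=[
        hs.HopsPoint("Points", "Points"),
        hs.HopsNumber("width", "Width"),
        hs.HopsNumber("height", "Height"),
    ]
)
def hand_frame(n):
    success, image = cap.read()
    if not success:
        # Camera frame not available; return empty/zero outputs for this solve.
        return [], 0, 0
    # Mirror the frame and convert BGR -> RGB for MediaPipe.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    results = detector.process(image)
    h, w, _ = image.shape
    points = []
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            points += [rhino3dm.Point3d(-lm.x * w, -lm.y * h, -lm.z * n)
                       for lm in hand_landmarks.landmark]
    return points, w, h

Keeping cap and detector at module level avoids reopening the camera and reloading the model on every request; the component itself just processes whatever frame is current when the Trigger fires.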