VirtualOasis committed
Commit 46c3ea1 · 1 Parent(s): acd33ac
app.py CHANGED
@@ -16,7 +16,7 @@ STYLE_CHOICES = [
 ]
 
 VIDEO_MODEL_CHOICES = [
-    ("Wan 2.1 (fal-ai)", "Wan-AI/Wan2.1-T2V-14B"),
+    ("Wan 2.2 TI2V (fal-ai)", "Wan-AI/Wan2.2-TI2V-5B"),
     ("LTX Video 0.9.7", "Lightricks/LTX-Video-0.9.7-distilled"),
     ("Hunyuan Video 1.5", "tencent/HunyuanVideo-1.5"),
     ("CogVideoX 5B", "THUDM/CogVideoX-5b"),
cinegen/character_engine.py CHANGED
@@ -29,16 +29,26 @@ class CharacterDesigner:
     def design(self, storyboard: Storyboard) -> Tuple[List[Tuple[str, str]], Storyboard]:
         gallery: List[Tuple[str, str]] = []
         for character in storyboard.characters:
-            image_path = None
-            if self.client:
-                image_path = self._try_generate(character, storyboard.style)
-            if not image_path:
-                image_path = synthesize_character_card(character, storyboard.style)
-            character.reference_image = image_path
-            caption = f"{character.name} — {character.role}"
-            gallery.append((image_path, caption))
+            gallery.append(self._refresh_reference(character, storyboard.style))
         return gallery, storyboard
 
+    def redesign_character(self, storyboard: Storyboard, character_id: str) -> Tuple[Tuple[str, str], Storyboard]:
+        target = next((char for char in storyboard.characters if char.identifier == character_id), None)
+        if not target:
+            raise ValueError(f"Character {character_id} not found.")
+        card = self._refresh_reference(target, storyboard.style)
+        return card, storyboard
+
+    def _refresh_reference(self, character, style: str) -> Tuple[str, str]:
+        image_path = None
+        if self.client:
+            image_path = self._try_generate(character, style)
+        if not image_path:
+            image_path = synthesize_character_card(character, style)
+        character.reference_image = image_path
+        caption = f"{character.name} — {character.role}"
+        return image_path, caption
+
     def _try_generate(self, character, style: str) -> Optional[str]:  # pragma: no cover
         prompt = (
             f"Create a portrait for {character.name}, a {character.role} in a {style} short film. "
cinegen/placeholders.py CHANGED
@@ -43,13 +43,22 @@ def _slugify(text: str) -> str:
     return safe or "cinegen"
 
 
+def normalize_scene_count(scene_count: int | float | str | None) -> int:
+    try:
+        value = int(float(scene_count))
+    except (TypeError, ValueError):
+        return 3
+    return max(1, value)
+
+
 def build_stub_storyboard(
     idea: str,
     style: str,
-    scene_count: int,
+    scene_count: int | float | str,
     inspiration_hint: str | None,
 ) -> Storyboard:
-    random.seed(_slugify(idea) + style + str(scene_count))
+    normalized_scenes = normalize_scene_count(scene_count)
+    random.seed(_slugify(idea) + style + str(normalized_scenes))
     title = idea.title() if idea else f"{style} Short"
     synopsis = (
         f"A {style.lower()} short that transforms the idea '{idea or 'mystery cue'}' "
@@ -57,7 +66,7 @@ def build_stub_storyboard(
     )
     characters: List[CharacterSpec] = []
     for idx, (role, desc) in enumerate(CHARACTER_ARCHETYPES):
-        if idx >= 3 and scene_count <= 3:
+        if idx >= 3 and normalized_scenes <= 3:
             break
         identifier = f"CHAR-{idx+1}"
         name = f"{role} {random.choice(string.ascii_uppercase)}"
@@ -75,7 +84,7 @@ def build_stub_storyboard(
     )
 
     scenes: List[SceneBeat] = []
-    for idx in range(scene_count):
+    for idx in range(normalized_scenes):
         label = SCENE_TITLES[idx % len(SCENE_TITLES)]
         scene_id = f"SCENE-{idx+1}"
         visuals = (
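A quick sanity sketch of normalize_scene_count's coercion rules as defined above (the inputs below are illustrative):

from cinegen.placeholders import normalize_scene_count

assert normalize_scene_count(5) == 5          # ints pass through
assert normalize_scene_count(4.0) == 4        # floats truncate via int(float(...))
assert normalize_scene_count("6") == 6        # numeric strings parse
assert normalize_scene_count(None) == 3       # TypeError falls back to 3 scenes
assert normalize_scene_count("lots") == 3     # ValueError falls back to 3 scenes
assert normalize_scene_count(0) == 1          # clamped to at least one scene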
cinegen/story_engine.py CHANGED
@@ -5,7 +5,11 @@ import os
 from typing import Any, Dict, Optional
 
 from .models import Storyboard, CharacterSpec, SceneBeat
-from .placeholders import build_stub_storyboard, describe_image_reference
+from .placeholders import (
+    build_stub_storyboard,
+    describe_image_reference,
+    normalize_scene_count,
+)
 
 DEFAULT_STORY_MODEL = os.environ.get("CINEGEN_STORY_MODEL", "gemini-2.5-flash")
 
@@ -32,18 +36,19 @@ class StoryGenerator:
         self,
         idea: str,
         style: str,
-        scene_count: int,
+        scene_count: int | float | str,
         inspiration_path: Optional[str] = None,
     ) -> Storyboard:
+        scene_total = normalize_scene_count(scene_count)
         if not self.client:
             return build_stub_storyboard(
                 idea=idea,
                 style=style,
-                scene_count=scene_count,
+                scene_count=scene_total,
                 inspiration_hint=describe_image_reference(inspiration_path),
             )
 
-        prompt = self._build_prompt(idea, style, scene_count)
+        prompt = self._build_prompt(idea, style, scene_total)
         contents = [prompt]
         parts = self._maybe_add_image_part(inspiration_path)
         contents = parts + contents if parts else contents
@@ -64,7 +69,7 @@ class StoryGenerator:
             return build_stub_storyboard(
                 idea=idea,
                 style=style,
-                scene_count=scene_count,
+                scene_count=scene_total,
                 inspiration_hint=describe_image_reference(inspiration_path),
             )
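With normalization handled inside the generator, a caller can forward the raw widget value; a minimal sketch, where the method name generate and the no-argument constructor are assumptions (only the parameter names are visible in this hunk):

from cinegen.story_engine import StoryGenerator

generator = StoryGenerator()        # assumed no-argument constructor
storyboard = generator.generate(    # "generate" is an assumed name; the def line is not shown
    idea="a lighthouse keeper who collects storms",
    style="Neo-noir",
    scene_count="5",                # raw UI value; normalize_scene_count coerces it to 5
    inspiration_path=None,
)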
 
cinegen/video_engine.py CHANGED
@@ -2,21 +2,28 @@ from __future__ import annotations
 
 import os
 import tempfile
-import time
-from typing import List, Optional, Sequence, Tuple
+from typing import Dict, List, Optional, Sequence, Tuple
 
-import requests
+from huggingface_hub import InferenceClient
 
 from .models import SceneBeat, Storyboard
 from .placeholders import create_placeholder_video
 
 DEFAULT_VIDEO_MODELS = [
-    "Wan-AI/Wan2.1-T2V-14B",
+    "Wan-AI/Wan2.2-TI2V-5B",
     "Lightricks/LTX-Video-0.9.7-distilled",
    "tencent/HunyuanVideo-1.5",
     "THUDM/CogVideoX-5b",
 ]
 
+MODEL_PROVIDER_OVERRIDES: Dict[str, Optional[str]] = {
+    "Wan-AI/Wan2.2-TI2V-5B": "fal-ai",
+}
+
+MIN_FRAMES = 16
+MAX_FRAMES = 240
+FRAMES_PER_SECOND = 8
+
 
 class VideoDirector:
     def __init__(
@@ -59,29 +66,25 @@ class VideoDirector:
     def _call_hf_inference(self, prompt: str, model_id: str, duration: int) -> str:
         if not self.token:
             raise RuntimeError("Missing Hugging Face token")
-
-        url = f"https://api-inference.huggingface.co/models/{model_id}"
-        headers = {
-            "Authorization": f"Bearer {self.token}",
-            "Accept": "video/mp4",
-        }
-        payload = {
-            "inputs": prompt,
-            "parameters": {"duration": duration},
-        }
-        for _ in range(3):
-            response = requests.post(url, headers=headers, json=payload, timeout=600)
-            if response.status_code == 200:
-                tmp_dir = tempfile.mkdtemp(prefix="cinegen-video-")
-                path = os.path.join(tmp_dir, f"{model_id.split('/')[-1]}.mp4")
-                with open(path, "wb") as handle:
-                    handle.write(response.content)
-                return path
-            if response.status_code in (503, 529, 202):
-                time.sleep(5)
-                continue
-            raise RuntimeError(f"{response.status_code}: {response.text[:120]}")
-        raise RuntimeError("Model busy")
+        client = self._build_client(model_id)
+        frames = max(MIN_FRAMES, min(MAX_FRAMES, int(duration * FRAMES_PER_SECOND)))
+        video_bytes = client.text_to_video(
+            prompt,
+            model=model_id,
+            num_frames=frames,
+        )
+        tmp_dir = tempfile.mkdtemp(prefix="cinegen-video-")
+        path = os.path.join(tmp_dir, f"{model_id.split('/')[-1]}.mp4")
+        with open(path, "wb") as handle:
+            handle.write(video_bytes)
+        return path
+
+    def _build_client(self, model_id: str) -> InferenceClient:
+        provider = MODEL_PROVIDER_OVERRIDES.get(model_id)
+        kwargs = {"token": self.token}
+        if provider:
+            kwargs["provider"] = provider
+        return InferenceClient(**kwargs)
 
     @staticmethod
     def _compose_prompt(storyboard: Storyboard, scene: SceneBeat) -> str:
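For context, a standalone sketch of the flow that the new _call_hf_inference/_build_client pair wraps, calling huggingface_hub's InferenceClient directly; the HF_TOKEN variable, prompt text, and duration below are placeholders, and the provider routing mirrors MODEL_PROVIDER_OVERRIDES:

import os
from huggingface_hub import InferenceClient  # assumes a release with provider routing support

token = os.environ.get("HF_TOKEN")            # placeholder env var name
client = InferenceClient(provider="fal-ai", token=token)

duration = 4                                   # seconds, illustrative
frames = max(16, min(240, int(duration * 8)))  # MIN_FRAMES / MAX_FRAMES / FRAMES_PER_SECOND

video_bytes = client.text_to_video(
    "A rain-soaked alley lit by flickering neon",  # illustrative prompt
    model="Wan-AI/Wan2.2-TI2V-5B",
    num_frames=frames,
)
with open("scene.mp4", "wb") as handle:
    handle.write(video_bytes)                  # returned payload is raw video bytes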