Skip to content

Commit 04921e5

Browse files
adding uml and sequence diagrams
1 parent fb0e144 commit 04921e5

1 file changed

Lines changed: 233 additions & 0 deletions

File tree

README.md

Lines changed: 233 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -388,6 +388,239 @@ graph TB
388388
style BrowserCache fill:#f3e5f5
389389
```
390390

391+
### UML Class Diagram
392+
393+
```mermaid
394+
classDiagram
395+
class ChatInterface {
396+
-messages: ChatMessage[]
397+
-inputText: string
398+
-isLoading: boolean
399+
+sendMessage(text: string): void
400+
+clearChat(): void
401+
+handleFileUpload(files: File[]): void
402+
}
403+
404+
class ModelManager {
405+
-currentModel: string
406+
-availableModels: ModelInfo[]
407+
-loadingProgress: number
408+
+loadModel(modelId: string): Promise~void~
409+
+switchModel(modelId: string): void
410+
+getModelInfo(): ModelInfo
411+
+checkModelLoaded(): boolean
+generateResponse(messages: Message[]): Promise~string~
}
412+
413+
class RAGService {
414+
-vectorStore: VectorStore
415+
-embeddingService: EmbeddingService
416+
+addDocument(doc: Document): Promise~void~
417+
+search(query: string): Promise~SearchResult[]~
418+
+updateSettings(settings: RAGSettings): void
419+
+indexDocument(chunks: DocumentChunk[]): Promise~void~
}
420+
421+
class VectorStore {
422+
-embeddings: Map~string, Vector~
423+
-documents: Map~string, DocumentChunk~
424+
+addEmbedding(id: string, vector: Vector): void
425+
+findSimilar(query: Vector, k: number): SearchResult[]
426+
+clear(): void
427+
}
428+
429+
class EmbeddingService {
430+
-model: EmbeddingModel
431+
+generateEmbedding(text: string): Promise~Vector~
432+
+batchEmbed(texts: string[]): Promise~Vector[]~
433+
}
434+
435+
class DocumentProcessor {
436+
+processFile(file: File): Promise~ProcessedDocument~
437+
+extractText(buffer: ArrayBuffer, type: string): Promise~string~
438+
+chunkDocument(text: string, options: ChunkOptions): DocumentChunk[]
439+
}
440+
441+
class WebLLMCore {
442+
-engine: MLCEngine
443+
-config: EngineConfig
444+
+initializeEngine(): Promise~void~
445+
+chat(messages: Message[]): Promise~string~
446+
+streamChat(messages: Message[]): AsyncGenerator~string~
447+
}
448+
449+
class ChatStore {
450+
-sessions: ChatSession[]
451+
-currentSession: ChatSession
452+
+addMessage(message: ChatMessage): void
453+
+createSession(): ChatSession
454+
+loadSession(id: string): void
455+
+saveToLocalStorage(): void
456+
}
457+
458+
class DocumentStore {
459+
-documents: Document[]
460+
-chunks: DocumentChunk[]
461+
+addDocument(doc: Document): void
462+
+removeDocument(id: string): void
463+
+getChunksByDocument(docId: string): DocumentChunk[]
463+
+getChunkDetails(chunkIds: string[]): DocumentChunk[]
}
465+
466+
class ThemeStore {
467+
-currentTheme: string
468+
-availableThemes: Theme[]
469+
+setTheme(theme: string): void
470+
+getTheme(): Theme
471+
}
472+
473+
ChatInterface --> ChatStore : uses
474+
ChatInterface --> ModelManager : uses
475+
ChatInterface --> RAGService : uses
476+
ChatInterface --> DocumentProcessor : processes uploaded files
477+
478+
ModelManager --> WebLLMCore : manages
479+
480+
RAGService --> VectorStore : stores vectors
481+
RAGService --> EmbeddingService : generates embeddings
482+
RAGService --> DocumentStore : retrieves documents
483+
484+
DocumentProcessor --> DocumentStore : stores processed docs
485+
486+
ChatStore --> WebLLMCore : sends messages
487+
488+
VectorStore --> IndexedDB : persists data
489+
DocumentStore --> IndexedDB : persists data
490+
ChatStore --> LocalStorage : persists sessions
491+
ThemeStore --> LocalStorage : persists theme
492+
493+
class IndexedDB {
494+
<<external>>
495+
+put(store: string, data: any): Promise~void~
496+
+get(store: string, key: string): Promise~any~
497+
+delete(store: string, key: string): Promise~void~
498+
}
499+
500+
class LocalStorage {
501+
<<external>>
502+
+setItem(key: string, value: string): void
503+
+getItem(key: string): string | null
504+
+removeItem(key: string): void
505+
}
506+
```
507+
508+
### LLM Access Sequence Diagram
509+
510+
```mermaid
511+
sequenceDiagram
512+
participant User
513+
participant ChatInterface
514+
participant ChatStore
515+
participant ModelManager
516+
participant WebLLMCore
517+
participant WebGPU
518+
participant BrowserCache
519+
520+
User->>ChatInterface: Types message and sends
521+
ChatInterface->>ChatStore: addMessage(userMessage)
522+
ChatStore->>ChatStore: Save to LocalStorage
523+
ChatInterface->>ModelManager: checkModelLoaded()
524+
525+
alt Model not loaded
526+
ModelManager->>WebLLMCore: initializeEngine()
527+
WebLLMCore->>BrowserCache: Check for cached model
528+
alt Model cached
529+
BrowserCache-->>WebLLMCore: Return cached model
530+
else Model not cached
531+
WebLLMCore->>WebLLMCore: Download model from CDN
532+
WebLLMCore->>BrowserCache: Cache model
533+
end
534+
WebLLMCore->>WebGPU: Load model to GPU
535+
WebGPU-->>WebLLMCore: Model ready
536+
WebLLMCore-->>ModelManager: Engine initialized
537+
end
538+
539+
ChatInterface->>ChatStore: getConversationHistory()
540+
ChatStore-->>ChatInterface: Return messages[]
541+
ChatInterface->>ModelManager: generateResponse(messages)
542+
ModelManager->>WebLLMCore: streamChat(messages)
543+
WebLLMCore->>WebGPU: Process tokens
544+
545+
loop Streaming response
546+
WebGPU-->>WebLLMCore: Generated tokens
547+
WebLLMCore-->>ModelManager: Token stream
548+
ModelManager-->>ChatInterface: Update response
549+
ChatInterface->>ChatInterface: Display streaming text
550+
end
551+
552+
ChatInterface->>ChatStore: addMessage(aiResponse)
553+
ChatStore->>ChatStore: Save to LocalStorage
554+
ChatInterface-->>User: Display complete response
555+
```
556+
557+
### RAG Access Sequence Diagram
558+
559+
```mermaid
560+
sequenceDiagram
561+
participant User
562+
participant ChatInterface
563+
participant DocumentProcessor
564+
participant RAGService
565+
participant EmbeddingService
566+
participant VectorStore
567+
participant DocumentStore
568+
participant IndexedDB
569+
570+
Note over User,ChatInterface: Document Upload Flow
571+
User->>ChatInterface: Uploads document
572+
ChatInterface->>DocumentProcessor: processFile(file)
573+
574+
alt PDF Document
575+
DocumentProcessor->>DocumentProcessor: PDF.js extraction
576+
else DOCX Document
577+
DocumentProcessor->>DocumentProcessor: Mammoth.js conversion
578+
else Text/Markdown
579+
DocumentProcessor->>DocumentProcessor: Direct text processing
580+
end
581+
582+
DocumentProcessor->>DocumentProcessor: chunkDocument(text, options)
583+
DocumentProcessor->>DocumentStore: addDocument(processedDoc)
584+
DocumentStore->>IndexedDB: persistDocument()
585+
586+
DocumentProcessor->>RAGService: indexDocument(chunks)
587+
588+
loop For each chunk
589+
RAGService->>EmbeddingService: generateEmbedding(chunkText)
590+
EmbeddingService->>EmbeddingService: TF-IDF calculation
591+
EmbeddingService-->>RAGService: Return vector
592+
RAGService->>VectorStore: addEmbedding(chunkId, vector)
593+
end
594+
595+
VectorStore->>IndexedDB: persistEmbeddings()
596+
RAGService-->>ChatInterface: Document indexed
597+
ChatInterface-->>User: Show upload success
598+
599+
Note over User,ChatInterface: Query with RAG Flow
600+
User->>ChatInterface: Sends question
601+
ChatInterface->>RAGService: search(userQuery)
602+
RAGService->>EmbeddingService: generateEmbedding(query)
603+
EmbeddingService-->>RAGService: Query vector
604+
605+
RAGService->>VectorStore: findSimilar(queryVector, k)
606+
VectorStore->>VectorStore: Calculate cosine similarity
607+
VectorStore-->>RAGService: Top k chunks
608+
609+
RAGService->>DocumentStore: getChunkDetails(chunkIds)
610+
DocumentStore->>IndexedDB: fetchChunks()
611+
IndexedDB-->>DocumentStore: Chunk data
612+
DocumentStore-->>RAGService: Enriched chunks
613+
614+
RAGService->>RAGService: Format context
615+
RAGService-->>ChatInterface: SearchResults + Context
616+
617+
ChatInterface->>ChatInterface: Augment prompt with context
618+
ChatInterface->>ModelManager: generateResponse(augmentedPrompt)
619+
Note right of ModelManager: Continue with LLM flow
620+
621+
ChatInterface-->>User: Display response with sources
622+
```
623+
391624
## Performance Notes
392625

393626
- First model load may take 1-5 minutes depending on internet speed

0 commit comments

Comments
 (0)