Xenova HF Staff commited on
Commit
7c3d308
·
verified ·
1 Parent(s): 6c62a90

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -9
README.md CHANGED
@@ -24,8 +24,7 @@ npm i @huggingface/transformers
24
  import {
25
  Florence2ForConditionalGeneration,
26
  AutoProcessor,
27
- AutoTokenizer,
28
- RawImage,
29
  } from '@huggingface/transformers';
30
 
31
  // Load model, processor, and tokenizer
@@ -39,27 +38,26 @@ const model = await Florence2ForConditionalGeneration.from_pretrained(model_id,
39
  },
40
  });
41
  const processor = await AutoProcessor.from_pretrained(model_id);
42
- const tokenizer = await AutoTokenizer.from_pretrained(model_id);
43
 
44
  // Load image and prepare vision inputs
45
  const url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg';
46
- const image = await RawImage.fromURL(url);
47
- const vision_inputs = await processor(image);
48
 
49
  // Specify task and prepare text inputs
50
  const task = '<MORE_DETAILED_CAPTION>';
51
  const prompts = processor.construct_prompts(task);
52
- const text_inputs = tokenizer(prompts);
 
 
53
 
54
  // Generate text
55
  const generated_ids = await model.generate({
56
- ...text_inputs,
57
- ...vision_inputs,
58
  max_new_tokens: 256,
59
  });
60
 
61
  // Decode generated text
62
- const generated_text = tokenizer.batch_decode(generated_ids, { skip_special_tokens: false })[0];
63
 
64
  // Post-process the generated text
65
  const result = processor.post_process_generation(generated_text, task, image.size);
 
24
  import {
25
  Florence2ForConditionalGeneration,
26
  AutoProcessor,
27
+ load_image,
 
28
  } from '@huggingface/transformers';
29
 
30
  // Load model and processor
 
38
  },
39
  });
40
  const processor = await AutoProcessor.from_pretrained(model_id);
 
41
 
42
  // Load image and prepare vision inputs
43
  const url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg';
44
+ const image = await load_image(url);
 
45
 
46
  // Specify task and prepare text inputs
47
  const task = '<MORE_DETAILED_CAPTION>';
48
  const prompts = processor.construct_prompts(task);
49
+
50
+ // Pre-process the image and text inputs
51
+ const inputs = await processor(image, prompts);
52
 
53
  // Generate text
54
  const generated_ids = await model.generate({
55
+ ...inputs,
 
56
  max_new_tokens: 256,
57
  });
58
 
59
  // Decode generated text
60
+ const generated_text = processor.batch_decode(generated_ids, { skip_special_tokens: false })[0];
61
 
62
  // Post-process the generated text
63
  const result = processor.post_process_generation(generated_text, task, image.size);