|
# Use the native inference API to create an image with SDXL 1.0.
#
# Invokes Stability AI's Stable Diffusion XL 1.0 on Amazon Bedrock with a
# text prompt, decodes the base64 image from the response, and saves it to
# ./output/image_<i>.png without overwriting earlier results.

import base64
import json
import os
import random  # NOTE(review): unused here; kept in case a caller relies on it being imported.

import boto3

# Create a Bedrock Runtime client in the AWS Region of your choice.
client = boto3.client("bedrock-runtime", region_name="us-west-2")

# Set the model ID, e.g., Stable Diffusion XL 1.0.
model_id = "stability.stable-diffusion-xl-v1"

# Define the image generation prompt for the model.
prompt = "Lion in the Jungle"

# Format the request payload using the model's native structure.
# seed=0 makes the generation deterministic for a given prompt/settings.
native_request = {
    "text_prompts": [{"text": prompt, "weight": 1}],
    "cfg_scale": 10,
    "steps": 50,
    "seed": 0,
    "width": 1024,
    "height": 1024,
    "samples": 1,
}

# Convert the native request to JSON.
request = json.dumps(native_request)

# Invoke the model with the request.
response = client.invoke_model(modelId=model_id, body=request)

# Decode the response body (a streaming body; read() yields the JSON bytes).
model_response = json.loads(response["body"].read())

# Extract the base64-encoded image data from the first artifact.
base64_image_data = model_response["artifacts"][0]["base64"]

# Save the generated image to a local folder, picking the first unused
# image_<i>.png filename so previous outputs are preserved.
i, output_dir = 1, "output"
os.makedirs(output_dir, exist_ok=True)  # idempotent; avoids the check-then-create race
while os.path.exists(os.path.join(output_dir, f"image_{i}.png")):
    i += 1

image_data = base64.b64decode(base64_image_data)

image_path = os.path.join(output_dir, f"image_{i}.png")
with open(image_path, "wb") as file:
    file.write(image_data)

print(f"The generated image has been saved to {image_path}.")
0 commit comments