Commit 9e68a85

Updated

1 parent 8dba919 commit 9e68a85

10 files changed: +444 −9 lines changed

121 KB (binary file)
Lines changed: 5 additions & 0 deletions

1. A client error occurred: generation_error
sagemaker_inference.errors.BaseInferenceToolkitError: (400, 'generation_error', 'image dimensions must be multiples of 64, got 300x253')

2. A client error occurred: generation_error
sagemaker_inference.errors.BaseInferenceToolkitError: (400, 'generation_error', 'image too large, 1590226>1048576 pixels')
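
Both failures follow directly from the constraints named in the error messages: the model rejects reference images whose width or height is not a multiple of 64, and anything over 1,048,576 total pixels. A minimal pre-processing sketch (using Pillow; prepare_init_image is a hypothetical helper, not part of any AWS SDK) that brings an image within both limits before encoding:

# A sketch of pre-processing for the two errors logged above.
# Assumptions: Pillow is installed; prepare_init_image is a hypothetical helper.
import math
from PIL import Image

MAX_PIXELS = 1_048_576  # the pixel budget named in the second error message

def prepare_init_image(path):
    """Resize an image so both sides are multiples of 64 and the
    total pixel count stays within MAX_PIXELS."""
    image = Image.open(path)
    width, height = image.size

    # Scale down first if the image exceeds the pixel budget.
    if width * height > MAX_PIXELS:
        scale = math.sqrt(MAX_PIXELS / (width * height))
        width, height = int(width * scale), int(height * scale)

    # Round each side down to the nearest multiple of 64 (minimum 64).
    width = max(64, width - width % 64)
    height = max(64, height - height % 64)

    return image.resize((width, height))

Under this scheme the 300x253 image from the first error would come out as 256x192.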
Lines changed: 114 additions & 0 deletions

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Shows how to generate an image from a reference image with SDXL 1.0 (on demand).
"""
import base64
import io
import json
import logging

import boto3
from PIL import Image
from botocore.exceptions import ClientError


class ImageError(Exception):
    """Custom exception for errors returned by SDXL."""

    def __init__(self, message):
        self.message = message


logger = logging.getLogger(__name__)


def generate_image(model_id, body):
    """
    Generate an image using SDXL 1.0 on demand.
    Args:
        model_id (str): The model ID to use.
        body (str): The request body to use.
    Returns:
        image_bytes (bytes): The image generated by the model.
    """

    logger.info("Generating image with SDXL model %s", model_id)

    bedrock = boto3.client(service_name='bedrock-runtime')

    accept = "application/json"
    content_type = "application/json"

    response = bedrock.invoke_model(
        body=body, modelId=model_id, accept=accept, contentType=content_type
    )
    response_body = json.loads(response.get("body").read())
    print(response_body['result'])

    base64_image = response_body.get("artifacts")[0].get("base64")
    base64_bytes = base64_image.encode('ascii')
    image_bytes = base64.b64decode(base64_bytes)

    finish_reason = response_body.get("artifacts")[0].get("finishReason")

    if finish_reason in ('ERROR', 'CONTENT_FILTERED'):
        raise ImageError(f"Image generation error. Error code is {finish_reason}")

    logger.info("Successfully generated image with the SDXL 1.0 model %s", model_id)

    return image_bytes


def main():
    """
    Entrypoint for SDXL example.
    """

    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)s: %(message)s")

    model_id = 'stability.stable-diffusion-xl-v1'

    prompt = """Corporate Meeting room with lights on."""

    # Read the reference image from file and encode it as a base64 string.
    with open("D:/VSCode/GitRepos/ExampleFlaskApp/static/images/bill.png", "rb") as image_file:
        init_image = base64.b64encode(image_file.read()).decode('utf8')

    # Create the request body.
    body = json.dumps({
        "text_prompts": [
            {
                "text": prompt
            }
        ],
        "init_image": init_image,
        "style_preset": "isometric"
    })

    try:
        image_bytes = generate_image(model_id=model_id, body=body)
        image = Image.open(io.BytesIO(image_bytes))
        image.show()

    except ClientError as err:
        message = err.response["Error"]["Message"]
        logger.error("A client error occurred: %s", message)
        print("A client error occurred: " + message)
    except ImageError as err:
        logger.error(err.message)
        print(err.message)

    else:
        print(f"Finished generating image with SDXL model {model_id}.")


if __name__ == "__main__":
    main()
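
The request body above sends only the prompt, the reference image, and a style preset. The Stability SDXL interface exposed through Bedrock also documents tuning fields such as cfg_scale, steps, seed, and image_strength; a hedged sketch of a fuller body follows (the values are illustrative, not recommendations, so check the current model documentation before relying on them):

# A sketch of a fuller SDXL request body; field names follow Stability's
# documented SDXL parameters on Bedrock, values are illustrative only.
body = json.dumps({
    "text_prompts": [{"text": prompt}],
    "init_image": init_image,
    "style_preset": "isometric",
    "image_strength": 0.5,  # how strongly the init image constrains the output
    "cfg_scale": 10,        # prompt adherence
    "steps": 30,            # diffusion steps
    "seed": 42              # fixed seed for reproducible output
})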
Lines changed: 190 additions & 0 deletions

{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": false,
    "editable": false,
    "trusted": true
   },
   "source": [
    "\n",
    "# Glue Studio Notebook\n",
    "You are now running a **Glue Studio** notebook; before you can start using your notebook you *must* start an interactive session.\n",
    "\n",
    "## Available Magics\n",
    "| Magic | Type | Description |\n",
    "|-----------------------------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
    "| %%configure | Dictionary | A json-formatted dictionary consisting of all configuration parameters for a session. Each parameter can be specified here or through individual magics. |\n",
    "| %profile | String | Specify a profile in your aws configuration to use as the credentials provider. |\n",
    "| %iam_role | String | Specify an IAM role to execute your session with. |\n",
    "| %region | String | Specify the AWS region in which to initialize a session. |\n",
    "| %session_id | String | Returns the session ID for the running session. |\n",
    "| %connections | List | Specify a comma separated list of connections to use in the session. |\n",
    "| %additional_python_modules | List | Comma separated list of pip packages, s3 paths or private pip arguments. |\n",
    "| %extra_py_files | List | Comma separated list of additional Python files from S3. |\n",
    "| %extra_jars | List | Comma separated list of additional Jars to include in the cluster. |\n",
    "| %number_of_workers | Integer | The number of workers of a defined worker_type that are allocated when a job runs. worker_type must be set too. |\n",
    "| %worker_type | String | Standard, G.1X, *or* G.2X. number_of_workers must be set too. Default is G.1X. |\n",
    "| %glue_version | String | The version of Glue to be used by this session. Currently, the only valid options are 2.0 and 3.0 (eg: %glue_version 2.0). |\n",
    "| %security_config | String | Define a security configuration to be used with this session. |\n",
    "| %sql | String | Run SQL code. All lines after the initial %%sql magic will be passed as part of the SQL code. |\n",
    "| %streaming | String | Changes the session type to Glue Streaming. |\n",
    "| %etl | String | Changes the session type to Glue ETL. |\n",
    "| %status | | Returns the status of the current Glue session including its duration, configuration and executing user / role. |\n",
    "| %stop_session | | Stops the current session. |\n",
    "| %list_sessions | | Lists all currently running sessions by name and ID. |\n",
    "| %spark_conf | String | Specify custom spark configurations for your session. E.g. %spark_conf spark.serializer=org.apache.spark.serializer.KryoSerializer |"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true,
    "vscode": {
     "languageId": "python_glue_session"
    }
   },
   "outputs": [],
   "source": [
    "# Adding required libraries and extra jars to the job - # <------- PLEASE REPLACE ${BUCKET_NAME} BELOW!!!\n",
    "\n",
    "%extra_py_files s3://${BUCKET_NAME}/library/pycountry_convert.zip\n",
    "%extra_jars s3://crawler-public/json/serde/json-serde.jar\n",
    "\n",
    "# Adding required properties to the job - # <------- PLEASE REPLACE ${BUCKET_NAME} BELOW!!!\n",
    "\n",
    "%%configure\n",
    "{\n",
    "    \"--enable-spark-ui\": \"true\",\n",
    "    \"--spark-event-logs-path\": \"s3://${BUCKET_NAME}/output/lab3/sparklog/\",\n",
    "    \"max_retries\": \"0\"\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "editable": true,
    "trusted": true,
    "vscode": {
     "languageId": "python_glue_session"
    }
   },
   "outputs": [],
   "source": [
    "# Importing all the basic Glue and Spark libraries\n",
    "\n",
    "import sys\n",
    "from awsglue.transforms import *\n",
    "from awsglue.utils import getResolvedOptions\n",
    "from pyspark.context import SparkContext\n",
    "from awsglue.context import GlueContext\n",
    "from awsglue.dynamicframe import DynamicFrame\n",
    "from awsglue.job import Job\n",
    "\n",
    "# Importing further required libraries\n",
    "\n",
    "from pyspark.sql.functions import udf, col\n",
    "from pyspark.sql.types import IntegerType, StringType\n",
    "from pyspark import SparkContext\n",
    "from pyspark.sql import SQLContext\n",
    "from datetime import datetime\n",
    "\n",
    "# Starting the Spark/Glue context\n",
    "\n",
    "sc = SparkContext.getOrCreate()\n",
    "glueContext = GlueContext(sc)\n",
    "spark = glueContext.spark_session\n",
    "job = Job(glueContext)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true,
    "vscode": {
     "languageId": "python_glue_session"
    }
   },
   "outputs": [],
   "source": [
    "# Importing the pycountry_convert functions from the external python library (pycountry_convert.zip)\n",
    "\n",
    "from pycountry_convert import (\n",
    "    convert_country_alpha2_to_country_name,\n",
    "    convert_country_alpha2_to_continent,\n",
    "    convert_country_name_to_country_alpha2,\n",
    "    convert_country_alpha3_to_country_alpha2,\n",
    ")\n",
    "\n",
    "\n",
    "# Defining the function code\n",
    "def get_country_code2(country_name):\n",
    "    country_code2 = 'US'\n",
    "    try:\n",
    "        country_code2 = convert_country_name_to_country_alpha2(country_name)\n",
    "    except KeyError:\n",
    "        country_code2 = ''\n",
    "    return country_code2\n",
    "\n",
    "# Wrapping the country code lookup in a Spark UDF\n",
    "\n",
    "udf_get_country_code2 = udf(lambda z: get_country_code2(z), StringType())\n",
    "\n",
    "\n",
    "# Reading the dataset into a DataFrame\n",
    "s3_bucket = \"s3://${BUCKET_NAME}/\" # <------- PLEASE REPLACE ONLY THE ${BUCKET_NAME} HERE (Keep the \"s3://\" and the final \"/\" part)!!!\n",
    "job_time_string = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n",
    "\n",
    "df = spark.read.load(s3_bucket + \"input/lab2/sample.csv\",\n",
    "                     format=\"csv\",\n",
    "                     sep=\",\",\n",
    "                     inferSchema=\"true\",\n",
    "                     header=\"true\")\n",
    "\n",
    "# Performing a transformation that adds a new country_code_2 column to the DataFrame based on the UDF output\n",
    "\n",
    "new_df = df.withColumn('country_code_2', udf_get_country_code2(col(\"country\")))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true,
    "vscode": {
     "languageId": "python_glue_session"
    }
   },
   "outputs": [],
   "source": [
    "# Sinking the data into another S3 bucket path\n",
    "\n",
    "new_df.write.csv(s3_bucket + \"output/lab3/notebook/\" + job_time_string + \"/\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Glue PySpark",
   "language": "python",
   "name": "glue_pyspark"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "python",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "Python_Glue_Session",
   "pygments_lexer": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
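
The notebook's UDF wraps pycountry_convert's name-to-alpha-2 lookup and returns an empty string on a miss. A quick way to sanity-check that behavior locally, outside the Glue session (assuming pycountry-convert is pip-installed rather than pulled in via the zip the job uses), is:

# Local sanity check for the notebook's country-code logic;
# assumes `pip install pycountry-convert`.
from pycountry_convert import convert_country_name_to_country_alpha2

def get_country_code2(country_name):
    try:
        return convert_country_name_to_country_alpha2(country_name)
    except KeyError:
        return ''  # unknown names map to an empty code

print(get_country_code2("Germany"))        # DE
print(get_country_code2("Not A Country"))  # empty string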

AWS_ETL_Job_In_Notebook/test.py

Lines changed: 2 additions & 0 deletions

print("hello world")
print("hello world 2")
Lines changed: 20 additions & 0 deletions

create table workerslist (
    ID varchar(100) not null,
    Name varchar(100) not null,
    Description varchar(100) not null,
    Email varchar(100) not null,
    Phone varchar(100) not null,
    Amount varchar(100) not null,
    col0 varchar(100) not null,
    col1 varchar(100) not null,
    col2 varchar(100) not null,
    col3 varchar(100) not null,
    col4 varchar(100) not null,
    col5 varchar(100) not null
);

SELECT *
FROM DataSource2
LEFT JOIN DataSource1 ON DataSource1.col0 = DataSource2.ID;
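
The SELECT assumes DataSource1 and DataSource2 are already queryable names. In the Glue/Spark context used elsewhere in this commit, one plausible way to get there is to register the two CSVs below as temp views; the paths and view names here are assumptions for illustration, mirroring the s3_bucket variable from the notebook:

# A sketch of wiring the two CSVs into the join above via Spark temp views.
# The file paths and view names are assumptions, not part of this commit.
ds1 = spark.read.csv(s3_bucket + "input/datasource1.csv", header=True)
ds2 = spark.read.csv(s3_bucket + "input/datasource2.csv", header=True)
ds1.createOrReplaceTempView("DataSource1")
ds2.createOrReplaceTempView("DataSource2")

joined = spark.sql("""
    SELECT *
    FROM DataSource2
    LEFT JOIN DataSource1 ON DataSource1.col0 = DataSource2.ID
""")
joined.show()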
Lines changed: 18 additions & 0 deletions

ID,Name,Description,Email,Phone,Amount
1,dsfgf,Activity unit,[email protected],9.46E+13,1.23E+15
2,xyz1,Rolling mean employees,[email protected],9.15E+12,1.23E+15
3,xyz2,Salaries and wages paid,[email protected],94534635633,1.23E+15
4,dsvgs,"Sales, government funding, grants and subsidies",[email protected],94534635633,1.23E+15
5,xyz4,Total income,[email protected],94534635633,1.23E+15
6,xyz5,Total expenditure,[email protected],94534635633,1.23E+15
7,dsdf,Operating profit before tax,[email protected],94534635633,1.23E+15
8,xyz8,Total assets,[email protected],94534635633,1.23E+15
9,xyz9,Fixed tangible assets,[email protected],94534635633,1.23E+15
10,xyz10,Activity unit,[email protected],94534635633,1.23E+15
11,zzzz2323241,testing,[email protected],94534635633,1.23E+15
12,ewrwerwee,testing 2,[email protected],94534635633,1.23E+15
13,rtt,fgdfg,[email protected],94534635633,1.23E+15
14,dg,ett,[email protected],94534635633,1.23E+15
15,grgf,dgdg,[email protected],94534635633,1.23E+15
16,rgr,fdsgfsd,[email protected],94534635633,1.23E+15
17,fgg,dgdg,[email protected],94534635633,1.23E+15
Lines changed: 18 additions & 0 deletions

col0,col1,col2,col3,col4,col5
1,dsfgf,Activity unit,[email protected],9.46E+13,1.23E+15
2,xyz1,Rolling mean employees,[email protected],9.15E+12,1.23E+15
3,xyz2,Salaries and wages paid,[email protected],94534635633,1.23E+15
4,dsvgs,"Sales, government funding, grants and subsidies",[email protected],94534635633,1.23E+15
5,xyz4,Total income,[email protected],94534635633,1.23E+15
6,xyz5,Total expenditure,[email protected],94534635633,1.23E+15
7,dsdf,Operating profit before tax,[email protected],94534635633,1.23E+15
8,xyz8,Total assets,[email protected],94534635633,1.23E+15
9,xyz9,Fixed tangible assets,[email protected],94534635633,1.23E+15
10,xyz10,Activity unit,[email protected],94534635633,1.23E+15
11,zzzz2323241,testing,[email protected],94534635633,1.23E+15
12,ewrwerwee,testing 2,[email protected],94534635633,1.23E+15
13,rtt,fgdfg,[email protected],94534635633,1.23E+15
14,dg,ett,[email protected],94534635633,1.23E+15
15,grgf,dgdg,[email protected],94534635633,1.23E+15
16,rgr,fdsgfsd,[email protected],94534635633,1.23E+15
17,fgg,dgdg,[email protected],94534635633,1.23E+15

AWS_RDS_Arora_Serverless/SQLCommands.sql

Lines changed: 0 additions & 9 deletions
This file was deleted.
