Concatenating columns with PySpark on AWS Glue

Goal: concatenate the columns that hold the last name and first name separately, and store the result in a new column.

Import the module

Import pyspark.sql.functions as sf.

import pyspark
from pyspark.sql import functions as sf

Import DynamicFrame

Importing DynamicFrame is required in order to use fromDF. Be careful here: toDF works without this import, so it is easy to miss that fromDF needs it.

from awsglue.dynamicframe import DynamicFrame

Convert the DynamicFrame to a DataFrame

df = dropnullfields3.toDF()
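
If you want to confirm that the expected columns survived the conversion, a quick look at the DataFrame helps (a minimal check; firstname and surname are the column names used in this example, so adjust them to your table):

# Sanity check: print the schema and a few rows of the columns we are about to join
df.printSchema()
df.select('firstname', 'surname').show(5, truncate=False)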

Concatenate the last name and first name into a new joined_name column

df = df.withColumn('joined_name',
                   sf.concat(sf.col('firstname'), sf.lit(' '), sf.col('surname')))
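
Note that sf.concat returns null for the whole value if any of its inputs is null. If you would rather keep the non-null parts, concat_ws is an alternative worth knowing (a sketch, not part of the original job):

# concat_ws joins only the non-null values with the given separator,
# so a missing surname still leaves the firstname in joined_name
df = df.withColumn('joined_name',
                   sf.concat_ws(' ', sf.col('firstname'), sf.col('surname')))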

Convert the DataFrame back to a DynamicFrame

result = DynamicFrame.fromDF(df, glueContext, "result")

Write the result out as Parquet and check it with S3 Select
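
This is the same DataSink step as datasink4 in the full source below; a minimal sketch of the write (the S3 path is the one used in this example, so replace it with your own bucket):

# Write the resulting DynamicFrame to S3 as Parquet
datasink4 = glueContext.write_dynamic_frame.from_options(
    frame = result,
    connection_type = "s3",
    connection_options = {"path": "s3://datalake-test-datacatalog-s3/parquet"},
    format = "parquet",
    transformation_ctx = "datasink4")

Once the files land under that prefix, each Parquet object can be queried with S3 Select to confirm that joined_name was populated.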

Full source

import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job

# Import the pyspark.sql module

import pyspark
from pyspark.sql import functions as sf

from pyspark.sql.types import IntegerType
from pyspark.sql.functions import lit

# Import DynamicFrame (required in order to use fromDF)
from awsglue.dynamicframe import DynamicFrame



## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])

sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

## @type: DataSource
## @args: [database = "from_csv_to_datacatalog", table_name = "from_csv_to_datacatalog_upload_csv", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "from_csv_to_datacatalog", table_name = "from_csv_to_datacatalog_upload_csv", transformation_ctx = "datasource0")

## @type: ApplyMapping
## @args: [mapping = [("id", "long", "id", "long"), ("firstname", "string", "firstname", "string"), ("surname", "string", "surname", "string"), ("firstname_kana", "string", "firstname_kana", "string"), ("surname_kana", "string", "surname_kana", "string"), ("zipcode", "string", "zipcode", "string"), ("prefectures", "string", "prefectures", "string"), ("tel", "string", "tel", "string"), ("email", "string", "email", "string"), ("birthday", "string", "birthday", "date")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
applymapping1 = ApplyMapping.apply(
    frame = datasource0, 
    mappings = [
        ("id", "long", "id", "long"), 
        ("firstname", "string", "firstname", "string"), 
        ("surname", "string", "surname", "string"), 
        ("firstname_kana", "string", "firstname_kana", "string"), 
        ("surname_kana", "string", "surname_kana", "string"), 
        ("zipcode", "string", "zipcode", "string"), 
        ("prefectures", "string", "prefectures", "string"), 
        ("tel", "string", "tel", "string"), 
        ("email", "string", "email", "string"), 
        ("birthday", "string", "birthday", "string")
        ], 
    transformation_ctx = "applymapping1"
    )

## @type: ResolveChoice
## @args: [choice = "make_struct", transformation_ctx = "resolvechoice2"]
## @return: resolvechoice2
## @inputs: [frame = applymapping1]
resolvechoice2 = ResolveChoice.apply(frame = applymapping1, choice = "make_struct", transformation_ctx = "resolvechoice2")

## @type: DropNullFields
## @args: [transformation_ctx = "dropnullfields3"]
## @return: dropnullfields3
## @inputs: [frame = resolvechoice2]
dropnullfields3 = DropNullFields.apply(frame = resolvechoice2, transformation_ctx = "dropnullfields3")

## @type: joined_name
## @args: [transformation_ctx = "dropnullfields3"]
## @return: result
## @inputs: [frame = dropnullfields3]

# Convert the DynamicFrame to a DataFrame so PySpark functions can be used
df = dropnullfields3.toDF()

# Concatenate the first and last names into a new joined_name column
df = df.withColumn('joined_name',
                   sf.concat(sf.col('firstname'), sf.lit(' '), sf.col('surname')))

# Add a new column with the fixed value 0
df = df.withColumn('column_name', lit(0).cast(IntegerType())) 

# Convert the DataFrame back to a DynamicFrame
result = DynamicFrame.fromDF(df, glueContext, "result")

## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": "s3://datalake-test-datacatalog-s3/parquet"}, format = "parquet", transformation_ctx = "datasink4"]
## @return: datasink4
## @inputs: [frame = result]

datasink4 = glueContext.write_dynamic_frame.from_options(frame = result, connection_type = "s3", connection_options = {"path": "s3://datalake-test-datacatalog-s3/parquet"}, format = "parquet", transformation_ctx = "datasink4")

job.commit()

Reference

https://stackoverflow.com/questions/31450846/concatenate-columns-in-apache-spark-dataframe