$ pyspark

>>> df = sc.parallelize([{'a': 1, 'b':2, 'c':3}, {'a':8, 'b':5, 'c':6}, {'a':3, 'b':1, 'c':0}]).toDF().cache()

>>> df

DataFrame[a: bigint, b: bigint, c: bigint]

>>> df.columns

['a', 'b', 'c']

>>> from functools import reduce   # required on Python 3; reduce is a builtin only on Python 2

>>> def column_add(a, b):

...     return a.__add__(b)

...

>>> df.withColumn('total', reduce(column_add, ( df[col] for col in df.columns ) )).collect()

[Row(a=1, b=2, c=3, total=6), Row(a=8, b=5, c=6, total=19), Row(a=3, b=1, c=0, total=4)]