There is also COUNTIF:

SELECT COUNTIF(x<0) AS num_negative, COUNTIF(x>0) AS num_positive
FROM UNNEST([5, -2, 3, 6, -10, NULL, -7, 4, 0]) AS x;
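
For pivot-style counting, COUNTIF can stand in for the SUM(CASE WHEN ...) pattern used further down. A minimal sketch, assuming the standard SQL name of the public Shakespeare sample used in the other examples here (bigquery-public-data.samples.shakespeare is assumed; the legacy examples below refer to it as publicdata:samples.shakespeare):

#standardSQL
SELECT
  corpus,
  COUNTIF(word = 'brave') AS brave_rows,
  COUNTIF(word = 'attended') AS attended_rows
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus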

Not everyone can use Python or pandas (think of data analysts and BI dudes :). Here is a dynamic pivot procedure written in standard SQL on BigQuery. It does not aggregate yet: you first need to provide a table whose values have already been prepared, if needed. But it automatically creates the output table and generates all of the pivot columns. The procedure and an example CALL appear at the end of this section.

The starting assumption is that you have an input table myDataset.myTable that looks like this:
LONG, LAT, KPI, US, EUR
A, 1, temperature, 78, 45
A, 1, pressure, 120, 114
B, 1, temperature, 12, 8
B, 1, pressure, 85, 52
SELECT * 
FROM publicdata:samples.shakespeare
LIMIT 10;
SELECT
(CASE WHEN word = 'brave' THEN 'BRAVE' ELSE '' END) AS column_1,
(CASE WHEN word = 'attended' THEN 'ATTENDED' ELSE '' END) AS column_2,
SUM (word_count)
FROM publicdata:samples.shakespeare
WHERE (word = 'brave' OR word = 'attended')
GROUP BY column_1, column_2
LIMIT 10;
SELECT
  sum(CASE WHEN word = 'brave' THEN word_count ELSE 0 END) AS brave,
  sum(CASE WHEN word = 'attended' THEN word_count ELSE 0 END) AS attended,
  SUM(word_count) AS total_word_count
FROM publicdata:samples.shakespeare
WHERE (word = 'brave' OR word = 'attended')
#standardSQL
SELECT MoteName
  , TIMESTAMP_TRUNC(Timestamp, hour) hour
  , AVG(IF(SensorName LIKE '%altitude', Data, null)) altitude
  , AVG(IF(SensorName LIKE '%light', Data, null)) light
  , AVG(IF(SensorName LIKE '%mic', Data, null)) mic
  , AVG(IF(SensorName LIKE '%temperature', Data, null)) temperature
FROM `data-sensing-lab.io_sensor_data.moscone_io13`
WHERE MoteName = 'XBee_40670F5F'
GROUP BY 1, 2
SELECT NTH(1, words) WITHIN RECORD column_1, NTH(2, words) WITHIN RECORD column_2, f0_
FROM (
  SELECT NEST(word) words, SUM(c)  
  FROM (
    SELECT word, SUM(word_count) c
    FROM publicdata:samples.shakespeare
    WHERE word in ('brave', 'attended')
    GROUP BY 1
  )
)
SELECT NTH(1, word) column_1, NTH(2, word) column_2, SUM(c)
FROM (
    SELECT word, SUM(word_count) c
    FROM publicdata:samples.shakespeare
    WHERE word in ('brave', 'attended')
    GROUP BY 1
)
SELECT
  MAX(column_1),
  MAX(column_2),
  SUM(wc)
FROM (
  SELECT
  (CASE WHEN word = 'brave' THEN 'BRAVE' ELSE '' END) AS column_1,
  (CASE WHEN word = 'attended' THEN 'ATTENDED' ELSE '' END) AS column_2,
  SUM (word_count) AS wc
  FROM publicdata:samples.shakespeare
  WHERE (word = 'brave' OR word = 'attended')
  GROUP BY column_1, column_2
  LIMIT 10
)
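
The outer MAX/SUM here exists to collapse the two labelled rows produced by the inner GROUP BY into a single row: MAX picks the non-empty label out of each column and SUM adds the per-word counts. The result therefore has the shape (actual numbers depend on the dataset):

BRAVE, ATTENDED, <total word_count for 'brave' and 'attended'>
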
import re
import pandas as pd

class BqPivot():
    """
    Class to generate a SQL query which creates pivoted tables in BigQuery.

    Example
    -------

    The following example uses Kaggle's Titanic data. It can be found here -
    `https://www.kaggle.com/c/titanic/data`

    The data is only 60 KB and is used here purely for demonstration purposes.
    This module comes in particularly handy with huge datasets, for which we would need
    BigQuery (https://en.wikipedia.org/wiki/BigQuery).

    >>> from bq_pivot import BqPivot
    >>> import pandas as pd
    >>> data = pd.read_csv("titanic.csv").head()
    >>> gen = BqPivot(data=data, index_col=["Pclass", "Survived", "PassengerId"],
                      pivot_col="Name", values_col="Age",
                      add_col_nm_suffix=False)
    >>> print(gen.generate_query())

    select Pclass, Survived, PassengerId,
    sum(case when Name = "Braund, Mr. Owen Harris" then Age else 0 end) as braund_mr_owen_harris,
    sum(case when Name = "Cumings, Mrs. John Bradley (Florence Briggs Thayer)" then Age else 0 end) as cumings_mrs_john_bradley_florence_briggs_thayer,
    sum(case when Name = "Heikkinen, Miss. Laina" then Age else 0 end) as heikkinen_miss_laina,
    sum(case when Name = "Futrelle, Mrs. Jacques Heath (Lily May Peel)" then Age else 0 end) as futrelle_mrs_jacques_heath_lily_may_peel,
    sum(case when Name = "Allen, Mr. William Henry" then Age else 0 end) as allen_mr_william_henry
    from <--insert-table-name-here-->
    group by 1,2,3

    """
    def __init__(self, data, index_col, pivot_col, values_col, agg_fun="sum",
                 table_name=None, not_eq_default="0", add_col_nm_suffix=True, custom_agg_fun=None,
                 prefix=None, suffix=None):
        """
        Parameters
        ----------

        data: pandas.core.frame.DataFrame or string
            The input data can either be a pandas dataframe or a string path to a csv file
            containing the data. The only requirement of this data is that it must have the
            column on which the pivot is to be done.

        index_col: list
            The names of the index columns in the query (the columns on which the group by needs to be performed)

        pivot_col: string
            The name of the column on which the pivot needs to be done.

        values_col: string
            The name of the column on which aggregation needs to be performed.

        agg_fun: string
            The name of the sql aggregation function.

        table_name: string
            The name of the table in the query.

        not_eq_default: numeric, optional
            The value to take when the case when statement is not satisfied. For example,
            if one is doing a sum aggregation on the value column then the not_eq_default should
            be equal to 0. Because the case statement part of the sql query would look like - 

            ... ...
            sum(case when <pivot_col> = <some_pivot_col_value> then values_col else 0 end)
            ... ...

            Similarly if the aggregation function is min then the not_eq_default should be
            positive infinity.

        add_col_nm_suffix: boolean, optional
            If True, the original values column name will be added as a suffix to the new
            pivoted column names.

        custom_agg_fun: string, optional
            Can be used if one wants to provide a customized aggregation function. The values col
            name should be replaced with {}. For example, if we want an aggregation function like
            sum(coalesce(values_col, 0)) then the custom_agg_fun argument would be
            sum(coalesce({}, 0)).
            If provided, this overrides the agg_fun argument (a sketch of the resulting SQL is
            shown after the class definition).

        prefix: string, optional
            A fixed string to add as a prefix in the pivoted column names separated by an
            underscore.

        suffix: string, optional
            A fixed string to add as a suffix in the pivoted column names separated by an
            underscore.        
        """
        self.query = ""

        self.index_col = list(index_col)
        self.values_col = values_col
        self.pivot_col = pivot_col

        self.not_eq_default = not_eq_default
        self.table_name = self._get_table_name(table_name)

        self.piv_col_vals = self._get_piv_col_vals(data)
        self.piv_col_names = self._create_piv_col_names(add_col_nm_suffix, prefix, suffix)

        self.function = custom_agg_fun if custom_agg_fun else agg_fun + "({})"

    def _get_table_name(self, table_name):
        """
        Returns the table name or a placeholder if the table name is not provided.
        """
        return table_name if table_name else "<--insert-table-name-here-->"

    def _get_piv_col_vals(self, data):
        """
        Gets all the unique values of the pivot column.
        """
        if isinstance(data, pd.DataFrame):
            self.data = data
        elif isinstance(data, str):
            self.data = pd.read_csv(data)
        else:
            raise ValueError("Provided data must be a pandas dataframe or a csv file path.")

        if self.pivot_col not in self.data.columns:
            raise ValueError("The provided data must have the column on which pivot is to be done. "\
                             "Also make sure that the column name in the data is same as the name "\
                             "provided to the pivot_col parameter.")

        return self.data[self.pivot_col].astype(str).unique().tolist()

    def _clean_col_name(self, col_name):
        """
        The pivot column values can have arbitrary strings but in order to 
        convert them to column names some cleaning is required. This method 
        takes a string as input and returns a clean column name.
        """

        # replace spaces with underscores
        # remove non alpha numeric characters other than underscores
        # replace multiple consecutive underscores with one underscore
        # make all characters lower case
        # remove trailing underscores
        return re.sub("_+", "_", re.sub('[^0-9a-zA-Z_]+', '', re.sub(" ", "_", col_name))).lower().rstrip("_")

    def _create_piv_col_names(self, add_col_nm_suffix, prefix, suffix):
        """
        This method creates the list of pivot column names for the new pivoted table.
        """
        prefix = prefix + "_" if prefix else ""
        suffix = "_" + suffix if suffix else ""

        if add_col_nm_suffix:
            piv_col_names = ["{0}{1}_{2}{3}".format(prefix, self._clean_col_name(piv_col_val), self.values_col.lower(), suffix)
                             for piv_col_val in self.piv_col_vals]
        else:
            piv_col_names = ["{0}{1}{2}".format(prefix, self._clean_col_name(piv_col_val), suffix)
                             for piv_col_val in self.piv_col_vals]

        return piv_col_names

    def _add_select_statement(self):
        """
        Adds the select statement part of the query.
        """
        query = "select " + "".join([index_col + ", " for index_col in self.index_col]) + "\n"
        return query

    def _add_case_statement(self):
        """
        Adds the case statement part of the query.
        """
        case_query = self.function.format("case when {0} = \"{1}\" then {2} else {3} end") + " as {4},\n"

        query = "".join([case_query.format(self.pivot_col, piv_col_val, self.values_col,
                                           self.not_eq_default, piv_col_name)
                         for piv_col_val, piv_col_name in zip(self.piv_col_vals, self.piv_col_names)])

        query = query[:-2] + "\n"
        return query

    def _add_from_statement(self):
        """
        Adds the from statement part of the query.
        """
        query =  "from {0}\n".format(self.table_name)
        return query

    def _add_group_by_statement(self):
        """
        Adds the group by part of the query.
        """
        query = "group by " + "".join(["{0},".format(x) for x in range(1, len(self.index_col) + 1)])
        return query[:-1]

    def generate_query(self):
        """
        Returns the query to create the pivoted table.
        """
        self.query = self._add_select_statement() +\
                     self._add_case_statement() +\
                     self._add_from_statement() +\
                     self._add_group_by_statement()

        return self.query

    def write_query(self, output_file):
        """
        Writes the query to a text file.
        """
        with open(output_file, "w") as text_file:
            text_file.write(self.generate_query())
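
As a sketch of what the class generates when custom_agg_fun is used: assuming the same arguments as the docstring example above but with custom_agg_fun="sum(coalesce({}, 0))", each pivoted column in the generated query would look like

sum(coalesce(case when Name = "Braund, Mr. Owen Harris" then Age else 0 end, 0)) as braund_mr_owen_harris,
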
CALL warehouse.pivot ('myDataset','myTable',['LONG','LAT'],  'KPI');
create or replace procedure warehouse.pivot (dataset STRING, table_to_pivot STRING, ls_pks ARRAY<STRING>, pivot_column STRING)
BEGIN
  DECLARE sql_pivot STRING;
  DECLARE sql_pk_string STRING;
  DECLARE sql_val_string STRING;
  DECLARE sql_pivot_cols STRING DEFAULT "";
  DECLARE pivot_cols_stmt STRING;
  DECLARE pivot_ls_values ARRAY<STRING>;
  DECLARE ls_pivot_value_columns ARRAY<STRING>;
  DECLARE nb_pivot_col_values INT64;
  DECLARE nb_pivot_val_values INT64;
  DECLARE loop_index INT64 DEFAULT 0;
  DECLARE loop2_index INT64 DEFAULT 0;

  SET sql_pk_string= ( array_to_string(ls_pks,',') ) ;
  /* get the values of the pivot column to prepare the new columns in the output */
  SET pivot_cols_stmt = concat(
          'SELECT array_agg(DISTINCT cast(', pivot_column ,' as string) ORDER BY ', pivot_column,' ) as pivot_ls_values, ',
          'count(distinct ',pivot_column,') as nb_pivot_col_values ',
          ' FROM ', dataset,'.', table_to_pivot
         );
  EXECUTE IMMEDIATE pivot_cols_stmt into pivot_ls_values, nb_pivot_col_values;

  /* get the names of the value columns to prepare the new columns in the output */
  set sql_val_string =concat(
    "select array_agg(COLUMN_NAME) as ls_pivot_value_columns, count(distinct COLUMN_NAME) as nb_pivot_val_values ",
    "FROM ",dataset,".INFORMATION_SCHEMA.COLUMNS where TABLE_NAME='",table_to_pivot,"' ",
    "and COLUMN_NAME not in ('",array_to_string(ls_pks,"','"),"', '",pivot_column,"')"
  );
  EXECUTE IMMEDIATE sql_val_string
  into ls_pivot_value_columns, nb_pivot_val_values  ;

  /*create statement to populate the new columns*/
  while loop_index < nb_pivot_col_values DO
      set loop2_index =0;
      loop
              SET sql_pivot_cols= concat (
                                    sql_pivot_cols,
                                    "max( ",
                                    "if( ", pivot_column , "= '",pivot_ls_values[OFFSET (loop_index)],"' , ", ls_pivot_value_columns[OFFSET (loop2_index)], ", null) ",
                                    ") as ", pivot_ls_values[OFFSET (loop_index)], "_", ls_pivot_value_columns[OFFSET (loop2_index)],", "
                                  );

        SET loop2_index = loop2_index +1;
        if loop2_index >= nb_pivot_val_values then
          break;
        end if;
      END LOOP;

    SET loop_index =loop_index+ 1;
  END WHILE;

  SET sql_pivot =concat (
      "create or replace TABLE ", dataset,".",table_to_pivot,"_pivot as SELECT ",
      sql_pk_string, ",", sql_pivot_cols, " FROM ",dataset,".", table_to_pivot ,
      " GROUP BY ", sql_pk_string
   );

  EXECUTE IMMEDIATE sql_pivot;
END;
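
Given the example input table above and the CALL shown before the procedure, the generated myDataset.myTable_pivot would look roughly like this (a sketch: the value-column names US and EUR are assumed from the example table, and the order of the generated columns is not guaranteed because the procedure aggregates the INFORMATION_SCHEMA column names without an ORDER BY):

LONG, LAT, pressure_US, pressure_EUR, temperature_US, temperature_EUR
A, 1, 120, 114, 78, 45
B, 1, 85, 52, 12, 8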