Python BigQuery allowLargeResults with pandas.io.gbq


I want to use this data in pandas. How do I allow large results?
For non-pandas BigQuery interactions this can be achieved with the allowLargeResults flag.

Current pandas code:

sProjectID = "project-id"
sQuery = '''
    SELECT 
        column1, column2
    FROM [dataset_name.tablename]
'''
from pandas.io import gbq
df = gbq.read_gbq(sQuery, sProjectID)
EDIT: I have posted the proper way to do this in my other answer: drop the data into Google Storage first. That way you will never have data that is too large.


OK, I didn't find a direct way to do it with pandas, so I had to write a little extra using the plain API. Here is my solution (which also does most of the work natively, without pandas):
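
The code for that solution is not reproduced here. Purely as a sketch of the idea (not the original answer's code, and assuming a recent google-cloud-bigquery client), allowing large results through the plain API means running the query as legacy SQL with allowLargeResults set and an explicit destination table; the project, dataset and table names below are placeholders:

from google.cloud import bigquery

client = bigquery.Client(project="project-id")

sQuery = "SELECT column1, column2 FROM [dataset_name.tablename]"  # legacy SQL syntax

# large results must land in a table when allowLargeResults is used
destination = bigquery.TableReference.from_string(
    "project-id.dataset_name.large_result_dropoff"  # hypothetical table
)
job_config = bigquery.QueryJobConfig(
    allow_large_results=True,   # only meaningful for legacy SQL
    use_legacy_sql=True,
    destination=destination,
    write_disposition="WRITE_TRUNCATE",
)

query_job = client.query(sQuery, job_config=job_config)
query_job.result()  # wait for completion; the rows are now in the destination table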


Decided to post the proper way to do this via the python3 google.cloud API. Looking at my previous answer, I see it would fail the way yosemite_k said.

Large results really do need to follow the BigQuery -> Storage -> local -> dataframe pattern.

BigQuery resources:

Storage resources:

Pandas resources:

Installation:

pip install pandas
pip install google-cloud-storage
pip install google-cloud-bigquery
Full implementation (bigquery_to_dataframe.py):


You can do it by changing the default dialect from legacy to standard in the pd.read_gbq function:

pd.read_gbq(query, 'my-super-project', dialect='standard')
Indeed, you can check the BigQuery documentation for the allowLargeResults parameter:

allowLargeResults: For standard SQL queries, this flag is ignored and large results are always allowed.
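
If you do need legacy SQL (where the flag matters), read_gbq also accepts a configuration dictionary that is passed straight through to the underlying query job, so allowLargeResults and a destination table can be set explicitly. A rough sketch only (field names follow the BigQuery jobs REST resource; the project, dataset and table ids are placeholders):

import pandas as pd

# job configuration passed through to the BigQuery query job
config = {
    "query": {
        "allowLargeResults": True,            # only honoured for legacy SQL
        "destinationTable": {                 # required with allowLargeResults
            "projectId": "my-super-project",  # placeholder ids
            "datasetId": "my_dataset",
            "tableId": "large_result_dropoff",
        },
        "writeDisposition": "WRITE_TRUNCATE",
    }
}

query = "SELECT column1, column2 FROM [dataset_name.tablename]"  # legacy SQL
df = pd.read_gbq(query, "my-super-project", dialect="legacy", configuration=config)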


This looks promising. However, I get the error reason: notFound, message: Not found: Table my_project_id:sandbox.api_large_result_dropoff. Do I have to perform a preliminary step before running this code?

You need a BigQuery project (which you seem to be calling my_project_id) and a dataset inside that project (called sandbox above).

This does not work if the raw query results saved in the intermediate table are still too large.

Strange, it worked for me the day I did it. I have since changed my approach, though: I use the API to run all BQ queries, save the results in BQ, then use the API to download them locally (taking care of pagination!), and finally convert to a dataframe. @Tensor please see my other answer. It does everything you need, with no upper limit on data size.

Is this not possible in Python 2.7? It is possible in Python 2.7. @Flair apologies, I always assumed it was Python 3 only, but I just tested it in a Python 2 virtualenv and it works.

I developed a python package (100% test coverage) that follows the approach above. You still need to specify a destination table above a certain size threshold.
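
As a side note on the "download straight through the API" route mentioned above, here is a minimal sketch (placeholder names; this assumes the query results were already saved to a table). The client pages through the rows for you and can hand back a dataframe:

from google.cloud import bigquery

client = bigquery.Client(project="my-project")

# placeholder table holding the saved query results
table_ref = bigquery.TableReference.from_string("my-project.sandbox.api_large_result_dropoff")
table = client.get_table(table_ref)  # also fetches the schema

rows = client.list_rows(table, page_size=50000)  # rows are fetched page by page
df = rows.to_dataframe()
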
"""
We require python 3 for the google cloud python API
    mkvirtualenv --python `which python3` env3
And our dependencies:
    pip install pandas
    pip install google-cloud-bigquery
    pip install google-cloud-storage
"""
import os
import time
import uuid

from google.cloud import bigquery
from google.cloud import storage
import pandas as pd


def bq_to_df(project_id, dataset_id, table_id, storage_uri, local_data_path):
    """Pipeline to get data from BigQuery into a local pandas dataframe.

    :param project_id: Google project ID we are working in.
    :type project_id: str
    :param dataset_id: BigQuery dataset id.
    :type dataset_id: str
    :param table_id: BigQuery table id.
    :type table_id: str
    :param storage_uri: Google Storage uri where data gets dropped off.
    :type storage_uri: str
    :param local_data_path: Path where data should end up.
    :type local_data_path: str
    :return: Pandas dataframe from BigQuery table.
    :rtype: pd.DataFrame
    """
    bq_to_storage(project_id, dataset_id, table_id, storage_uri)

    storage_to_local(project_id, storage_uri, local_data_path)

    data_dir = os.path.join(local_data_path, "test_data")
    df = local_to_df(data_dir)

    return df


def bq_to_storage(project_id, dataset_id, table_id, target_uri):
    """Export a BigQuery table to Google Storage.

    :param project_id: Google project ID we are working in.
    :type project_id: str
    :param dataset_id: BigQuery dataset name where source data resides.
    :type dataset_id: str
    :param table_id: BigQuery table name where source data resides.
    :type table_id: str
    :param target_uri: Google Storage location where table gets saved.
    :type target_uri: str
    :return: The random ID generated to identify the job.
    :rtype: str
    """
    client = bigquery.Client(project=project_id)

    dataset = client.dataset(dataset_name=dataset_id)
    table = dataset.table(name=table_id)

    job = client.extract_table_to_storage(
        str(uuid.uuid4()),  # id we assign to be the job name
        table,
        target_uri
    )
    job.destination_format = 'CSV'
    job.write_disposition = 'WRITE_TRUNCATE'

    job.begin()  # async execution

    if job.errors:
        print(job.errors)

    while job.state != 'DONE':
        time.sleep(5)
        print("exporting '{}.{}' to '{}':  {}".format(
            dataset_id, table_id, target_uri, job.state
        ))
        job.reload()

    print(job.state)

    return job.name


def storage_to_local(project_id, source_uri, target_dir):
    """Save a file or folder from google storage to a local directory.

    :param project_id: Google project ID we are working in.
    :type project_id: str
    :param source_uri: Google Storage location where file comes form.
    :type source_uri: str
    :param target_dir: Local file location where files are to be stored.
    :type target_dir: str
    :return: None
    :rtype: None
    """
    client = storage.Client(project=project_id)

    bucket_name = source_uri.split("gs://")[1].split("/")[0]
    file_path = "/".join(source_uri.split("gs://")[1].split("/")[1::])
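    # NB: lookup_bucket returns None instead of raising if the bucket does not exist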
    bucket = client.lookup_bucket(bucket_name)

    folder_name = "/".join(file_path.split("/")[0:-1]) + "/"
    blobs = [o for o in bucket.list_blobs() if o.name.startswith(folder_name)]

    # get files if we wanted just files
    blob_name = file_path.split("/")[-1]
    if blob_name != "*":
        print("Getting just the file '{}'".format(file_path))
        our_blobs = [o for o in blobs if o.name.endswith(blob_name)]
    else:
        print("Getting all files in '{}'".format(folder_name))
        our_blobs = blobs

    print([o.name for o in our_blobs])

    for blob in our_blobs:
        filename = os.path.join(target_dir, blob.name)

        # create a complex folder structure if necessary
        if not os.path.isdir(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))

        with open(filename, 'wb') as f:
            blob.download_to_file(f)


def local_to_df(data_path):
    """Import local data files into a single pandas dataframe.

    :param data_path: File or folder path where csv data are located.
    :type data_path: str
    :return: Pandas dataframe containing data from data_path.
    :rtype: pd.DataFrame
    """
    # if data_dir is a file, then just load it into pandas
    if os.path.isfile(data_path):
        print("Loading '{}' into a dataframe".format(data_path))
        df = pd.read_csv(data_path, header=0)  # the exported CSV's first row is the header
    elif os.path.isdir(data_path):
        files = [os.path.join(data_path, fi) for fi in os.listdir(data_path)]
        print("Loading {} into a single dataframe".format(files))
        df = pd.concat((pd.read_csv(s) for s in files))
    else:
        raise ValueError(
            "Please enter a valid path.  {} does not exist.".format(data_path)
        )

    return df


if __name__ == '__main__':
    PROJECT_ID = "my-project"
    DATASET_ID = "bq_dataset"
    TABLE_ID = "bq_table"
    STORAGE_URI = "gs://my-bucket/path/for/dropoff/*"
    LOCAL_DATA_PATH = "/path/to/save/"

    bq_to_df(PROJECT_ID, DATASET_ID, TABLE_ID, STORAGE_URI, LOCAL_DATA_PATH)
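
This listing targets an older google-cloud-bigquery release (hence client.extract_table_to_storage and job.begin()). With current releases the export step looks roughly like the sketch below (placeholder names, not a drop-in replacement for the code above):

from google.cloud import bigquery

client = bigquery.Client(project="my-project")
source = bigquery.TableReference.from_string("my-project.bq_dataset.bq_table")

job_config = bigquery.ExtractJobConfig(destination_format="CSV")
extract_job = client.extract_table(
    source,
    "gs://my-bucket/path/for/dropoff/*",
    job_config=job_config,
)
extract_job.result()  # blocks until the export job finishes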