# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import Tensor, nn
try:
from fairseq.model_parallel.megatron.mpu import (
get_cuda_rng_tracker,
get_model_parallel_world_size,
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
"""Model parallel Multi-headed attention.
    This performs multi-headed attention across multiple GPUs.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
self_attention=False,
encoder_decoder_attention=False,
):
super().__init__()
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.model_parallel_size = get_model_parallel_world_size()
self.num_heads_partition = num_heads // self.model_parallel_size
assert (
self.num_heads_partition * self.model_parallel_size == num_heads
), "Number of heads must be divisible by model parallel size"
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert (
not self.self_attention or self.qkv_same_dim
), "Self-attention requires query, key and value to be of the same size"
self.k_proj = ColumnParallelLinear(
self.kdim, embed_dim, bias=bias, gather_output=False
)
self.v_proj = ColumnParallelLinear(
self.vdim, embed_dim, bias=bias, gather_output=False
)
self.q_proj = ColumnParallelLinear(
embed_dim, embed_dim, bias=bias, gather_output=False
)
self.out_proj = RowParallelLinear(
embed_dim, embed_dim, bias=bias, input_is_parallel=True
)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
**unused_kwargs,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
is_tpu = query.device.type == "xla"
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = (
ModelParallelMultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
)
saved_state["prev_key"] = k.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_value"] = v.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [
bsz * self.num_heads_partition,
tgt_len,
src_len,
]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(
bsz, self.num_heads_partition, tgt_len, src_len
)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(
bsz * self.num_heads_partition, tgt_len, src_len
)
attn_weights_float = utils.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [
bsz * self.num_heads_partition,
tgt_len,
self.head_dim,
]
embed_dim_partition = embed_dim // self.model_parallel_size
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
attn = self.out_proj(attn)
        # Return attn_weights as None to keep the return type the same as the
        # single-GPU multihead attention. This will be deprecated.
attn_weights: Optional[Tensor] = None
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
if input_buffer[k] is not None:
input_buffer[k] = input_buffer[k].index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
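# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original file). It assumes the
# megatron submodule is installed and a model-parallel group (e.g. world size
# 2) has already been initialized; tensor sizes are hypothetical.
#
#   attn = ModelParallelMultiheadAttention(
#       embed_dim=512, num_heads=8, dropout=0.1, self_attention=True
#   )
#   x = torch.randn(tgt_len, bsz, 512)       # Time x Batch x Channel
#   out, _ = attn(query=x, key=x, value=x)   # out: (tgt_len, bsz, 512);
#                                            # attention weights are returned
#                                            # as None (see forward()).
# --------------------------------------------------------------------------- #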
# ---- End of COCO-LM/fairseq/fairseq/model_parallel/modules/multihead_attention.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.models import (
CompositeEncoder,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
DownsampledMultiHeadAttention,
FairseqDropout,
GradMultiply,
LayerNorm,
LearnedPositionalEmbedding,
LinearizedConvolution,
)
logger = logging.getLogger(__name__)
@register_model("fconv_self_att")
class FConvModelSelfAtt(FairseqEncoderDecoderModel):
@classmethod
def hub_models(cls):
return {
"conv.stories.pretrained": {
"path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz",
"checkpoint_file": "pretrained_checkpoint.pt",
"tokenizer": "nltk",
},
"conv.stories": {
"path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz",
"checkpoint_file": "fusion_checkpoint.pt",
"tokenizer": "nltk",
"pretrained": "True",
"pretrained_checkpoint": "./pretrained_checkpoint.pt",
},
# Test set containing dictionaries
"data.stories": "https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2",
}
def __init__(self, encoder, decoder, pretrained_encoder=None):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(
layer is not None for layer in decoder.attention
)
self.pretrained_encoder = pretrained_encoder
if self.pretrained_encoder is None:
encoders = {"encoder": encoder}
else:
encoders = {"encoder": encoder, "pretrained": self.pretrained_encoder}
# for fusion model, CompositeEncoder contains both pretrained and training encoders
# these are forwarded and then combined in the decoder
self.encoder = CompositeEncoder(encoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--self-attention', type=str, metavar='EXPR',
help='decoder self-attention layers, ex: [True] + [False]*5')
parser.add_argument('--multihead-attention-nheads', type=int,
help='Number of heads to use in attention')
parser.add_argument('--multihead-self-attention-nheads', type=int,
help='Number of heads to use in self-attention')
parser.add_argument('--encoder-attention', type=str, metavar='EXPR',
help='encoder attention [True, ...]')
parser.add_argument('--encoder-attention-nheads', type=int,
help='Number of heads to use in encoder attention')
parser.add_argument('--project-input', type=str, metavar='EXPR',
help='Use projections in self-attention [True, ...]')
parser.add_argument('--gated-attention', type=str, metavar='EXPR',
help='Use GLU layers in self-attention projections [True, ...]')
parser.add_argument('--downsample', type=str, metavar='EXPR',
help='Use downsampling in self-attention [True, ...]')
parser.add_argument('--pretrained-checkpoint', metavar='DIR',
help='path to load checkpoint from pretrained model')
parser.add_argument('--pretrained', type=str, metavar='EXPR',
help='use pretrained model when training [True, ...]')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
trained_encoder, trained_decoder = None, None
pretrained = eval(args.pretrained)
if pretrained:
logger.info("loading pretrained model")
if not os.path.exists(args.pretrained_checkpoint):
new_pretrained_checkpoint = os.path.join(
args.data, args.pretrained_checkpoint
)
if os.path.exists(new_pretrained_checkpoint):
args.pretrained_checkpoint = new_pretrained_checkpoint
trained_model = checkpoint_utils.load_model_ensemble(
filenames=[args.pretrained_checkpoint],
task=task,
)[0][0]
trained_decoder = list(trained_model.children())[1]
trained_encoder = list(trained_model.children())[0]
# freeze pretrained model
for param in trained_decoder.parameters():
param.requires_grad = False
for param in trained_encoder.parameters():
param.requires_grad = False
encoder = FConvEncoder(
task.source_dictionary,
embed_dim=args.encoder_embed_dim,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
attention=eval(args.encoder_attention),
attention_nheads=args.encoder_attention_nheads,
)
decoder = FConvDecoder(
task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
selfattention=eval(args.self_attention),
attention_nheads=args.multihead_attention_nheads,
selfattention_nheads=args.multihead_self_attention_nheads,
project_input=eval(args.project_input),
gated_attention=eval(args.gated_attention),
downsample=eval(args.downsample),
pretrained=pretrained,
trained_decoder=trained_decoder,
)
model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
return model
@property
def pretrained(self):
return self.pretrained_encoder is not None
class FConvEncoder(FairseqEncoder):
"""Convolutional encoder"""
def __init__(
self,
dictionary,
embed_dim=512,
max_positions=1024,
convolutions=((512, 3),) * 20,
dropout=0.1,
attention=False,
attention_nheads=1,
):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
)
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels)
if in_channels != out_channels
else None
)
self.convolutions.append(
ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout)
)
self.attention.append(
SelfAttention(out_channels, embed_dim, attention_nheads)
if attention[i]
else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = self.dropout_module(x)
input_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
for proj, conv, attention in zip(
self.projections, self.convolutions, self.attention
):
residual = x if proj is None else proj(x)
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = self.dropout_module(x)
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if attention is not None:
x = attention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)
return {
"encoder_out": (x, y),
"encoder_padding_mask": encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = tuple(
eo.index_select(0, new_order) for eo in encoder_out["encoder_out"]
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if "pretrained" in encoder_out:
encoder_out["pretrained"]["encoder_out"] = tuple(
eo.index_select(0, new_order)
for eo in encoder_out["pretrained"]["encoder_out"]
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions
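# --------------------------------------------------------------------------- #
# Illustrative sketch of the encoder output (not part of the original file;
# shapes follow the code above). FConvEncoder.forward() returns a dict:
#
#   {
#       "encoder_out": (x, y),         # x: B x T x embed_dim, the conv stack
#                                      #    output projected by fc2
#                                      # y: (x + input_embedding) * sqrt(0.5)
#       "encoder_padding_mask": mask,  # B x T, or None if there is no padding
#   }
#
# Per the decoder's attention call, x is used as the attention keys and y as
# the attention values.
# --------------------------------------------------------------------------- #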
@with_incremental_state
class FConvDecoder(FairseqDecoder):
"""Convolutional decoder"""
def __init__(
self,
dictionary,
embed_dim=512,
out_embed_dim=256,
max_positions=1024,
convolutions=((512, 3),) * 8,
attention=True,
dropout=0.1,
selfattention=False,
attention_nheads=1,
selfattention_nheads=1,
project_input=False,
gated_attention=False,
downsample=False,
pretrained=False,
trained_decoder=None,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([2]))
self.pretrained = pretrained
self.pretrained_decoder = trained_decoder
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.need_attn = True
in_channels = convolutions[0][0]
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
selfattention = expand_bool_array(selfattention)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError(
"Attention is expected to be a list of booleans of "
"length equal to the number of layers."
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.selfattention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels)
if in_channels != out_channels
else None
)
self.convolutions.append(
LinearizedConv1d(
in_channels,
out_channels * 2,
kernel_size,
padding=(kernel_size - 1),
dropout=dropout,
)
)
self.attention.append(
DownsampledMultiHeadAttention(
out_channels,
embed_dim,
attention_nheads,
project_input=project_input,
gated=False,
downsample=False,
)
if attention[i]
else None
)
self.attproj.append(
Linear(out_channels, embed_dim, dropout=dropout)
if attention[i]
else None
)
self.selfattention.append(
SelfAttention(
out_channels,
embed_dim,
selfattention_nheads,
project_input=project_input,
gated=gated_attention,
downsample=downsample,
)
if selfattention[i]
else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, out_embed_dim)
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
# model fusion
if self.pretrained:
# independent gates are learned from the concatenated input
self.gate1 = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid()
)
self.gate2 = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid()
)
# pretrained and trained models are joined
self.joining = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim * 2),
LayerNorm(out_embed_dim * 2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim * 2),
LayerNorm(out_embed_dim * 2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim),
LayerNorm(out_embed_dim),
)
            # The pretrained model's output layer maps nhid -> vocab size, but the
            # two models are fused in hidden space, so a forward hook is used to
            # capture the pretrained decoder's fc2 output.
self.pretrained_outputs = {}
def save_output():
def hook(a, b, output):
self.pretrained_outputs["out"] = output
return hook
self.pretrained_decoder.fc2.register_forward_hook(save_output())
def forward(self, prev_output_tokens, encoder_out):
trained_encoder_out = encoder_out["pretrained"] if self.pretrained else None
encoder_out = encoder_out["encoder"]["encoder_out"]
encoder_a, encoder_b = self._split_encoder_out(encoder_out)
# embed positions
positions = self.embed_positions(prev_output_tokens)
# embed tokens and positions
x = self.embed_tokens(prev_output_tokens) + positions
x = self.dropout_module(x)
target_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
avg_attn_scores = None
for proj, conv, attention, selfattention, attproj in zip(
self.projections,
self.convolutions,
self.attention,
self.selfattention,
self.attproj,
):
residual = x if proj is None else proj(x)
x = self.dropout_module(x)
x = conv(x)
x = F.glu(x, dim=2)
# attention
if attention is not None:
r = x
x, attn_scores = attention(
attproj(x) + target_embedding, encoder_a, encoder_b
)
x = x + r
if not self.training and self.need_attn:
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if selfattention is not None:
x = selfattention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
        # project back to the output embedding size (fc2), then to the vocabulary (fc3)
x = self.fc2(x)
x = self.dropout_module(x)
if not self.pretrained:
x = self.fc3(x)
# fusion gating
if self.pretrained:
trained_x, _ = self.pretrained_decoder.forward(
prev_output_tokens, trained_encoder_out
)
y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1)
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = gate1 * x
gated_x2 = gate2 * self.pretrained_outputs["out"]
fusion = torch.cat([gated_x1, gated_x2], dim=-1)
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return fusion_output, avg_attn_scores
else:
return x, avg_attn_scores
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _split_encoder_out(self, encoder_out):
"""Split and transpose encoder outputs."""
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
return result
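# --------------------------------------------------------------------------- #
# Illustrative sketch of the fusion gating used above when self.pretrained is
# True (not part of the original file). With x the trained decoder state and
# p the hooked fc2 output of the frozen pretrained decoder:
#
#   g1 = sigmoid(W1 @ [x; p])          # self.gate1
#   g2 = sigmoid(W2 @ [x; p])          # self.gate2
#   fused = joining([g1 * x ; g2 * p])
#   logits = fc3(fused)
# --------------------------------------------------------------------------- #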
class SelfAttention(nn.Module):
def __init__(
self,
out_channels,
embed_dim,
num_heads,
project_input=False,
gated=False,
downsample=False,
):
super().__init__()
self.attention = DownsampledMultiHeadAttention(
out_channels,
embed_dim,
num_heads,
dropout=0,
bias=True,
project_input=project_input,
gated=gated,
downsample=downsample,
)
self.in_proj_q = Linear(out_channels, embed_dim)
self.in_proj_k = Linear(out_channels, embed_dim)
self.in_proj_v = Linear(out_channels, embed_dim)
self.ln = LayerNorm(out_channels)
def forward(self, x):
residual = x
query = self.in_proj_q(x)
key = self.in_proj_k(x)
value = self.in_proj_v(x)
x, _ = self.attention(
query, key, value, mask_future_timesteps=True, use_scalar_bias=True
)
return self.ln(x + residual)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.normal_(0, 0.1)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
m.weight.data.normal_(0, 0.1)
return m
def Linear(in_features, out_features, dropout=0.0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
@register_model_architecture("fconv_self_att", "fconv_self_att")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 3")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 8")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_attention = getattr(args, "decoder_attention", "True")
args.self_attention = getattr(args, "self_attention", "False")
args.encoder_attention = getattr(args, "encoder_attention", "False")
args.multihead_attention_nheads = getattr(args, "multihead_attention_nheads", 1)
args.multihead_self_attention_nheads = getattr(
args, "multihead_self_attention_nheads", 1
)
args.encoder_attention_nheads = getattr(args, "encoder_attention_nheads", 1)
args.project_input = getattr(args, "project_input", "False")
args.gated_attention = getattr(args, "gated_attention", "False")
args.downsample = getattr(args, "downsample", "False")
args.pretrained_checkpoint = getattr(args, "pretrained_checkpoint", "")
args.pretrained = getattr(args, "pretrained", "False")
@register_model_architecture("fconv_self_att", "fconv_self_att_wp")
def fconv_self_att_wp(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(
args, "encoder_layers", "[(128, 3)] * 2 + [(512,3)] * 1"
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_layers = getattr(
args, "decoder_layers", "[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1"
)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.self_attention = getattr(args, "self_attention", "True")
args.multihead_self_attention_nheads = getattr(
args, "multihead_self_attention_nheads", 4
)
args.project_input = getattr(args, "project_input", "True")
args.gated_attention = getattr(args, "gated_attention", "True")
args.downsample = getattr(args, "downsample", "True")
base_architecture(args)
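# --------------------------------------------------------------------------- #
# Illustrative note (not part of the original file): the EXPR-style layer
# arguments are Python expressions that build_model() eval()s into a list of
# (out_channels, kernel_size) tuples, e.g.
#
#   "[(128, 3)] * 2 + [(512,3)] * 1"
#       -> [(128, 3), (128, 3), (512, 3)]
#
# i.e. two 128-channel conv layers followed by one 512-channel layer, all with
# kernel size 3.
# --------------------------------------------------------------------------- #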
# ---- End of COCO-LM/fairseq/fairseq/models/fconv_self_att.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
def load_libnat():
try:
from fairseq import libnat_cuda
return libnat_cuda, True
except ImportError as e:
print(str(e) + "... fall back to CPU version")
try:
from fairseq import libnat
return libnat, False
except ImportError as e:
import sys
sys.stderr.write(
"ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n"
)
raise e
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
libnat, use_cuda = load_libnat()
def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels(
out_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
masked_tgt_masks = masked_tgt_masks.bool() & out_masks
mask_ins_targets = mask_ins_targets.type_as(in_tokens)[
:, 1 : in_masks.size(1)
].masked_fill_(~in_masks[:, 1:], 0)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx):
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
mask_inputs = [
[len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels
]
# generate labels
masked_tgt_masks = []
for mask_input in mask_inputs:
mask_label = []
for beam_size in mask_input[1:-1]: # HACK 1:-1
mask_label += [0] + [1 for _ in range(beam_size)]
masked_tgt_masks.append(
mask_label + [0 for _ in range(out_seq_len - len(mask_label))]
)
mask_ins_targets = [
mask_input[1:-1]
+ [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))]
for mask_input in mask_inputs
]
# transform to tensor
masked_tgt_masks = torch.tensor(
masked_tgt_masks, device=out_tokens.device
).bool()
mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
if use_cuda:
return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx)
return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
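# --------------------------------------------------------------------------- #
# Illustrative sketch of the _get_ins_targets contract (not part of the
# original file). Given a batch of source (in_tokens) and target (out_tokens)
# sequences, it returns, per example:
#
#   masked_tgt_masks  : bool mask over out_tokens marking the positions the
#                       word-insertion head must predict
#   masked_tgt_tokens : out_tokens with those positions replaced by unk_idx
#   mask_ins_targets  : for each slot between adjacent in_tokens, how many
#                       placeholders to insert (the mask-insertion targets)
# --------------------------------------------------------------------------- #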
def _get_del_targets(in_tokens, out_tokens, padding_idx):
libnat, use_cuda = load_libnat()
def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
word_del_targets = libnat.generate_deletion_labels(
in_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(
~in_masks, 0
)
return word_del_targets
def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx):
out_seq_len = out_tokens.size(1)
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
word_del_targets = [b[-1] for b in full_labels]
word_del_targets = [
labels + [0 for _ in range(out_seq_len - len(labels))]
for labels in word_del_targets
]
# transform to tensor
word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device)
return word_del_targets
if use_cuda:
return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx)
return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx)
def _apply_ins_masks(
in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx
):
in_masks = in_tokens.ne(padding_idx)
in_lengths = in_masks.sum(1)
    # HACK: shift all padding positions to eos first.
in_tokens.masked_fill_(~in_masks, eos_idx)
mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0)
out_lengths = in_lengths + mask_ins_pred.sum(1)
out_max_len = out_lengths.max()
out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None]
reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1)
out_tokens = (
in_tokens.new_zeros(in_tokens.size(0), out_max_len)
.fill_(padding_idx)
.masked_fill_(out_masks, unk_idx)
)
out_tokens[:, 0] = in_tokens[:, 0]
out_tokens.scatter_(1, reordering, in_tokens[:, 1:])
out_scores = None
if in_scores is not None:
in_scores.masked_fill_(~in_masks, 0)
out_scores = in_scores.new_zeros(*out_tokens.size())
out_scores[:, 0] = in_scores[:, 0]
out_scores.scatter_(1, reordering, in_scores[:, 1:])
return out_tokens, out_scores
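# --------------------------------------------------------------------------- #
# Illustrative worked example for _apply_ins_masks (not part of the original
# file; symbolic tokens, a single example in the batch):
#
#   in_tokens     = [bos, a, b, eos]
#   mask_ins_pred = [1, 0, 2]      # placeholders to insert after bos, a, b
#
#   -> out_tokens = [bos, unk, a, b, unk, unk, eos]
#
# New positions are filled with unk_idx and later replaced by the word
# insertion head via _apply_ins_words().
# --------------------------------------------------------------------------- #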
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx):
word_ins_masks = in_tokens.eq(unk_idx)
out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks])
if in_scores is not None:
out_scores = in_scores.masked_scatter(
word_ins_masks, word_ins_scores[word_ins_masks]
)
else:
out_scores = None
return out_tokens, out_scores
def _apply_del_words(
in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx
):
# apply deletion to a tensor
in_masks = in_tokens.ne(padding_idx)
bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx)
max_len = in_tokens.size(1)
word_del_pred.masked_fill_(~in_masks, 1)
word_del_pred.masked_fill_(bos_eos_masks, 0)
reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1]
out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering)
out_scores = None
if in_scores is not None:
out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering)
out_attn = None
if in_attn is not None:
_mask = word_del_pred[:, :, None].expand_as(in_attn)
_reordering = reordering[:, :, None].expand_as(in_attn)
out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering)
return out_tokens, out_scores, out_attn
def _skip(x, mask):
"""
    Slice a tensor (dim=0) by a boolean mask. Supports tensors and lists/dicts of tensors.
"""
if isinstance(x, int):
return x
if x is None:
return None
if isinstance(x, torch.Tensor):
if x.size(0) == mask.size(0):
return x[mask]
elif x.size(1) == mask.size(0):
return x[:, mask]
if isinstance(x, list):
return [_skip(x_i, mask) for x_i in x]
if isinstance(x, dict):
return {k: _skip(v, mask) for k, v in x.items()}
raise NotImplementedError
def _skip_encoder_out(encoder, encoder_out, mask):
if not mask.any():
return encoder_out
else:
return encoder.reorder_encoder_out(
encoder_out, mask.nonzero(as_tuple=False).squeeze()
)
def _fill(x, mask, y, padding_idx):
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None:
return y
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
dims = [x.size(0), y.size(1) - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = padding_idx
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
# ---- End of COCO-LM/fairseq/fairseq/models/nat/levenshtein_utils.py ----
#!/usr/bin/env python3
import logging
import math
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
logger = logging.getLogger(__name__)
class Conv1dSubsampler(nn.Module):
"""Convolutional subsampler: a stack of 1D convolution (along temporal
dimension) followed by non-linear activation via gated linear units
(https://arxiv.org/abs/1911.08460)
Args:
in_channels (int): the number of input channels
mid_channels (int): the number of intermediate channels
out_channels (int): the number of output channels
kernel_sizes (List[int]): the kernel size for each convolutional layer
"""
def __init__(
self,
in_channels: int,
mid_channels: int,
out_channels: int,
kernel_sizes: List[int] = (3, 3),
):
super(Conv1dSubsampler, self).__init__()
self.n_layers = len(kernel_sizes)
self.conv_layers = nn.ModuleList(
nn.Conv1d(
in_channels if i == 0 else mid_channels // 2,
mid_channels if i < self.n_layers - 1 else out_channels * 2,
k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(kernel_sizes)
)
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in range(self.n_layers):
out = ((out.float() - 1) / 2 + 1).floor().long()
return out
def forward(self, src_tokens, src_lengths):
bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D)
x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T
for conv in self.conv_layers:
x = conv(x)
x = nn.functional.glu(x, dim=1)
_, _, out_seq_len = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D)
return x, self.get_out_seq_lens_tensor(src_lengths)
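# --------------------------------------------------------------------------- #
# Illustrative note (not part of the original file): each conv layer uses
# stride 2, so the output length follows out = floor((in - 1) / 2 + 1) per
# layer. With the default two layers, e.g.:
#
#   T = 100  ->  50 after layer 1  ->  25 after layer 2
#
# i.e. roughly a 4x temporal downsampling of the input features.
# --------------------------------------------------------------------------- #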
@register_model("s2t_transformer")
class S2TTransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as downsample input
sequence for computational efficiency."""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
@classmethod
def build_encoder(cls, args):
encoder = S2TTransformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
logger.info(
f"loaded pretrained encoder from: "
f"{args.load_pretrained_encoder_from}"
)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def forward(self, src_tokens, src_lengths, prev_output_tokens):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
        method overrides the forward method definition without **kwargs.
"""
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
class S2TTransformerEncoder(FairseqEncoder):
"""Speech-to-text Transformer encoder that consists of input subsampler and
Transformer encoder."""
def __init__(self, args):
super().__init__(None)
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.transformer_layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
def forward(self, src_tokens, src_lengths):
x, input_lengths = self.subsample(src_tokens, src_lengths)
x = self.embed_scale * x
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = self.dropout_module(x)
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if self.layer_norm is not None:
x = self.layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[] if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[] if len(encoder_out["encoder_padding_mask"]) == 0
else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]]
)
new_encoder_embedding = (
[] if len(encoder_out["encoder_embedding"]) == 0
else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
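# --------------------------------------------------------------------------- #
# Illustrative shape walkthrough for S2TTransformerEncoder.forward (not part
# of the original file; sizes are hypothetical):
#
#   src_tokens : B x T x (C x D) input features (e.g. log-mel filterbanks)
#   subsample  : -> T' x B x encoder_embed_dim, with T' roughly T / 4
#   + scaled positional embeddings, dropout
#   N x TransformerEncoderLayer (optionally followed by a final LayerNorm)
#   output dict: "encoder_out" = [T' x B x C],
#                "encoder_padding_mask" = [B x T'] (empty list if no padding)
# --------------------------------------------------------------------------- #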
class TransformerDecoderScriptable(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer")
def base_architecture(args):
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2t_transformer", "s2t_transformer_s")
def s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_xs")
def s2t_transformer_xs(args):
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 3)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.dropout = getattr(args, "dropout", 0.3)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_sp")
def s2t_transformer_sp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_m")
def s2t_transformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_mp")
def s2t_transformer_mp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_m(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_l")
def s2t_transformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_lp")
def s2t_transformer_lp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_l(args)
# ---- End of COCO-LM/fairseq/fairseq/models/speech_to_text/s2t_transformer.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Tuple
import torch
import torch.nn.functional as F
from fairseq.data import Dictionary
from torch import nn
CHAR_PAD_IDX = 0
CHAR_EOS_IDX = 257
logger = logging.getLogger(__name__)
class CharacterTokenEmbedder(torch.nn.Module):
def __init__(
self,
vocab: Dictionary,
filters: List[Tuple[int, int]],
char_embed_dim: int,
word_embed_dim: int,
highway_layers: int,
max_char_len: int = 50,
char_inputs: bool = False,
):
super(CharacterTokenEmbedder, self).__init__()
self.onnx_trace = False
self.embedding_dim = word_embed_dim
self.max_char_len = max_char_len
self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0)
self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim))
self.eos_idx, self.unk_idx = 0, 1
self.char_inputs = char_inputs
self.convolutions = nn.ModuleList()
for width, out_c in filters:
self.convolutions.append(
nn.Conv1d(char_embed_dim, out_c, kernel_size=width)
)
last_dim = sum(f[1] for f in filters)
self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None
self.projection = nn.Linear(last_dim, word_embed_dim)
assert (
vocab is not None or char_inputs
), "vocab must be set if not using char inputs"
self.vocab = None
if vocab is not None:
self.set_vocab(vocab, max_char_len)
self.reset_parameters()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def set_vocab(self, vocab, max_char_len):
word_to_char = torch.LongTensor(len(vocab), max_char_len)
truncated = 0
for i in range(len(vocab)):
if i < vocab.nspecial:
char_idxs = [0] * max_char_len
else:
chars = vocab[i].encode()
# +1 for padding
char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars))
if len(char_idxs) > max_char_len:
truncated += 1
char_idxs = char_idxs[:max_char_len]
word_to_char[i] = torch.LongTensor(char_idxs)
if truncated > 0:
logger.info(
"truncated {} words longer than {} characters".format(
truncated, max_char_len
)
)
self.vocab = vocab
self.word_to_char = word_to_char
@property
def padding_idx(self):
return Dictionary().pad() if self.vocab is None else self.vocab.pad()
def reset_parameters(self):
nn.init.xavier_normal_(self.char_embeddings.weight)
nn.init.xavier_normal_(self.symbol_embeddings)
nn.init.xavier_uniform_(self.projection.weight)
nn.init.constant_(
self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.0
)
nn.init.constant_(self.projection.bias, 0.0)
def forward(
self,
input: torch.Tensor,
):
if self.char_inputs:
chars = input.view(-1, self.max_char_len)
pads = chars[:, 0].eq(CHAR_PAD_IDX)
eos = chars[:, 0].eq(CHAR_EOS_IDX)
if eos.any():
if self.onnx_trace:
chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars)
else:
chars[eos] = 0
unk = None
else:
flat_words = input.view(-1)
chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as(
input
)
pads = flat_words.eq(self.vocab.pad())
eos = flat_words.eq(self.vocab.eos())
unk = flat_words.eq(self.vocab.unk())
word_embs = self._convolve(chars)
if self.onnx_trace:
if pads.any():
word_embs = torch.where(
pads.unsqueeze(1), word_embs.new_zeros(1), word_embs
)
if eos.any():
word_embs = torch.where(
eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs
)
if unk is not None and unk.any():
word_embs = torch.where(
unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs
)
else:
if pads.any():
word_embs[pads] = 0
if eos.any():
word_embs[eos] = self.symbol_embeddings[self.eos_idx]
if unk is not None and unk.any():
word_embs[unk] = self.symbol_embeddings[self.unk_idx]
return word_embs.view(input.size()[:2] + (-1,))
def _convolve(
self,
char_idxs: torch.Tensor,
):
char_embs = self.char_embeddings(char_idxs)
char_embs = char_embs.transpose(1, 2) # BTC -> BCT
conv_result = []
for conv in self.convolutions:
x = conv(char_embs)
x, _ = torch.max(x, -1)
x = F.relu(x)
conv_result.append(x)
x = torch.cat(conv_result, dim=-1)
if self.highway is not None:
x = self.highway(x)
x = self.projection(x)
return x
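# --------------------------------------------------------------------------- #
# Illustrative sketch of the character CNN in _convolve (not part of the
# original file). For each word of up to max_char_len characters:
#
#   char ids -> char_embeddings                       # B x T_char x char_embed_dim
#   for each (width, out_c) filter:
#       Conv1d over characters, max-over-time, ReLU   # B x out_c
#   concatenate all filter outputs                    # B x sum(out_c)
#   optional Highway layers
#   linear projection                                 # B x word_embed_dim
# --------------------------------------------------------------------------- #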
class Highway(torch.nn.Module):
"""
A `Highway layer <https://arxiv.org/abs/1505.00387>`_.
Adopted from the AllenNLP implementation.
"""
def __init__(self, input_dim: int, num_layers: int = 1):
super(Highway, self).__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList(
[nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
# As per comment in AllenNLP:
# We should bias the highway layer to just carry its input forward. We do that by
# setting the bias on `B(x)` to be positive, because that means `g` will be biased to
# be high, so we will carry the input forward. The bias on `B(x)` is the second half
# of the bias vector in each Linear layer.
nn.init.constant_(layer.bias[self.input_dim :], 1)
nn.init.constant_(layer.bias[: self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
proj_x, gate = projection.chunk(2, dim=-1)
proj_x = self.activation(proj_x)
gate = torch.sigmoid(gate)
x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
return x
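# --- Illustrative usage sketch (an addition, not part of the original fairseq file).
# A Highway layer gates between the identity path and a ReLU projection, so the
# output keeps the input's shape; the sizes below are arbitrary.
if __name__ == "__main__":
    import torch

    highway = Highway(input_dim=64, num_layers=2)
    x = torch.randn(8, 64)   # (batch, features)
    y = highway(x)           # gate * x + (1 - gate) * relu(W x), elementwise
    assert y.shape == (8, 64)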
| COCO-LM/fairseq/fairseq/modules/character_token_embedder.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/character_token_embedder.py",
"repo_id": "COCO-LM",
"token_count": 3591
} | 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
"Cannot enable dropout during inference for module {} "
"because module_name was not set".format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
"Enabling dropout during inference for module: {}".format(name)
)
self.apply_during_inference = True
else:
logger.info("Disabling dropout for module: {}".format(name))
| COCO-LM/fairseq/fairseq/modules/fairseq_dropout.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/fairseq_dropout.py",
"repo_id": "COCO-LM",
"token_count": 761
} | 205 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
def LightweightConv(
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
bias=False,
):
if torch.cuda.is_available():
try:
from fairseq.modules.lightconv_layer import LightconvLayer
return LightconvLayer(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
bias=bias,
)
except ImportError as e:
print(e)
return LightweightConv1dTBC(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
bias=bias,
)
class LightweightConv1d(nn.Module):
"""Lightweight Convolution assuming the input is BxCxT
    This is just an example that explains LightConv more clearly than the TBC version.
We don't use this module in the model.
Args:
input_size: # of channels of the input and output
        kernel_size: size of the convolution kernel
        padding: amount of implicit zero-padding added on both sides of the time dimension
num_heads: number of heads used. The weight is of shape
`(num_heads, 1, kernel_size)`
weight_softmax: normalize the weight with softmax before the convolution
Shape:
Input: BxCxT, i.e. (batch_size, input_size, timesteps)
Output: BxCxT, i.e. (batch_size, input_size, timesteps)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding=0,
num_heads=1,
weight_softmax=False,
bias=False,
weight_dropout=0.0,
):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.num_heads = num_heads
self.padding = padding
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, input):
"""
input size: B x C x T
output size: B x C x T
"""
B, C, T = input.size()
H = self.num_heads
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
weight = self.weight_dropout_module(weight)
# Merge every C/H entries into the batch dimension (C = self.input_size)
# B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight to C x 1 x K (repeating each
        # head C/H times) and leave the input unreshaped, but that is slower.
input = input.view(-1, H, T)
output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
output = output.view(B, C, T)
if self.bias is not None:
output = output + self.bias.view(1, -1, 1)
return output
@with_incremental_state
class LightweightConv1dTBC(nn.Module):
"""Lightweight Convolution assuming the input is TxBxC
Args:
input_size: # of channels of the input
        kernel_size: size of the convolution kernel
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
        weight_dropout: dropout probability applied to the convolution weights (DropConnect)
weight_softmax: normalize the weight with softmax before the convolution
bias: use bias
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
bias=False,
):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
self.onnx_trace = False
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, x, incremental_state=None, unfold=False):
"""Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
"""
unfold = unfold or (incremental_state is not None)
if unfold:
output = self._forward_unfolded(x, incremental_state)
else:
output = self._forward_expanded(x, incremental_state)
if self.bias is not None:
output = output + self.bias.view(1, 1, -1)
return output
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def _forward_unfolded(self, x, incremental_state):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
weight
)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
weight = (
weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1)
)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_state):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
weight
)
weight = weight.view(1, H, K).expand(T * B, H, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
P = self.padding_l
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(
weight
)
weight_expanded = weight_expanded.narrow(2, P, T)
weight_expanded = self.weight_dropout_module(weight_expanded)
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def extra_repr(self):
s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format(
self.input_size,
self.kernel_size,
self.padding_l,
self.num_heads,
self.weight_softmax,
self.bias is not None,
)
if self.weight_dropout_module.p > 0.0:
s += ", weight_dropout={}".format(self.weight_dropout_module.p)
return s
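# --- Illustrative usage sketch (an addition, not part of the original fairseq file).
# LightweightConv1d shares one depthwise kernel per head across C/H channels;
# with an odd kernel, padding = kernel_size // 2 keeps the output length equal
# to the input length. The sizes below are arbitrary.
if __name__ == "__main__":
    conv = LightweightConv1d(
        input_size=8, kernel_size=3, padding=1, num_heads=2, weight_softmax=True
    )
    x = torch.randn(4, 8, 10)  # (batch, channels, time)
    y = conv(x)
    assert y.shape == (4, 8, 10)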
| COCO-LM/fairseq/fairseq/modules/lightweight_convolution.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/lightweight_convolution.py",
"repo_id": "COCO-LM",
"token_count": 5052
} | 206 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qact import ActivationQuantizer # NOQA
from .qconv import IntConv2d # NOQA
from .qemb import IntEmbedding # NOQA
from .qlinear import IntLinear # NOQA
| COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py",
"repo_id": "COCO-LM",
"token_count": 105
} | 207 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
transpose last 2 dimensions of the input
"""
import torch.nn as nn
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
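# --- Illustrative usage sketch (an addition, not part of the original fairseq file).
# TransposeLast swaps the last two dimensions, which is handy inside
# nn.Sequential pipelines that alternate between (B, T, C) and (B, C, T).
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 5, 7)  # (B, T, C)
    assert TransposeLast()(x).shape == (2, 7, 5)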
| COCO-LM/fairseq/fairseq/modules/transpose_last.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/transpose_last.py",
"repo_id": "COCO-LM",
"token_count": 208
} | 208 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from itertools import chain
import torch
from fairseq import optim
from omegaconf import DictConfig
from .dynamic_loss_scaler import DynamicLossScaler
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in mro(method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params) or (
isinstance(self.fp32_params, dict)
and all(torch.is_tensor(t) for t in self.fp32_params.values())
)
@classmethod
def build_fp32_params(cls, args, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
is_pipeline_parallel = getattr(
args, "pipeline_model_parallel", False
) and getattr(args, "distributed_no_spawn", False)
total_param_size = sum(p.data.numel() for p in params)
devices = [torch.cuda.current_device()]
if is_pipeline_parallel:
devices = list(set(args.pipeline_devices))
fp32_params = {}
for device in devices:
if is_pipeline_parallel:
device_param_size = sum(
p.data.numel() for p in params if p.device.index == device
)
device_params = [p for p in params if p.device.index == device]
else:
device_param_size = total_param_size
device_params = params
fp32_params[device] = (
device_params[0].new(0).float().new(device_param_size)
)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(
device_param_size
)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, "param_group"):
p32.param_group = p.param_group
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
# copy FP16 grads to FP32
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (
p.grad.data
if p.grad is not None
else p.data.new_zeros(p.data.shape)
)
numel = grad_data.numel()
self.fp32_params[device].grad.data[
offset : offset + numel
].copy_(grad_data.view(-1))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
if p32.grad is None:
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(
self.fp32_params[device]
.data[offset : offset + numel]
.view_as(p.data)
)
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (
            # Skip the multiplication if it's a no-op (i.e., if _multiply_factor
            # is 1.0), while avoiding the device-to-host transfer that comparing
            # a GPU tensor to 1.0 would require. Since _multiply_factor starts
            # out as a Python float, we roughly assume that once it has become a
            # tensor it is probably no longer 1.0 and do the multiplication;
            # otherwise we can safely check its value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
if grad_norm > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm
self.scaler.check_overflow(grad_norm)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
if getattr(self, "supports_step_with_scale", False):
self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
super().__init__(cfg.optimizer)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(cfg.common, "fp16_scale_window", None) is None:
if len(cfg.optimization.update_freq) > 1:
raise ValueError(
"--fp16-scale-window must be given explicitly when using a "
"custom --update-freq schedule"
)
data_parallel_size = int(
cfg.distributed_training.distributed_world_size
/ cfg.common.model_parallel_size
)
scale_window = int(
2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0]
)
else:
scale_window = cfg.common.fp16_scale_window
if not getattr(cfg.common, "bf16", False):
self.scaler = DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
cfg (omegaconf.DictConfig): fairseq args
params (iterable): iterable of parameters to optimize
"""
flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False)
if getattr(cfg.common, "bf16", False):
flatten = False # mixed precision is faster on TPUs without flat grads
fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
if flatten:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
else:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads"
)
return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
class _MemoryEfficientFP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in MRO (method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return False
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
# Hack: PyTorch automatically casts the optimizer state to match the
# type of the current parameters. But with --memory-efficient-fp16 the
# params are FP16 while the optimizer state is FP32 and we don't want
# to cast. A workaround is to manually copy back the original state
# after the optimizer has been loaded.
if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False):
groups = self.optimizer.param_groups
saved_groups = state_dict["param_groups"]
id_map = {
old_id: p
for old_id, p in zip(
chain(*(g["params"] for g in saved_groups)),
chain(*(g["params"] for g in groups)),
)
}
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
def _unscale_grads(self):
if (
            # Skip the multiplication if it's a no-op (i.e., if _multiply_factor
            # is 1.0), while avoiding the device-to-host transfer that comparing
            # a GPU tensor to 1.0 would require. Since _multiply_factor starts
            # out as a Python float, we roughly assume that once it has become a
            # tensor it is probably no longer 1.0 and do the multiplication;
            # otherwise we can safely check its value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.wrapped_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
max_norm = float(max_norm)
grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
grad_norm_cpu = float(grad_norm)
if grad_norm_cpu > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm_cpu
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm_cpu)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
if getattr(self, "supports_step_with_scale", False):
# NOTE(msb) optimizer divides by scale factor
self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
else:
self._unscale_grads()
self.wrapped_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
else:
self._multiply_factor = 1.0
@property
def supports_flat_params(self):
return self.wrapped_optimizer.supports_flat_params
class MemoryEfficientFP16Optimizer(
_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer
):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(
self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs
):
if not allow_unsupported and not optimizer.supports_memory_efficient_fp16:
raise ValueError(
"Unsupported optimizer: {}".format(optimizer.__class__.__name__)
)
super().__init__(cfg.optimizer)
self.wrapped_optimizer = optimizer
if getattr(cfg.common, "fp16_scale_window", None) is None:
if len(cfg.optimization.update_freq) > 1:
raise ValueError(
"--fp16-scale-window must be given explicitly when using a "
"custom --update-freq schedule"
)
data_parallel_size = int(
cfg.distributed_training.distributed_world_size
/ cfg.common.model_parallel_size
)
scale_window = int(
2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0]
)
else:
scale_window = cfg.common.fp16_scale_window
if not getattr(cfg.common, "bf16", False):
self.scaler = DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
            cfg (omegaconf.DictConfig): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(cfg.optimizer, params)
return cls(cfg, params, fp16_optimizer, **kwargs)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.wrapped_optimizer.optimizer = optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
@property
def lr_scheduler(self):
return getattr(self.wrapped_optimizer, "lr_scheduler", None)
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.wrapped_optimizer.all_reduce_grads(module)
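# --- Conceptual sketch (an addition, not part of the original fairseq file).
# Both wrappers above boil down to (1) scaling the loss before backward so FP16
# gradients do not underflow, and (2) folding the inverse scale (and any grad
# clipping factor) into a single multiplier that is applied to the gradients
# exactly once before the optimizer step. A plain-PyTorch miniature of that
# pattern, kept in FP32 for portability and with arbitrary sizes:
if __name__ == "__main__":
    model = torch.nn.Linear(8, 4)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    loss_scale = 128.0
    x, y = torch.randn(16, 8), torch.randn(16, 4)
    loss = torch.nn.functional.mse_loss(model(x), y)
    (loss * loss_scale).backward()     # scaled backward, cf. backward() above
    for p in model.parameters():       # unscale exactly once, cf. _unscale_grads()
        p.grad.mul_(1.0 / loss_scale)
    opt.step()
    opt.zero_grad()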
| COCO-LM/fairseq/fairseq/optim/fp16_optimizer.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/optim/fp16_optimizer.py",
"repo_id": "COCO-LM",
"token_count": 10159
} | 209 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from fairseq.distributed import utils
try:
from fairscale.optim import OSS
_has_fairscale = True
except ImportError:
_has_fairscale = False
def shard_(optimizer, group):
if not _has_fairscale:
raise ImportError(
"\n\nPlease install the fairscale package:" "\n\n pip install fairscale"
)
class FairseqOSS(OSS):
@property
def disable_mem_eff_fp16_loading_hack(self):
return True
def __getattr__(self, name):
if name.startswith("supports") and hasattr(self.optim, name):
return getattr(self.optim, name)
raise AttributeError(
"'FairseqOSS' object has no attribute {0!r}".format(name)
)
def broadcast_global_state_dict(
self, state_dict: Dict[str, Any]
) -> Dict[str, Any]:
"""
            Broadcasts the entire state_dict to all other ranks;
            each rank is responsible for loading its own partition of the data.
"""
return utils.broadcast_object(
state_dict,
src_rank=0,
group=self.group,
)
torch_optimizer = optimizer.optimizer
optim_cls = type(torch_optimizer)
optimizer.optimizer = FairseqOSS(
torch_optimizer.param_groups,
optim_cls,
group=group,
**optimizer.optimizer_config
)
| COCO-LM/fairseq/fairseq/optim/shard.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/optim/shard.py",
"repo_id": "COCO-LM",
"token_count": 721
} | 210 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
DenoisingDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
import numpy as np
logger = logging.getLogger(__name__)
@register_task("denoising")
class DenoisingTask(LegacyFairseqTask):
"""
    Denoising task for applying sequence-to-sequence denoising (i.e., BART).
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments"
" per sample for dataset",
)
parser.add_argument(
"--sample-break-mode",
default="complete_doc",
type=str,
help="mode for breaking sentence",
)
parser.add_argument(
"--mask",
default=0.0,
type=float,
help="fraction of words/subwords that will be masked",
)
parser.add_argument(
"--mask-random",
default=0.0,
type=float,
help="instead of using [MASK], use random token this often",
)
parser.add_argument(
"--insert",
default=0.0,
type=float,
help="insert this percentage of additional random tokens",
)
parser.add_argument(
"--permute",
default=0.0,
type=float,
help="take this proportion of subwords and permute them",
)
parser.add_argument(
"--rotate",
default=0.5,
type=float,
help="rotate this proportion of inputs",
)
parser.add_argument(
"--poisson-lambda",
default=3.0,
type=float,
help="randomly shuffle sentences for this proportion of inputs",
)
parser.add_argument(
"--permute-sentences",
default=0.0,
type=float,
help="shuffle this proportion of sentences in all inputs",
)
parser.add_argument(
"--mask-length",
default="subword",
type=str,
choices=["subword", "word", "span-poisson"],
help="mask length to choose",
)
parser.add_argument(
"--replace-length",
default=-1,
type=int,
help="when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)",
)
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
dictionary = Dictionary.load(os.path.join(args.data, "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = StripTokenDataset(dataset, self.dictionary.eos())
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s> and one for </s>
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
document_sep_len=0,
)
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
mask_whole_words = (
get_whole_word_mask(self.args, self.source_dictionary)
if self.args.mask_length != "subword"
else None
)
self.datasets[split] = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
)
logger.info(
"Split: {0}, Loaded {1} samples of denoising_dataset".format(
split,
len(self.datasets[split]),
)
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We assume that the input begins with a
bos symbol (`<s>`) and ends with an eos symbol (`</s>`).
"""
pad = self.source_dictionary.pad()
eos = self.source_dictionary.eos()
src_dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=self.args.tokens_per_sample - 2, # for <s> and </s>
pad=pad,
eos=eos,
break_mode=self.args.sample_break_mode,
document_sep_len=0,
)
prev_output_tokens = PrependTokenDataset(
StripTokenDataset(src_dataset, eos), eos
)
src_dataset = PadDataset(src_dataset, pad_idx=pad, left_pad=False)
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
"prev_output_tokens": PadDataset(
prev_output_tokens, pad_idx=pad, left_pad=False
),
},
"target": src_dataset,
},
sizes=[np.array(src_lengths)],
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dictionary
| COCO-LM/fairseq/fairseq/tasks/denoising.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/tasks/denoising.py",
"repo_id": "COCO-LM",
"token_count": 4395
} | 211 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationConfig, TranslationTask, load_langpair_dataset
from fairseq.utils import new_arange
NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask"])
@dataclass
class TranslationLevenshteinConfig(TranslationConfig):
noise: NOISE_CHOICES = field(
default="random_delete",
metadata={
"help": "type of noise"
},
)
@register_task("translation_lev", dataclass=TranslationLevenshteinConfig)
class TranslationLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
cfg: TranslationLevenshteinConfig
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
prepend_bos=True,
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True
)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = (
2
+ (
(target_length - 2)
* target_score.new_zeros(target_score.size(0), 1).uniform_()
).long()
)
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = (
target_tokens.gather(1, target_rank)
.masked_fill_(target_cutoff, pad)
.gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
)
prev_target_tokens = prev_target_tokens[
:, : prev_target_tokens.ne(pad).sum(1).max()
]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = (
target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk
)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = (
target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
)
return target_tokens.masked_fill(~target_mask, unk)
if self.cfg.noise == "random_delete":
return _random_delete(target_tokens)
elif self.cfg.noise == "random_mask":
return _random_mask(target_tokens)
elif self.cfg.noise == "full_mask":
return _full_mask(target_tokens)
elif self.cfg.noise == "no_noise":
return target_tokens
else:
raise NotImplementedError
def build_generator(self, models, args, **unused):
# add models input to match the API for SequenceGenerator
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
return IterativeRefinementGenerator(
self.target_dictionary,
eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
max_iter=getattr(args, "iter_decode_max_iter", 10),
beam_size=getattr(args, "iter_decode_with_beam", 1),
reranking=getattr(args, "iter_decode_with_external_reranker", False),
decoding_format=getattr(args, "decoding_format", None),
adaptive=not getattr(args, "iter_decode_force_max_iter", False),
retain_history=getattr(args, "retain_iter_history", False),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError(
"Constrained decoding with the translation_lev task is not supported"
)
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
| COCO-LM/fairseq/fairseq/tasks/translation_lev.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/tasks/translation_lev.py",
"repo_id": "COCO-LM",
"token_count": 3542
} | 212 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from omegaconf import DictConfig
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.validate")
def main(cfg: DictConfig, override_args=None):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
else:
overrides = None
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=overrides,
suffix=cfg.checkpoint.checkpoint_suffix,
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(saved_cfg)
# Build criterion
criterion = task.build_criterion(saved_cfg.criterion)
criterion.eval()
for subset in cfg.dataset.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if data_parallel_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=cfg.common.all_gather_list_size,
group=distributed_utils.get_data_parallel_group(),
)
log_outputs = list(chain.from_iterable(log_outputs))
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(
override_parser, suppress_defaults=True
)
distributed_utils.call_main(
convert_namespace_to_omegaconf(args), main, override_args=override_args
)
if __name__ == "__main__":
cli_main()
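# Example invocation (illustrative; the flag names correspond to the config
# fields checked above, e.g. --path, --valid-subset and --max-tokens/--batch-size):
#
#   python -m fairseq_cli.validate $DATA_DIR \
#       --path checkpoints/checkpoint_best.pt \
#       --valid-subset valid --max-tokens 4096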
| COCO-LM/fairseq/fairseq_cli/validate.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq_cli/validate.py",
"repo_id": "COCO-LM",
"token_count": 2273
} | 213 |
import torch
import fused_xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = fused_xentropy_cuda.forward(
logits, labels, half_to_float)
if padding_idx >= 0:
losses.masked_fill_(labels==padding_idx, 0)
ctx.save_for_backward(logits, max_log_sum_exp, labels,
torch.LongTensor([padding_idx]))
return losses
@staticmethod
def backward(ctx, grad_loss):
logits, max_log_sum_exp, labels, padding_idx = ctx.saved_tensors
if not grad_loss.is_contiguous():
grad_loss = grad_loss.contiguous()
if padding_idx >= 0:
grad_loss.masked_fill_(labels==padding_idx.item(), 0)
grad_logits = fused_xentropy_cuda.backward(
grad_loss, logits, max_log_sum_exp,
labels)
return grad_logits, None, None, None, None
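# --- Illustrative usage sketch (an addition, not part of the original file).
# The module-level import above already requires the compiled fused_xentropy_cuda
# extension, and the kernel runs on GPU tensors; losses are per-token (no
# reduction) and positions labeled with padding_idx get zero loss. Shapes and
# dtypes below are illustrative assumptions.
if __name__ == "__main__" and torch.cuda.is_available():
    logits = torch.randn(6, 1000, device="cuda", requires_grad=True)
    labels = torch.randint(1, 1000, (6,), device="cuda")
    losses = SoftmaxCrossEntropyLoss.apply(logits, labels, 0, False)  # padding_idx=0
    losses.sum().backward()
    print(losses.shape, logits.grad.shape)  # torch.Size([6]) torch.Size([6, 1000])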
| COCO-LM/fairseq/fused_ops/fused_ops/xentropy/softmax_xentropy.py/0 | {
"file_path": "COCO-LM/fairseq/fused_ops/fused_ops/xentropy/softmax_xentropy.py",
"repo_id": "COCO-LM",
"token_count": 464
} | 214 |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
main(args)
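# Example invocation (illustrative): the script expects tab-separated
# "source<TAB>target" lines on stdin and appends the sampled target phrases as
# tab-separated constraints to each output line, e.g.:
#
#   paste test.src test.tgt | python scripts/constraints/extract.py --number 2 --len 3 --seed 1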
| COCO-LM/fairseq/scripts/constraints/extract.py/0 | {
"file_path": "COCO-LM/fairseq/scripts/constraints/extract.py",
"repo_id": "COCO-LM",
"token_count": 1451
} | 215 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
import shutil
import tempfile
import unittest
import numpy as np
import torch
from scripts.average_checkpoints import average_checkpoints
from torch import nn
class ModelWithSharedParameter(nn.Module):
def __init__(self):
super(ModelWithSharedParameter, self).__init__()
self.embedding = nn.Embedding(1000, 200)
self.FC1 = nn.Linear(200, 200)
self.FC2 = nn.Linear(200, 200)
# tie weight in FC2 to FC1
self.FC2.weight = nn.Parameter(self.FC1.weight)
self.FC2.bias = nn.Parameter(self.FC1.bias)
self.relu = nn.ReLU()
def forward(self, input):
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
class TestAverageCheckpoints(unittest.TestCase):
def test_average_checkpoints(self):
params_0 = collections.OrderedDict(
[
("a", torch.DoubleTensor([100.0])),
("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
("c", torch.IntTensor([7, 8, 9])),
]
)
params_1 = collections.OrderedDict(
[
("a", torch.DoubleTensor([1.0])),
("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
("c", torch.IntTensor([2, 2, 2])),
]
)
params_avg = collections.OrderedDict(
[
("a", torch.DoubleTensor([50.5])),
("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
# We expect truncation for integer division
("c", torch.IntTensor([4, 5, 5])),
]
)
fd_0, path_0 = tempfile.mkstemp()
fd_1, path_1 = tempfile.mkstemp()
torch.save(collections.OrderedDict([("model", params_0)]), path_0)
torch.save(collections.OrderedDict([("model", params_1)]), path_1)
output = average_checkpoints([path_0, path_1])["model"]
os.close(fd_0)
os.remove(path_0)
os.close(fd_1)
os.remove(path_1)
for (k_expected, v_expected), (k_out, v_out) in zip(
params_avg.items(), output.items()
):
self.assertEqual(
k_expected,
k_out,
"Key mismatch - expected {} but found {}. "
"(Expected list of keys: {} vs actual list of keys: {})".format(
k_expected, k_out, params_avg.keys(), output.keys()
),
)
np.testing.assert_allclose(
v_expected.numpy(),
v_out.numpy(),
err_msg="Tensor value mismatch for key {}".format(k_expected),
)
def test_average_checkpoints_with_shared_parameters(self):
def _construct_model_with_shared_parameters(path, value):
m = ModelWithSharedParameter()
nn.init.constant_(m.FC1.weight, value)
torch.save({"model": m.state_dict()}, path)
return m
tmpdir = tempfile.mkdtemp()
paths = []
path = os.path.join(tmpdir, "m1.pt")
m1 = _construct_model_with_shared_parameters(path, 1.0)
paths.append(path)
path = os.path.join(tmpdir, "m2.pt")
m2 = _construct_model_with_shared_parameters(path, 2.0)
paths.append(path)
path = os.path.join(tmpdir, "m3.pt")
m3 = _construct_model_with_shared_parameters(path, 3.0)
paths.append(path)
new_model = average_checkpoints(paths)
self.assertTrue(
torch.equal(
new_model["model"]["embedding.weight"],
(m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0,
)
)
self.assertTrue(
torch.equal(
new_model["model"]["FC1.weight"],
(m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0,
)
)
self.assertTrue(
torch.equal(
new_model["model"]["FC2.weight"],
(m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0,
)
)
shutil.rmtree(tmpdir)
if __name__ == "__main__":
unittest.main()
| COCO-LM/fairseq/tests/test_average_checkpoints.py/0 | {
"file_path": "COCO-LM/fairseq/tests/test_average_checkpoints.py",
"repo_id": "COCO-LM",
"token_count": 2279
} | 216 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator(self, ref=None, itr=None):
if ref is None:
assert itr is None
ref = list(range(10))
itr = iterators.CountingIterator(ref)
else:
assert len(ref) == 10
assert itr is not None
self.assertTrue(itr.has_next())
self.assertEqual(itr.n, 0)
self.assertEqual(next(itr), ref[0])
self.assertEqual(itr.n, 1)
self.assertEqual(next(itr), ref[1])
self.assertEqual(itr.n, 2)
itr.skip(3)
self.assertEqual(itr.n, 5)
self.assertEqual(next(itr), ref[5])
itr.skip(3)
self.assertEqual(itr.n, 9)
self.assertEqual(next(itr), ref[9])
self.assertFalse(itr.has_next())
def test_grouped_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.GroupedIterator(x, 1)
self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
itr = iterators.GroupedIterator(x, 4)
self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
itr = iterators.GroupedIterator(x, 5)
self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.GroupedIterator(x, 3))
itr = iterators.GroupedIterator(x, 3)
self.test_counting_iterator(ref, itr)
def test_sharded_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
self.assertEqual(list(itr), x)
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
self.assertEqual(list(itr), [0, 2, 4, 6, 8])
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
self.assertEqual(list(itr), [1, 3, 5, 7, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.assertEqual(list(itr), [0, 3, 6, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
self.assertEqual(list(itr), [1, 4, 7, None])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
self.assertEqual(list(itr), [2, 5, 8, None])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.test_counting_iterator(ref, itr)
def test_counting_iterator_take(self):
ref = list(range(10))
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
def test_counting_iterator_buffered_iterator_take(self):
ref = list(range(10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(buffered_itr), 5)
self.assertEqual(len(list(iter(buffered_itr))), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
ref = list(range(4, 10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr, start=4)
itr.take(5)
self.assertEqual(len(itr), 5)
self.assertEqual(len(buffered_itr), 1)
self.assertEqual(next(itr), ref[0])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
if __name__ == "__main__":
unittest.main()
| COCO-LM/fairseq/tests/test_iterators.py/0 | {
"file_path": "COCO-LM/fairseq/tests/test_iterators.py",
"repo_id": "COCO-LM",
"token_count": 2293
} | 217 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import TokenBlockDataset
class TestTokenBlockDataset(unittest.TestCase):
def _build_dataset(self, data, **kwargs):
sizes = [len(x) for x in data]
underlying_ds = test_utils.TestDataset(data)
return TokenBlockDataset(underlying_ds, sizes, **kwargs)
def test_eos_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [1])
self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
self.assertEqual(ds[2].tolist(), [1])
def test_block_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
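        # With break_mode="none" the sentences are effectively concatenated and re-split
        # into fixed-size blocks of block_size tokens (the last block may be shorter).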
self.assertEqual(ds[0].tolist(), [5, 4, 3])
self.assertEqual(ds[1].tolist(), [2, 1, 8])
self.assertEqual(ds[2].tolist(), [7, 6, 1])
self.assertEqual(ds[3].tolist(), [9, 1])
def test_complete_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
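        # With break_mode="complete", sentences are kept whole and packed greedily into
        # blocks of up to block_size tokens; a sentence longer than block_size still
        # forms its own block (see the second example below).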
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
data = [
torch.tensor([4, 3, 2, 1], dtype=torch.long),
torch.tensor([5, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([6, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=3, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [5, 1, 1])
self.assertEqual(ds[2].tolist(), [6, 1])
def test_4billion_tokens(self):
"""Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745"""
data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
ds[-1] # __getitem__ works
start, end = ds.slice_indices[-1]
assert end > 4294967295 # data must be sufficiently large to overflow uint32
assert not isinstance(
end + 1, float
) # this would also raise, since np.uint64(1) + 1 => 2.0
if __name__ == "__main__":
unittest.main()
| COCO-LM/fairseq/tests/test_token_block_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/tests/test_token_block_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1830
} | 218 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
## The script is largely adapted from the huggingface transformers library.
""" GLUE processors and helpers """
import logging
import os
import csv
import sys
import copy
import json
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
        Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
def glue_convert_examples_to_features(examples, tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token_id=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token_id: Padding token id
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids = inputs["input_ids"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = []
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
# logger.info("input_tokens: %s" % " ".join(tokenizer.convert_ids_to_tokens(input_ids)))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
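# Illustrative usage sketch (not part of the original file; `tokenizer` is assumed to be a
# huggingface tokenizer exposing `encode_plus`, and `data_dir` an MRPC-style GLUE folder):
#
#   processor = MrpcProcessor()
#   examples = processor.get_train_examples(data_dir)
#   features = glue_convert_examples_to_features(
#       examples, tokenizer, max_length=128, task="mrpc")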
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['premise'].numpy().decode('utf-8'),
tensor_dict['hypothesis'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question1'].numpy().decode('utf-8'),
tensor_dict['question2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question'].numpy().decode('utf-8'),
tensor_dict['sentence'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
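# Illustrative usage sketch (not part of the original file): `preds` and `labels` are assumed
# to be 1-D numpy arrays of equal length.
#
#   metrics = glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": ..., "f1": ..., "acc_and_f1": ...}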
| COCO-LM/huggingface/utils_for_glue.py/0 | {
"file_path": "COCO-LM/huggingface/utils_for_glue.py",
"repo_id": "COCO-LM",
"token_count": 11442
} | 219 |
_base_ = [
'../_base_/models/upernet_cswin.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
backbone=dict(
type='CSWin',
embed_dim=96,
depth=[2,4,32,2],
num_heads=[4,8,16,32],
split_size=[1,2,7,7],
drop_path_rate=0.6,
use_chk=False,
),
decode_head=dict(
in_channels=[96,192,384,768],
num_classes=150
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
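# Iteration-based schedule (by_epoch=False): linear warmup for 1500 iterations, then poly decay
# (power=1.0, i.e. linear) down to min_lr over the remaining iterations.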
data=dict(samples_per_gpu=2)
| CSWin-Transformer/segmentation/configs/cswin/upernet_cswin_base.py/0 | {
"file_path": "CSWin-Transformer/segmentation/configs/cswin/upernet_cswin_base.py",
"repo_id": "CSWin-Transformer",
"token_count": 691
} | 220 |
[build-system]
requires = ["setuptools", "setuptools-scm"]
build-backend = "setuptools.build_meta"
[project]
name = "ClimaX"
version = "0.3.1"
authors =[
{name="Tung Nguyen", email="[email protected]"},
{name="Jayesh K. Gupta", email="[email protected]"}
]
description = ""
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
]
dependencies = [
]
[project.urls]
"Homepage" = "https://microsoft.github.io/ClimaX/"
"Bug Tracker" = "https://github.com/microsoft/ClimaX/issues"
[tool.setuptools.packages.find]
where = ["src"]
| ClimaX/pyproject.toml/0 | {
"file_path": "ClimaX/pyproject.toml",
"repo_id": "ClimaX",
"token_count": 253
} | 221 |
datadir: /data/CMIP6/HAMMOZ
name: 10m_u_component_of_wind
cmip_name: uas
era_name: u10
run: r1i1p1f1
version: v20190627
res:
- 1.40625
# - 5.625 | ClimaX/snakemake_configs/HAMMOZ/config_10m_u_component_of_wind.yml/0 | {
"file_path": "ClimaX/snakemake_configs/HAMMOZ/config_10m_u_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 77
} | 222 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: v_component_of_wind
cmip_name: va
era_name: v
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625 | ClimaX/snakemake_configs/MPI-ESM/config_v_component_of_wind.yml/0 | {
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_v_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 122
} | 223 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import warnings
from typing import List
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class LinearWarmupCosineAnnealingLR(_LRScheduler):
"""Sets the learning rate of each parameter group to follow a linear warmup schedule between
warmup_start_lr and base_lr followed by a cosine annealing schedule between base_lr and
eta_min."""
def __init__(
self,
optimizer: Optimizer,
warmup_epochs: int,
max_epochs: int,
warmup_start_lr: float = 0.0,
eta_min: float = 0.0,
last_epoch: int = -1,
) -> None:
"""
Args:
optimizer (Optimizer): Wrapped optimizer.
warmup_epochs (int): Maximum number of iterations for linear warmup
max_epochs (int): Maximum number of iterations
warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
"""
self.warmup_epochs = warmup_epochs
self.max_epochs = max_epochs
self.warmup_start_lr = warmup_start_lr
self.eta_min = eta_min
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
"""Compute learning rate using chainable form of the scheduler."""
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.",
UserWarning,
)
if self.last_epoch == self.warmup_epochs:
return self.base_lrs
if self.last_epoch == 0:
return [self.warmup_start_lr] * len(self.base_lrs)
if self.last_epoch < self.warmup_epochs:
return [
group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
if (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:
return [
group["lr"]
+ (base_lr - self.eta_min) * (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
return [
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
/ (
1
+ math.cos(
math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs)
)
)
* (group["lr"] - self.eta_min)
+ self.eta_min
for group in self.optimizer.param_groups
]
def _get_closed_form_lr(self) -> List[float]:
"""Called when epoch is passed as a param to the `step` function of the scheduler."""
if self.last_epoch < self.warmup_epochs:
return [
self.warmup_start_lr
+ self.last_epoch * (base_lr - self.warmup_start_lr) / max(1, self.warmup_epochs - 1)
for base_lr in self.base_lrs
]
return [
self.eta_min
+ 0.5
* (base_lr - self.eta_min)
* (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
for base_lr in self.base_lrs
]
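# Illustrative usage sketch (not part of the original file; the optimizer, model and training
# loop names are assumptions):
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=5, max_epochs=100)
#   for epoch in range(100):
#       train_one_epoch(model, optimizer)
#       scheduler.step()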
| ClimaX/src/climax/utils/lr_scheduler.py/0 | {
"file_path": "ClimaX/src/climax/utils/lr_scheduler.py",
"repo_id": "ClimaX",
"token_count": 1811
} | 224 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import random
import argparse
import pickle
import numpy as np
import torch
import models
import data
from util import util
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
# experiment specifics
parser.add_argument('--name', type=str, default='deepfashionHD', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--model', type=str, default='pix2pix', help='which model to use')
parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# input/output sizes
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=("resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside", "scale_shortside_and_crop", "fixed", "none"))
parser.add_argument('--load_size', type=int, default=256, help='Scale images to this size. The final image will be cropped to --crop_size.')
parser.add_argument('--crop_size', type=int, default=256, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have an unknown class as a class label, specify --contain_dontcare_label.')
parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
# for setting inputs
parser.add_argument('--dataroot', type=str, default='dataset/deepfashionHD')
parser.add_argument('--dataset_mode', type=str, default='deepfashionHD')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
# for displays
parser.add_argument('--display_winsize', type=int, default=512, help='display window size')
# for generator
parser.add_argument('--netG', type=str, default='spade', help='selects model to use for netG (pix2pixhd | spade)')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
# for feature encoder
parser.add_argument('--netCorr', type=str, default='NoVGGHPM')
parser.add_argument('--nef', type=int, default=32, help='# of gen filters in first conv layer')
# for instance-wise features
parser.add_argument('--CBN_intype', type=str, default='warp_mask', help='type of CBN input for framework, warp/mask/warp_mask')
parser.add_argument('--match_kernel', type=int, default=1, help='correspondence matrix match kernel size')
parser.add_argument('--featEnc_kernel', type=int, default=3, help='kernel size in domain adaptor')
parser.add_argument('--PONO', action='store_true', help='use positional normalization ')
parser.add_argument('--PONO_C', action='store_true', help='use C normalization in corr module')
parser.add_argument('--vgg_normal_correct', action='store_true', help='if true, correct vgg normalization and replace vgg FM model with ctx model')
parser.add_argument('--use_coordconv', action='store_true', help='if true, use coordconv in CorrNet')
parser.add_argument('--video_like', action='store_true', help='useful in deepfashion')
parser.add_argument('--amp', action='store_true', help='use torch.cuda.amp')
parser.add_argument('--temperature', type=float, default=0.01)
parser.add_argument('--iteration_count', type=int, default=5)
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, unknown = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
# modify dataset-related parser options
dataset_mode = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_mode)
parser = dataset_option_setter(parser, self.isTrain)
opt, unknown = parser.parse_known_args()
# if there is opt_file, load it.
# The previous default options will be overwritten
if opt.load_from_opt_file:
parser = self.update_options_from_file(parser, opt)
opt = parser.parse_args()
self.parser = parser
return opt
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
def option_file_path(self, opt, makedir=False):
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
if makedir:
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt')
return file_name
def save_options(self, opt):
file_name = self.option_file_path(opt, makedir=True)
with open(file_name + '.txt', 'wt') as opt_file:
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
with open(file_name + '.pkl', 'wb') as opt_file:
pickle.dump(opt, opt_file)
def update_options_from_file(self, parser, opt):
new_opt = self.load_options(opt)
for k, v in sorted(vars(opt).items()):
if hasattr(new_opt, k) and v != getattr(new_opt, k):
new_val = getattr(new_opt, k)
parser.set_defaults(**{k: new_val})
return parser
def load_options(self, opt):
file_name = self.option_file_path(opt, makedir=False)
new_opt = pickle.load(open(file_name + '.pkl', 'rb'))
return new_opt
def parse(self, save=False):
# gather options from base, train, dataset, model
opt = self.gather_options()
# train or test
opt.isTrain = self.isTrain
self.print_options(opt)
if opt.isTrain:
self.save_options(opt)
# Set semantic_nc based on the option.
# This will be convenient in many places
opt.semantic_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = list(range(len(str_ids)))
seed = 1234
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
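# Illustrative invocation (not part of the original file; the script name `train.py` and the
# paths are assumptions based on the defaults above):
#
#   python train.py --name deepfashionHD --dataset_mode deepfashionHD \
#       --dataroot dataset/deepfashionHD --batchSize 4 --gpu_ids 0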
| CoCosNet-v2/options/base_options.py/0 | {
"file_path": "CoCosNet-v2/options/base_options.py",
"repo_id": "CoCosNet-v2",
"token_count": 3817
} | 225 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import cv2
import torch
import numpy as np
from PIL import Image
from skimage import feature
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class CelebAHQEdgeDataset(Pix2pixDataset):
#hair, skin, l_brow, r_blow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck,
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=15)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
if opt.phase == 'train':
fd = open(os.path.join(opt.dataroot, 'train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join(opt.dataroot, 'val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg'))
subfolder = str(int(lines[i].strip()) // 2000)
label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', subfolder, lines[i].strip().zfill(5) + '_{}.png'))
return label_paths, image_paths
def get_ref(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/celebahq_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', '')
return ref_dict, train_test_folder
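    # Marks a pixel as an edge wherever its horizontal or vertical neighbour in the part
    # mask `t` carries a different label value; the result is OR-ed into `edge`.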
def get_edges(self, edge, t):
edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1])
edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1])
edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:])
edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:])
return edge
def get_label_tensor(self, path):
inner_parts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'eye_g', 'hair']
img_path = self.labelpath_to_imgpath(path)
img = Image.open(img_path).resize((self.opt.load_size, self.opt.load_size), resample=Image.BILINEAR)
params = get_params(self.opt, img.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False)
inner_label = np.ones(img.size, dtype=np.uint8)
edges = np.zeros(img.size, dtype=np.uint8)
tensors_dist = 0
e = 1
for part in inner_parts:
edge = np.zeros(img.size, dtype=np.uint8) #this for distance transform map on each facial part
if os.path.exists(path.format(part)):
part_label = Image.open(path.format(part)).convert('L').resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST)
part_label = np.array(part_label)
if part == 'hair':
inner_label[part_label == 255] = 1
else:
inner_label[part_label == 255] = 0
edges = self.get_edges(edges, part_label)
edge = self.get_edges(edge, part_label)
im_dist = cv2.distanceTransform(255-edge*255, cv2.DIST_L1, 3)
im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8)
tensor_dist = transform_img(Image.fromarray(im_dist))
tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist])
e += 1
# canny edge for background
canny_edges = feature.canny(np.array(img.convert('L')))
canny_edges = canny_edges * inner_label
edges_all = edges + canny_edges
edges_all[edges_all > 1] = 1
tensor_edges_all = transform_label(Image.fromarray(edges_all * 255))
edges[edges > 1] = 1
tensor_edges = transform_label(Image.fromarray(edges * 255))
label_tensor = torch.cat((tensor_edges_all, tensors_dist, tensor_edges), dim=0)
return label_tensor, params
def imgpath_to_labelpath(self, path):
root, name = path.split('CelebA-HQ-img/')
subfolder = str(int(name.split('.')[0]) // 2000)
label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', subfolder, name.split('.')[0].zfill(5) + '_{}.png')
return label_path
def labelpath_to_imgpath(self, path):
root= path.replace('\\', '/').split('CelebAMask-HQ-mask-anno/')[0]
name = os.path.basename(path).split('_')[0]
img_path = os.path.join(root, 'CelebA-HQ-img', str(int(name)) + '.jpg')
return img_path
# In ADE20k, 'unknown' label is of value 0.
# Change the 'unknown' label to the last label to match other datasets.
# def postprocess(self, input_dict):
# label = input_dict['label']
# label = label - 1
# label[label == -1] = self.opt.label_nc
# input_dict['label'] = label
# if input_dict['label_ref'] is not None:
# label_ref = input_dict['label_ref']
# label_ref = label_ref - 1
# label_ref[label_ref == -1] = self.opt.label_nc
# input_dict['label_ref'] = label_ref
| CoCosNet/data/celebahqedge_dataset.py/0 | {
"file_path": "CoCosNet/data/celebahqedge_dataset.py",
"repo_id": "CoCosNet",
"token_count": 2971
} | 226 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#08.09 change pad
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer, equal_lr
from models.networks.architecture import ResnetBlock as ResnetBlock
from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
from models.networks.architecture import Attention
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm1d
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
ic = 0 + (3 if 'warp' in self.opt.CBN_intype else 0) + (self.opt.semantic_nc if 'mask' in self.opt.CBN_intype else 0)
self.fc = nn.Conv2d(ic, 16 * nf, 3, padding=1)
if opt.eqlr_sn:
self.fc = equal_lr(self.fc)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
if opt.use_attention:
self.attn = Attention(4 * nf, 'spectral' in opt.norm_G)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
num_up_layers = 5
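        # The decoder upsamples 5 times, so the starting latent is crop_size / 2**5 wide
        # (and correspondingly high after dividing by the aspect ratio).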
sw = opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, warp_out=None):
seg = input if warp_out is None else warp_out
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
x = self.head_0(x, seg)
x = self.up(x)
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
if self.opt.use_attention:
x = self.attn(x)
x = self.up_2(x, seg)
x = self.up(x)
x = self.up_3(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = F.tanh(x)
return x
class AdaptiveFeatureGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
parser.add_argument('--num_upsampling_layers',
choices=('normal', 'more', 'most'), default='normal',
help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")
return parser
def __init__(self, opt):
# TODO: kernel=4, concat noise, or change architecture to vgg feature pyramid
super().__init__()
self.opt = opt
kw = 3
pw = int(np.ceil((kw - 1.0) / 2))
ndf = opt.ngf
norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
self.layer1 = norm_layer(nn.Conv2d(opt.spade_ic, ndf, kw, stride=1, padding=pw))
self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, opt.adaptor_kernel, stride=2, padding=pw))
self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw))
if opt.warp_stride == 2:
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=1, padding=pw))
else:
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, opt.adaptor_kernel, stride=2, padding=pw))
self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=1, padding=pw))
self.actvn = nn.LeakyReLU(0.2, False)
self.opt = opt
nf = opt.ngf
self.head_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se)
if opt.adaptor_nonlocal:
self.attn = Attention(8 * nf, False)
self.G_middle_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se)
self.G_middle_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt, use_se=opt.adaptor_se)
if opt.adaptor_res_deeper:
self.deeper0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
if opt.dilation_conv:
self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=2)
self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=4)
self.degridding0 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=2, dilation=2))
self.degridding1 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=1))
else:
self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
def forward(self, input, seg):
x = self.layer1(input)
x = self.layer2(self.actvn(x))
x = self.layer3(self.actvn(x))
x = self.layer4(self.actvn(x))
x = self.layer5(self.actvn(x))
x = self.head_0(x, seg)
if self.opt.adaptor_nonlocal:
x = self.attn(x)
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
if self.opt.adaptor_res_deeper:
x = self.deeper0(x, seg)
x = self.deeper1(x, seg)
x = self.deeper2(x, seg)
if self.opt.dilation_conv:
x = self.degridding0(x)
x = self.degridding1(x)
return x
class ReverseGenerator(BaseNetwork):
def __init__(self, opt, ic, oc, size):
super().__init__()
self.opt = opt
self.downsample = True if size == 256 else False
nf = opt.ngf
opt.spade_ic = ic
if opt.warp_reverseG_s:
self.backbone_0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
else:
self.backbone_0 = SPADEResnetBlock(4 * nf, 8 * nf, opt)
self.backbone_1 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.backbone_2 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.backbone_3 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.backbone_4 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.backbone_5 = SPADEResnetBlock(2 * nf, nf, opt)
del opt.spade_ic
if self.downsample:
kw = 3
pw = int(np.ceil((kw - 1.0) / 2))
ndf = opt.ngf
norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
self.layer1 = norm_layer(nn.Conv2d(ic, ndf, kw, stride=1, padding=pw))
self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, 4, stride=2, padding=pw))
self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw))
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 4, stride=2, padding=pw))
self.up = nn.Upsample(scale_factor=2)
self.actvn = nn.LeakyReLU(0.2, False)
self.conv_img = nn.Conv2d(nf, oc, 3, padding=1)
def forward(self, x):
input = x
if self.downsample:
x = self.layer1(input)
x = self.layer2(self.actvn(x))
x = self.layer3(self.actvn(x))
x = self.layer4(self.actvn(x))
x = self.backbone_0(x, input)
if not self.opt.warp_reverseG_s:
x = self.backbone_1(x, input)
x = self.backbone_2(x, input)
x = self.backbone_3(x, input)
if self.downsample:
x = self.up(x)
x = self.backbone_4(x, input)
if self.downsample:
x = self.up(x)
x = self.backbone_5(x, input)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = F.tanh(x)
return x
class DomainClassifier(BaseNetwork):
def __init__(self, opt):
super().__init__()
nf = opt.ngf
kw = 4 if opt.domain_rela else 3
pw = int((kw - 1.0) / 2)
self.feature = nn.Sequential(nn.Conv2d(4 * nf, 2 * nf, kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(2 * nf, affine=True),
nn.LeakyReLU(0.2, False),
nn.Conv2d(2 * nf, nf, kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(nf, affine=True),
nn.LeakyReLU(0.2, False),
nn.Conv2d(nf, int(nf // 2), kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(int(nf // 2), affine=True),
nn.LeakyReLU(0.2, False)) #32*8*8
model = [nn.Linear(int(nf // 2) * 8 * 8, 100),
SynchronizedBatchNorm1d(100, affine=True),
nn.ReLU()]
if opt.domain_rela:
model += [nn.Linear(100, 1)]
else:
model += [nn.Linear(100, 2),
nn.LogSoftmax(dim=1)]
self.classifier = nn.Sequential(*model)
def forward(self, x):
x = self.feature(x)
x = self.classifier(x.view(x.shape[0], -1))
return x
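# Gradient reversal layer: acts as the identity in the forward pass and multiplies the incoming
# gradient by -alpha in the backward pass (used together with the domain classifier above for
# domain-adversarial training).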
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
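# Exponential moving average of model parameters: `register` stores the initial shadow copies,
# __call__ updates shadow <- mu * shadow + (1 - mu) * param, and `assign`/`resume` swap the
# averaged weights in and out (e.g. around evaluation).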
class EMA():
def __init__(self, mu):
self.mu = mu
self.shadow = {}
self.original = {}
def register(self, name, val):
self.shadow[name] = val.clone()
def __call__(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
decay = self.mu
new_average = (1.0 - decay) * param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def assign(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
def resume(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
| CoCosNet/models/networks/generator.py/0 | {
"file_path": "CoCosNet/models/networks/generator.py",
"repo_id": "CoCosNet",
"token_count": 5757
} | 227 |
source_lang=python
target_lang=python
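# Zero-shot code-to-code search evaluation: the same test file is used as both the query set
# and the candidate pool, and ranked predictions are written to --trace_file.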
python run.py \
--model_name_or_path microsoft/unixcoder-base \
--query_data_file ../data/code_to_code_search_test.json \
--candidate_data_file ../data/code_to_code_search_test.json \
--trace_file ../saved_models/code_to_code_search/preds.txt \
--query_lang ${source_lang} \
--candidate_lang ${target_lang} \
--code_length 512 \
--eval_batch_size 256
| CodeBERT/CodeExecutor/downstream/run.sh/0 | {
"file_path": "CodeBERT/CodeExecutor/downstream/run.sh",
"repo_id": "CodeBERT",
"token_count": 173
} | 228 |
import os
import torch
import logging
import argparse
import random
import json
from tqdm import tqdm
import multiprocessing
import time
from itertools import cycle
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from models import build_or_load_gen_model
from configs import add_args, set_seed, set_dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from utils import CommentGenDataset, SimpleGenDataset
from evaluator.smooth_bleu import bleu_fromstr
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_loaders(data_files, args, tokenizer, pool, eval=False):
def fn(features):
return features
global_rank = args.global_rank
for data_file in data_files:
if args.raw_input:
dataset = SimpleGenDataset(tokenizer, pool, args, data_file)
else:
dataset = CommentGenDataset(tokenizer, pool, args, data_file)
data_len = len(dataset)
if global_rank == 0:
logger.info(f"Data length: {data_len}.")
if eval:
sampler = SequentialSampler(dataset)
else:
sampler = DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=args.train_batch_size if not eval else args.eval_batch_size, \
num_workers=args.cpu_count, collate_fn=fn)
yield dataset, sampler, dataloader
def eval_bleu_epoch(args, eval_dataloader, model, tokenizer):
logger.info(f" ***** Running bleu evaluation on {args.eval_file} *****")
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
if hasattr(model, "module"):
model = model.module
pred_ids, ex_ids = [], []
for step, examples in enumerate(eval_dataloader, 1):
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(args.local_rank)
ids = [ex.example_id for ex in examples]
source_mask = source_ids.ne(tokenizer.pad_id)
preds = model.generate(source_ids,
attention_mask=source_mask,
use_cache=True,
num_beams=args.beam_size,
early_stopping=True,
max_length=args.max_target_length)
top_preds = list(preds.cpu().numpy())
pred_ids.extend(top_preds)
# [1:] to remove beginning '<msg>'
pred_nls = [tokenizer.decode(id[1:], skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids]
valid_file = args.dev_filename
golds = []
with open(valid_file, "r") as f:
for line in f:
golds.append(json.loads(line)["msg"])
golds = golds[:len(pred_nls)]
bleu = bleu_fromstr(pred_nls, golds, rmstop=False)
return bleu
def save_model(model, optimizer, scheduler, output_dir, config):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
config.save_pretrained(output_dir)
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
output_optimizer_file = os.path.join(output_dir, "optimizer.pt")
torch.save(
optimizer.state_dict(),
output_optimizer_file,
_use_new_zipfile_serialization=False,
)
output_scheduler_file = os.path.join(output_dir, "scheduler.pt")
torch.save(
scheduler.state_dict(),
output_scheduler_file,
_use_new_zipfile_serialization=False,
)
def main(args):
dist.init_process_group(backend="nccl")
local_rank = dist.get_rank() % args.gpu_per_node
args.global_rank = local_rank + args.node_index * args.gpu_per_node
args.local_rank = local_rank
args.world_size = dist.get_world_size()
logger.warning("Process rank: %s, global rank: %s, world size: %s, bs: %s",
args.local_rank, args.global_rank, \
torch.distributed.get_world_size(), \
args.train_batch_size)
torch.cuda.set_device(local_rank)
set_seed(args)
config, model, tokenizer = build_or_load_gen_model(args)
model = DDP(model.cuda(), device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
pool = multiprocessing.Pool(args.cpu_count)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
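    # Linear warmup over the first 10% of the total training steps.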
args.warmup_steps = int(args.train_steps * 0.1)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.train_steps,
)
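    # Resume optimizer and scheduler states if a previous "checkpoints-last" directory exists.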
if os.path.exists("{}/checkpoints-last/optimizer.pt".format(args.output_dir)):
optimizer.load_state_dict(
torch.load(
"{}/checkpoints-last/optimizer.pt".format(args.output_dir),
map_location="cpu",
)
)
scheduler.load_state_dict(
torch.load(
"{}/checkpoints-last/scheduler.pt".format(args.output_dir),
map_location="cpu",
)
)
global_step = 0
save_steps = args.save_steps
train_file = args.train_filename
valid_file = args.dev_filename
    if os.path.isdir(train_file):
        train_files = [file for file in os.listdir(train_file) if file.startswith("train") and file.endswith(".jsonl")]
        train_files = [os.path.join(train_file, file) for file in train_files]
    else:
        train_files = [train_file]
    random.seed(args.seed)
    random.shuffle(train_files)
valid_files = [valid_file]
# bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
# logger.warning("Initial bleu: {}".format(bleu))
for epoch in range(1, args.train_epochs + 1):
# set seed for reproducible data split
save_seed = args.seed
args.seed += epoch
set_seed(args)
args.seed = save_seed
model.train()
nb_tr_examples, nb_tr_steps, tr_loss = 0, 0, 0
for _, _, train_dataloader in get_loaders(train_files, args, tokenizer, pool): # WARNING: this is an iterator, to save memory
for step, examples in enumerate(train_dataloader, 1):
if step == 1:
ex = examples[0]
logger.info(f"batch size: {len(examples)}")
logger.info(f"example source: {tokenizer.convert_ids_to_tokens(ex.source_ids)}")
# logger.info(f"example label: {tokenizer.convert_ids_to_tokens(ex.source_labels)}")
logger.info(f"example target: {tokenizer.convert_ids_to_tokens(ex.target_ids)}")
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(local_rank)
source_labels = None
target_ids = torch.tensor(
[ex.target_ids for ex in examples], dtype=torch.long
).to(local_rank)
source_mask = source_ids.ne(tokenizer.pad_id)
target_mask = target_ids.ne(tokenizer.pad_id)
loss = model(
input_ids=source_ids,
input_labels=source_labels,
decoder_input_ids=target_ids,
attention_mask=source_mask,
decoder_attention_mask=target_mask,
encoder_loss=False
)
if args.gpu_per_node > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if nb_tr_steps % args.gradient_accumulation_steps == 0:
# Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.global_rank == 0 and global_step % args.log_steps == 0:
train_loss = round(
tr_loss * args.gradient_accumulation_steps / nb_tr_steps,
4,
)
logger.info(
"step {}/{}: Train loss {}".format(
global_step,
args.train_steps,
round(train_loss, 3),
)
)
if global_step == args.train_steps and args.global_rank == 0:
# end training
_, _, valid_dataloader = next(get_loaders(valid_files, args, tokenizer, pool, eval=True))
bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-last" + "-" + str(bleu))
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(f"Reach max steps {args.train_steps}.")
time.sleep(5)
return
if args.global_rank == 0 and \
global_step % save_steps == 0 and \
nb_tr_steps % args.gradient_accumulation_steps == 0:
_, _, valid_dataloader = next(get_loaders(valid_files, args, tokenizer, pool, eval=True))
bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-" + str(global_step) + "-" + str(bleu))
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(
"Save the {}-step model and optimizer into {}".format(
global_step, output_dir
)
)
time.sleep(5)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = add_args(parser)
args.cpu_count = multiprocessing.cpu_count()
# remove long tokenization warning. ref: https://github.com/huggingface/transformers/issues/991
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
logger.info(args)
main(args)
logger.info("Training finished.")
# torch.multiprocessing.spawn(main, args=(args,), nprocs=torch.cuda.device_count())
| CodeBERT/CodeReviewer/code/run_finetune_msg.py/0 | {
"file_path": "CodeBERT/CodeReviewer/code/run_finetune_msg.py",
"repo_id": "CodeBERT",
"token_count": 5785
} | 229 |
# Clone Detection
## Task Definition
Given two codes as the input, the task is to do binary classification (0/1), where 1 stands for semantic equivalence and 0 for others. Models are evaluated by F1 score.
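As a quick illustration of the metric (a minimal sketch assuming scikit-learn and plain 0/1 label lists; this is not the official evaluator script):

```python
from sklearn.metrics import f1_score, precision_score, recall_score

# Hypothetical gold labels and model predictions for a handful of code pairs.
gold = [1, 0, 1, 1, 0]
pred = [1, 0, 0, 1, 1]

# average='binary' scores the positive class (label 1, i.e. semantic clones) only,
# matching the binary F1 used for this task.
print("Precision:", precision_score(gold, pred, average='binary'))
print("Recall:   ", recall_score(gold, pred, average='binary'))
print("F1:       ", f1_score(gold, pred, average='binary'))
```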
## Updates
2021-09-13: We have updated the evaluator script. Since this is a binary classification task, we use the binary F1 score instead of the "macro" F1 score.
## Dataset
The dataset we use is [BigCloneBench](https://www.cs.usask.ca/faculty/croy/papers/2014/SvajlenkoICSME2014BigERA.pdf) and filtered following the paper [Detecting Code Clones with Graph Neural Network and Flow-Augmented Abstract Syntax Tree](https://arxiv.org/pdf/2002.08653.pdf).
### Data Format
1. dataset/data.jsonl is stored in jsonlines format. Each line in the uncompressed file represents one function. One row is illustrated below.
- **func:** the function
- **idx:** index of the example
2. train.txt/valid.txt/test.txt provide examples, stored in the following format: `idx1 idx2 label`. A minimal loading sketch is shown after this list.
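A minimal loading sketch for these two files (illustrative only; it assumes the `idx`/`func` fields described above and treats indices as strings):

```python
import json

# Map example index -> function text from data.jsonl (one JSON object per line).
funcs = {}
with open("dataset/data.jsonl") as f:
    for line in f:
        js = json.loads(line)
        funcs[str(js["idx"])] = js["func"]

# Each line of train.txt/valid.txt/test.txt is "idx1 idx2 label".
pairs = []
with open("dataset/train.txt") as f:
    for line in f:
        idx1, idx2, label = line.split()
        pairs.append((funcs[idx1], funcs[idx2], int(label)))
```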
### Data Statistics
Data statistics of the dataset are shown in the below table:
| | #Examples |
| ----- | :-------: |
| Train | 901,028 |
| Dev | 415,416 |
| Test | 415,416 |
You can get data using the following command.
```
unzip dataset.zip
```
## Evaluator
We provide a script to evaluate predictions for this task and report the F1 score.
### Example
```bash
python evaluator/evaluator.py -a evaluator/answers.txt -p evaluator/predictions.txt
```
{'Recall': 0.25, 'Prediction': 0.5, 'F1': 0.3333333333333333}
### Input predictions
A predictions file that has predictions in TXT format, such as evaluator/predictions.txt. For example:
```text
13653451 21955002 0
1188160 8831513 1
1141235 14322332 0
16765164 17526811 1
```
## Pipeline-GraphCodeBERT
We also provide a pipeline that fine-tunes GraphCodeBERT on this task.
### Dependency
- pip install torch
- pip install transformers
- pip install tree_sitter
- pip install scikit-learn
### Tree-sitter (optional)
If the pre-built file "parser/my-languages.so" doesn't work for you, please rebuild it with the following command:
```shell
cd parser
bash build.sh
cd ..
```
### Fine-tune
We fine-tune on 4×V100-16G GPUs and evaluate on 10% of the validation data.
```shell
mkdir saved_models
python run.py \
--output_dir=saved_models \
--config_name=microsoft/graphcodebert-base \
--model_name_or_path=microsoft/graphcodebert-base \
--tokenizer_name=microsoft/graphcodebert-base \
--do_train \
--train_data_file=dataset/train.txt \
--eval_data_file=dataset/valid.txt \
--test_data_file=dataset/test.txt \
--epoch 1 \
--code_length 512 \
--data_flow_length 128 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 2e-5 \
--max_grad_norm 1.0 \
--evaluate_during_training \
--seed 123456 2>&1| tee saved_models/train.log
```
### Inference
We use full test data for inference.
```shell
python run.py \
--output_dir=saved_models \
--config_name=microsoft/graphcodebert-base \
--model_name_or_path=microsoft/graphcodebert-base \
--tokenizer_name=microsoft/graphcodebert-base \
--do_eval \
--do_test \
--train_data_file=dataset/train.txt \
--eval_data_file=dataset/valid.txt \
--test_data_file=dataset/test.txt \
--epoch 1 \
--code_length 512 \
--data_flow_length 128 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 2e-5 \
--max_grad_norm 1.0 \
--evaluate_during_training \
--seed 123456 2>&1| tee saved_models/test.log
```
### Evaluation
```shell
python evaluator/evaluator.py -a dataset/test.txt -p saved_models/predictions.txt 2>&1| tee saved_models/score.log
```
## Result
The results on the test set are shown below:
| Method | Precision | Recall | F1 |
| ------------- | :-------: | :-------: | :-------: |
| Deckard | 0.93 | 0.02 | 0.03 |
| RtvNN | 0.95 | 0.01 | 0.01 |
| CDLH | 0.92 | 0.74 | 0.82 |
| ASTNN | 0.92 | 0.94 | 0.93 |
| FA-AST-GMN | **0.96** | 0.94 | 0.95 |
| CodeBERT | 0.947 | 0.934 | 0.941 |
| GraphCodeBERT | 0.948 | **0.952** | **0.950** |
| CodeBERT/GraphCodeBERT/clonedetection/README.md/0 | {
"file_path": "CodeBERT/GraphCodeBERT/clonedetection/README.md",
"repo_id": "CodeBERT",
"token_count": 1655
} | 230 |
model=../../../../pretrained-model/UniXcoder-base
mkdir saved_models
CUDA_VISIBLE_DEVICES=0,1,2,3 python run.py \
--output_dir=./saved_models \
--model_type=roberta \
--model_name_or_path=$model \
--do_train \
--train_data_file=../../dataset/train.txt \
--eval_data_file=../../dataset/valid.txt \
--test_data_file=../../dataset/test.txt \
--epoch 1 \
--block_size 512 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--max_grad_norm 1.0 \
--evaluate_during_training \
--seed 123456 2>&1| tee saved_models/train.log
CUDA_VISIBLE_DEVICES=0,1,2,3 python run.py \
--output_dir=./saved_models \
--model_type=roberta \
--model_name_or_path=$model \
--do_eval \
--do_test \
--train_data_file=../../dataset/train.txt \
--eval_data_file=../../dataset/valid.txt \
--test_data_file=../../dataset/test.txt \
--epoch 1 \
--block_size 512 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--max_grad_norm 1.0 \
--evaluate_during_training \
--seed 123456 2>&1| tee saved_models/test.log
python ../evaluator/evaluator.py -a ../../dataset/test.txt -p saved_models/predictions.txt 2>&1| tee saved_models/score.log
| CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/run.sh/0 | {
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/run.sh",
"repo_id": "CodeBERT",
"token_count": 570
} | 231 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import sys
import argparse
import logging
import os
import pickle
import random
import torch
import json
import numpy as np
from model import Model
from torch.nn import CrossEntropyLoss, MSELoss
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
code_tokens,
code_ids,
nl_tokens,
nl_ids,
url,
):
self.code_tokens = code_tokens
self.code_ids = code_ids
self.nl_tokens = nl_tokens
self.nl_ids = nl_ids
self.url = url
def convert_examples_to_features(js,tokenizer,args):
"""convert examples to token ids"""
code = ' '.join(js['code_tokens']) if type(js['code_tokens']) is list else ' '.join(js['code_tokens'].split())
code_tokens = tokenizer.tokenize(code)[:args.code_length-4]
code_tokens =[tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+code_tokens+[tokenizer.sep_token]
code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
padding_length = args.code_length - len(code_ids)
code_ids += [tokenizer.pad_token_id]*padding_length
nl = ' '.join(js['docstring_tokens']) if type(js['docstring_tokens']) is list else ' '.join(js['doc'].split())
nl_tokens = tokenizer.tokenize(nl)[:args.nl_length-4]
nl_tokens = [tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+nl_tokens+[tokenizer.sep_token]
nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)
padding_length = args.nl_length - len(nl_ids)
nl_ids += [tokenizer.pad_token_id]*padding_length
return InputFeatures(code_tokens,code_ids,nl_tokens,nl_ids,js['url'] if "url" in js else js["retrieval_idx"])
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path=None):
self.examples = []
data = []
with open(file_path) as f:
if "jsonl" in file_path:
for line in f:
line = line.strip()
js = json.loads(line)
if 'function_tokens' in js:
js['code_tokens'] = js['function_tokens']
data.append(js)
elif "codebase"in file_path or "code_idx_map" in file_path:
js = json.load(f)
for key in js:
temp = {}
temp['code_tokens'] = key.split()
temp["retrieval_idx"] = js[key]
temp['doc'] = ""
temp['docstring_tokens'] = ""
data.append(temp)
elif "json" in file_path:
for js in json.load(f):
data.append(js)
for js in data:
self.examples.append(convert_examples_to_features(js,tokenizer,args))
if "train" in file_path:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens]))
logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
logger.info("nl_tokens: {}".format([x.replace('\u0120','_') for x in example.nl_tokens]))
logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return (torch.tensor(self.examples[i].code_ids),torch.tensor(self.examples[i].nl_ids))
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, model, tokenizer):
""" Train the model """
#get training dataset
train_dataset = TextDataset(tokenizer, args, args.train_data_file)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,num_workers=4)
#get optimizer and scheduler
optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = len(train_dataloader) * args.num_train_epochs)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size//args.n_gpu)
logger.info(" Total train batch size = %d", args.train_batch_size)
logger.info(" Total optimization steps = %d", len(train_dataloader)*args.num_train_epochs)
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
model.train()
tr_num,tr_loss,best_mrr = 0,0,0
for idx in range(args.num_train_epochs):
for step,batch in enumerate(train_dataloader):
#get inputs
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
#get code and nl vectors
code_vec = model(code_inputs=code_inputs)
nl_vec = model(nl_inputs=nl_inputs)
#calculate scores and loss
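            # scores[i][j] is the dot-product similarity between the i-th NL query and the
            # j-th code snippet in the batch; with the diagonal entries as positives and a
            # scale of 20, this cross-entropy acts as an in-batch contrastive (InfoNCE-style) loss.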
scores = torch.einsum("ab,cb->ac",nl_vec,code_vec)
loss_fct = CrossEntropyLoss()
loss = loss_fct(scores*20, torch.arange(code_inputs.size(0), device=scores.device))
#report loss
tr_loss += loss.item()
tr_num += 1
if (step+1)%100 == 0:
logger.info("epoch {} step {} loss {}".format(idx,step+1,round(tr_loss/tr_num,5)))
tr_loss = 0
tr_num = 0
#backward
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
scheduler.step()
#evaluate
results = evaluate(args, model, tokenizer,args.eval_data_file, eval_when_training=True)
for key, value in results.items():
logger.info(" %s = %s", key, round(value,4))
#save best model
if results['eval_mrr']>best_mrr:
best_mrr = results['eval_mrr']
logger.info(" "+"*"*20)
logger.info(" Best mrr:%s",round(best_mrr,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-mrr'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def evaluate(args, model, tokenizer,file_name,eval_when_training=False):
query_dataset = TextDataset(tokenizer, args, file_name)
query_sampler = SequentialSampler(query_dataset)
query_dataloader = DataLoader(query_dataset, sampler=query_sampler, batch_size=args.eval_batch_size,num_workers=4)
code_dataset = TextDataset(tokenizer, args, args.codebase_file)
code_sampler = SequentialSampler(code_dataset)
code_dataloader = DataLoader(code_dataset, sampler=code_sampler, batch_size=args.eval_batch_size,num_workers=4)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num queries = %d", len(query_dataset))
logger.info(" Num codes = %d", len(code_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
code_vecs = []
nl_vecs = []
for batch in query_dataloader:
nl_inputs = batch[1].to(args.device)
with torch.no_grad():
nl_vec = model(nl_inputs=nl_inputs)
nl_vecs.append(nl_vec.cpu().numpy())
for batch in code_dataloader:
code_inputs = batch[0].to(args.device)
with torch.no_grad():
code_vec = model(code_inputs=code_inputs)
code_vecs.append(code_vec.cpu().numpy())
model.train()
code_vecs = np.concatenate(code_vecs,0)
nl_vecs = np.concatenate(nl_vecs,0)
scores = np.matmul(nl_vecs,code_vecs.T)
sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
nl_urls = []
code_urls = []
for example in query_dataset.examples:
nl_urls.append(example.url)
for example in code_dataset.examples:
code_urls.append(example.url)
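    # Compute MRR: for each query, scan the top-1000 ranked code candidates; the reciprocal
    # rank of the gold code (matched by url) is recorded, or 0 if it is not found in the top 1000.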
ranks = []
for url, sort_id in zip(nl_urls,sort_ids):
rank = 0
find = False
for idx in sort_id[:1000]:
if find is False:
rank += 1
if code_urls[idx] == url:
find = True
if find:
ranks.append(1/rank)
else:
ranks.append(0)
result = {
"eval_mrr":float(np.mean(ranks))
}
return result
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str,
help="The input training data file (a json file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the MRR(a jsonl file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input test data file to test the MRR(a josnl file).")
parser.add_argument("--codebase_file", default=None, type=str,
help="An optional input test data file to codebase (a jsonl file).")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--nl_length", default=128, type=int,
help="Optional NL input sequence length after tokenization.")
parser.add_argument("--code_length", default=256, type=int,
help="Optional Code input sequence length after tokenization.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_zero_shot", action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_F2_norm", action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size for evaluation.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
#print arguments
args = parser.parse_args()
#set log
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO )
#set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
logger.info("device: %s, n_gpu: %s",device, args.n_gpu)
# Set seed
set_seed(args.seed)
#build model
tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path)
config = RobertaConfig.from_pretrained(args.model_name_or_path)
model = RobertaModel.from_pretrained(args.model_name_or_path)
model = Model(model)
logger.info("Training/evaluation parameters %s", args)
model.to(args.device)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Training
if args.do_train:
train(args, model, tokenizer)
# Evaluation
results = {}
if args.do_eval:
if args.do_zero_shot is False:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
model.to(args.device)
result = evaluate(args, model, tokenizer,args.eval_data_file)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],3)))
if args.do_test:
if args.do_zero_shot is False:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
model.to(args.device)
result = evaluate(args, model, tokenizer,args.test_data_file)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],3)))
if __name__ == "__main__":
main()
| CodeBERT/UniXcoder/downstream-tasks/code-search/run.py/0 | {
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-search/run.py",
"repo_id": "CodeBERT",
"token_count": 7582
} | 232 |
{
"train_micro_batch_size_per_gpu": 8,
"gradient_accumulation_steps": 1,
"fp16": {
"enabled": false
},
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": 5e8
}
} | CodeT/DIVERSE/code/src/ds_config.json/0 | {
"file_path": "CodeT/DIVERSE/code/src/ds_config.json",
"repo_id": "CodeT",
"token_count": 111
} | 233 |
include CONTRIBUTING.md
include LICENSE-IMAGE.md
include LICENSE.md
include README.md
include ThirdPartyNotices.txt
| Cognitive-Face-Python/MANIFEST.in/0 | {
"file_path": "Cognitive-Face-Python/MANIFEST.in",
"repo_id": "Cognitive-Face-Python",
"token_count": 37
} | 234 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: test_face.py
Description: Unittests for Face section of the Cognitive Face API.
"""
import unittest
import cognitive_face as CF
from . import util
class TestFace(unittest.TestCase):
"""Unittests for Face section."""
def test_detect(self):
"""Unittest for `face.detect`."""
image = '{}detection1.jpg'.format(util.BASE_URL_IMAGE)
res = CF.face.detect(image)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_find_similars_face_ids(self):
"""Unittest for `face.find_similars` with face ids."""
res = CF.face.find_similars(
util.DataStore.face_id, face_ids=util.DataStore.face_ids)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_find_similars_face_list(self):
"""Unittest for `face.find_similars` in face list."""
res = CF.face.find_similars(
util.DataStore.face_id, face_list_id=util.DataStore.face_list_id)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_find_similars_large_face_list(self):
"""Unittest for `face.find_similars` in large face list."""
CF.util.wait_for_large_face_list_training(
util.DataStore.large_face_list_id)
res = CF.face.find_similars(
util.DataStore.face_id,
large_face_list_id=util.DataStore.large_face_list_id)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_group(self):
"""Unittest for `face.group`."""
temp_face_ids = util.DataStore.face_ids
temp_face_ids.append(util.DataStore.face_id)
temp_face_ids.append(util.DataStore.another_face_id)
res = CF.face.group(temp_face_ids)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_identify_person_group(self):
"""Unittest for `face.identify` in person gorup."""
CF.util.wait_for_person_group_training(util.DataStore.person_group_id)
res = CF.face.identify(
util.DataStore.face_ids,
person_group_id=util.DataStore.person_group_id)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_identify_large_person_group(self):
"""Unittest for `face.identify` in large person gorup."""
CF.util.wait_for_large_person_group_training(
util.DataStore.large_person_group_id)
res = CF.face.identify(
util.DataStore.face_ids,
large_person_group_id=util.DataStore.large_person_group_id)
print(res)
self.assertIsInstance(res, list)
util.wait()
def test_verify_face_ids(self):
"""Unittest for `face.verify` with face ids."""
res = CF.face.verify(
util.DataStore.face_id,
another_face_id=util.DataStore.another_face_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_verify_person_group(self):
"""Unittest for `face.verify` in person group."""
CF.util.wait_for_person_group_training(util.DataStore.person_group_id)
res = CF.face.verify(
util.DataStore.face_id,
person_group_id=util.DataStore.person_group_id,
person_id=util.DataStore.person_id['Dad'])
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_verify_large_person_group(self):
"""Unittest for `face.verify` in large person group."""
CF.util.wait_for_large_person_group_training(
util.DataStore.large_person_group_id)
res = CF.face.verify(
util.DataStore.face_id,
large_person_group_id=util.DataStore.large_person_group_id,
person_id=util.DataStore.large_person_group_person_id['Dad'])
print(res)
self.assertIsInstance(res, dict)
util.wait()
if __name__ == '__main__':
unittest.main()
| Cognitive-Face-Python/cognitive_face/tests/test_face.py/0 | {
"file_path": "Cognitive-Face-Python/cognitive_face/tests/test_face.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1903
} | 235 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: View components for Python SDK sample.
"""
import wx
import wx.lib.agw.labelbook as LB
from wx.lib.agw.fmresources import INB_FIT_LABELTEXT
from wx.lib.agw.fmresources import INB_LEFT
from wx.lib.agw.fmresources import INB_NO_RESIZE
from view.panel_detection import DetectionPanel
from view.panel_subscription import SubscriptionPanel
from view.panel_find_similar import FindSimilarPanel
from view.panel_group import GroupPanel
from view.panel_identification import IdentificationPanel
from view.panel_verification import VerificationPanel
TITLE = u"Microsoft Cognitive Services Face Samples"
class MyLabelBook(LB.LabelBook):
"""LabelBook part in Main Frame."""
def __init__(self, parent):
agw_style = INB_LEFT | INB_FIT_LABELTEXT | INB_NO_RESIZE
super(MyLabelBook, self).__init__(parent, agwStyle=agw_style)
subscription_panel = SubscriptionPanel(self)
subscription_text = u"Subscription Key Management"
self.AddPage(subscription_panel, subscription_text, True)
self.AddPage(wx.Panel(self), u"Select a scenario:")
self.EnableTab(1, False)
self.AddPage(DetectionPanel(self), u" - Face Detection")
self.AddPage(FindSimilarPanel(self), u" - Face Find Similar")
self.AddPage(GroupPanel(self), u" - Face Grouping")
self.AddPage(IdentificationPanel(self), u" - Face Identification")
self.AddPage(VerificationPanel(self), u" - Face Verification")
class MyTitle(wx.Panel):
"""Title part in Main Frame."""
def __init__(self, parent):
super(MyTitle, self).__init__(parent)
self.SetBackgroundColour('#00b294')
self.SetMinSize((-1, 80))
sizer = wx.BoxSizer()
sizer.AddStretchSpacer()
family = wx.FONTFAMILY_DEFAULT
style = wx.FONTSTYLE_NORMAL
weight = wx.FONTWEIGHT_NORMAL
font = wx.Font(20, family, style, weight)
self.text = wx.StaticText(self, label=TITLE, style=wx.ALIGN_CENTER)
self.text.SetFont(font)
sizer.Add(self.text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.AddStretchSpacer()
self.SetSizer(sizer)
class MyFrame(wx.Frame):
"""Main Frame."""
def __init__(self, parent):
super(MyFrame, self).__init__(parent, title=TITLE, size=(1280, 768))
icon_path = 'Assets/Microsoft-logo_rgb_c-gray.png'
self.SetIcon(wx.Icon(icon_path))
sizer = wx.BoxSizer(wx.VERTICAL)
self.title = MyTitle(self)
sizer.Add(self.title, flag=wx.EXPAND)
self.book = MyLabelBook(self)
sizer.Add(self.book, 1, flag=wx.EXPAND)
status_text = (
'Microsoft will receive the images you upload and may use them to '
'improve Face API and related services. By submitting an image, '
'you confirm you have consent from everyone in it.')
self.status = wx.StatusBar(self)
self.status.SetStatusText(status_text)
sizer.Add(self.status, flag=wx.EXPAND)
self.SetSizer(sizer)
self.Layout()
class MyApp(wx.App):
"""The whole app."""
def OnInit(self):
"""Show main frame."""
frame = MyFrame(None)
frame.Show()
return True
| Cognitive-Face-Python/sample/view/__init__.py/0 | {
"file_path": "Cognitive-Face-Python/sample/view/__init__.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1361
} | 236 |
export CUDA_VISIBLE_DEVICES=0
python t5_run_train.py \
--model_name_or_path t5-base \
--subtask Com \
--method MainExp \
--train_file pretrain \
--max_steps 100000 \
--save_steps 100000 \
--batch_size 8 \
--ebatch_size 16 \
--gas 1 \
--seed 1 \
--set set1 | ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_pretrain.sh/0 | {
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_pretrain.sh",
"repo_id": "ContextualSP",
"token_count": 103
} | 237 |
import pdb
import subprocess
import argparse
import os
def run_command(bash_command):
process = subprocess.Popen(bash_command.split())
output, error = process.communicate()
print(error)
print(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, default="", help="model_name_or_path")
parser.add_argument("--output_dir", type=str, default="./checkpoint/", help="output dir")
parser.add_argument("--train_file", type=str, default='pretrain', help="train file")
parser.add_argument("--validation_file", type=str, default='test', help="validation file")
parser.add_argument("--max_steps", type=int, default=100000, help="max_steps")
parser.add_argument("--batch_size", type=int, default=8, help="batch_size")
parser.add_argument("--ebatch_size", type=int, default=16, help="eval batch_size")
parser.add_argument("--learning_rate", type=float, default=1e-5, help="learning_rate")
parser.add_argument("--weight_decay", type=float, default=1e-2, help="weight_decay")
parser.add_argument("--gas", type=int, default=1, help="gradient_accumulation_steps")
parser.add_argument("--save_steps", type=int, default=100000, help="save_steps")
parser.add_argument("--device_num", type=int, default=1, help="device_num")
parser.add_argument("--method", type=str, default='MainExp', help="method")
parser.add_argument("--seed", type=int, default=1, help="seed")
parser.add_argument("--init_weights", type=bool, default=False, help="init_weights")
parser.add_argument("--subtask", type=str, default='Com', help="subtask")
parser.add_argument("--set", type=str, default='set1', help="subtask")
args = parser.parse_args()
print("START training")
run_command("printenv")
output_dir = './checkpoint/' + args.subtask + '/' + args.method + '_' + args.train_file + '_' + args.set + '_seed' + str(args.seed)
# ./checkpoint/Com/MainExp_pretrain_set1_seed1
train_file = '../../data/' + args.subtask + '/' + args.set + '/' + args.train_file + '.json'
# .../data/Com/set1/pretrain.json
validation_file = '../../data/' + args.subtask + '/' + args.set + '/' + args.validation_file + '.json'
# .../data/Com/set1/test.json
cmd = f"""
python -m torch.distributed.launch --nproc_per_node {args.device_num} --master_port=12343 t5_train_model.py \
--model_name_or_path {args.model_name_or_path} \
--output_dir {output_dir} \
--do_train \
--do_eval \
--train_file {train_file} \
--validation_file {validation_file} \
--per_device_train_batch_size {args.batch_size} \
--per_device_eval_batch_size {args.ebatch_size} \
--overwrite_output_dir \
--gradient_accumulation_steps {args.gas} \
--max_steps {args.max_steps} \
--logging_steps 10 \
--learning_rate {args.learning_rate} \
--save_steps {args.save_steps} \
--eval_steps {args.save_steps} \
--evaluation_strategy steps \
--freeze_model_parameter False \
--weight_decay {args.weight_decay} \
--label_smoothing_factor 0.1 \
--lr_scheduler_type constant \
--fp16 False \
--predict_with_generate \
--num_beams 5 \
--seed {args.seed} \
--adafactor False \
--max_source_length 1024 \
--max_target_length 1024 \
--gradient_checkpointing False \
--init_weights {args.init_weights}
"""
print("RUN {}".format(cmd))
run_command(cmd)
| ContextualSP/abstraction_probing/code/t5_code/t5_run_train.py/0 | {
"file_path": "ContextualSP/abstraction_probing/code/t5_code/t5_run_train.py",
"repo_id": "ContextualSP",
"token_count": 1451
} | 238 |
# define common functions | ContextualSP/adaptershare/experiments/__init__.py/0 | {
"file_path": "ContextualSP/adaptershare/experiments/__init__.py",
"repo_id": "ContextualSP",
"token_count": 4
} | 239 |
#pretrain config
mlm:
data_format: MLM
enable_san: false
metric_meta:
- ACC
n_class: 30522
task_type: MaskLM
loss: MlmCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion | ContextualSP/adaptershare/experiments/mlm/mlm.yml/0 | {
"file_path": "ContextualSP/adaptershare/experiments/mlm/mlm.yml",
"repo_id": "ContextualSP",
"token_count": 80
} | 240 |
import os
import argparse
import random
from sys import path
path.append(os.getcwd())
from experiments.common_utils import dump_rows
from data_utils.task_def import DataFormat
from data_utils.log_wrapper import create_logger
from experiments.glue.glue_utils import *
logger = create_logger(__name__, to_disk=True, log_file="xnli_prepro.log")
def load_xnli(file, header=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if blocks[1] == "-":
continue
lab = blocks[1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[9],
"premise": blocks[6],
"hypothesis": blocks[7],
"label": lab,
"lang": blocks[0],
}
rows.append(sample)
cnt += 1
return rows
def parse_args():
parser = argparse.ArgumentParser(description="Preprocessing XNLI dataset.")
parser.add_argument("--seed", type=int, default=13)
parser.add_argument("--root_dir", type=str, default="data")
args = parser.parse_args()
return args
def main(args):
root = args.root_dir
assert os.path.exists(root)
######################################
# XNLI/SciTail Tasks
######################################
xnli_dev_path = os.path.join(root, "XNLI/xnli.dev.tsv")
xnli_test_path = os.path.join(root, "XNLI/xnli.test.tsv")
######################################
# Loading DATA
######################################
xnli_dev_data = load_xnli(xnli_dev_path)
xnli_test_data = load_xnli(xnli_test_path)
logger.info("Loaded {} XNLI train samples".format(len(xnli_dev_data)))
logger.info("Loaded {} XNLI test samples".format(len(xnli_test_data)))
canonical_data_suffix = "canonical_data"
canonical_data_root = os.path.join(root, canonical_data_suffix)
if not os.path.isdir(canonical_data_root):
os.mkdir(canonical_data_root)
# BUILD XNLI
xnli_dev_fout = os.path.join(canonical_data_root, "xnli_dev.tsv")
xnli_test_fout = os.path.join(canonical_data_root, "xnli_test.tsv")
dump_rows(xnli_dev_data, xnli_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(xnli_test_data, xnli_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with XNLI")
if __name__ == "__main__":
args = parse_args()
main(args)
| ContextualSP/adaptershare/experiments/xnli/xnli_prepro.py/0 | {
"file_path": "ContextualSP/adaptershare/experiments/xnli/xnli_prepro.py",
"repo_id": "ContextualSP",
"token_count": 1177
} | 241 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
# This is a quick hack of adamaxw by xiaodong liu
import math
import torch
from torch.optim import Optimizer
class AdamaxW(Optimizer):
r"""Implements AdamaxW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
    .. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamaxW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamaxW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('AdamaxW does not support sparse gradients')
amsgrad = group['amsgrad']
state = self.state[p]
eps = group['eps']
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of inf gradient values
state['exp_inf'] = torch.zeros_like(p.data)
exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and inf moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
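                # Update the exponentially weighted infinity norm:
                # exp_inf = max(beta2 * exp_inf, |grad| + eps), computed via torch.max over a stacked buffer.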
norm_buf = torch.cat([
exp_inf.mul_(beta2).unsqueeze(0),
grad.abs().add_(eps).unsqueeze_(0)
], 0)
torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
bias_correction = 1 - beta1 ** state['step']
clr = group['lr'] / bias_correction
p.data.addcdiv_(exp_avg, exp_inf, value=-clr)
return loss | ContextualSP/adaptershare/mt_dnn/optim.py/0 | {
"file_path": "ContextualSP/adaptershare/mt_dnn/optim.py",
"repo_id": "ContextualSP",
"token_count": 2034
} | 242 |
import os
import argparse
import torch
import json
from models import *
from utils import *
from tqdm import tqdm
def load_model_and_data_iter(args):
ckpt_path = args.checkpoint
device = torch.device(args.device)
config = json.load(open(os.path.join(os.path.dirname(ckpt_path), 'config.json'), 'r', encoding='utf-8'))
config['checkpoint'] = ckpt_path
config['device'] = device
model = load_model_from_checkpoint(**config)
print('load model from {} over.'.format(ckpt_path))
model.eval()
print('-------------------Config-------------------')
for key, val in config.items():
print(key, val)
print('load {} from {} over .'.format(config['model'], ckpt_path))
bert_version = config['bert_version']
tokenizer = BertTokenizer.from_pretrained(bert_version)
print('load {} tokenizer over'.format(bert_version))
return config, model, tokenizer
# load schemas from database
def get_column_names_unique(column_names: List[Tuple[int, str]], table_names: List[str], primary_keys: List[int]) -> List[str]:
column_names_dict = defaultdict(int)
for tbl_idx, col_name in column_names:
column_names_dict[col_name] += 1
column_names_unique = []
for c_idx, (tbl_idx, col_name) in enumerate(column_names):
if tbl_idx == -1:
column_names_unique.append(col_name)
continue
if column_names_dict[col_name] == 1:
column_names_unique.append(col_name)
elif c_idx in primary_keys:
column_names_unique.append(col_name)
else:
tbl_name = table_names[tbl_idx]
full_name = '{} . {}'.format(tbl_name, col_name)
column_names_unique.append(full_name)
assert len(column_names_unique) == len(column_names)
return column_names_unique
def alt_tbl_name(tbl_name):
tbl_name = tbl_name.split()
if len(tbl_name) > 1 and tbl_name[0] == 'reference':
tbl_name = tbl_name[1:]
if len(tbl_name) > 1 and tbl_name[-1] == 'data':
tbl_name = tbl_name[:-1]
if len(tbl_name) > 1 and tbl_name[-1] == 'list':
tbl_name = tbl_name[:-1]
return ' '.join(tbl_name)
def remove_shared_prefix(col_name: str, tbl_name: str) -> str:
col_tokens, tbl_tokens = col_name.split(), tbl_name.split()
idx = 0
while idx < len(col_tokens) and idx < len(tbl_tokens) and col_tokens[idx] == tbl_tokens[idx]:
idx += 1
return " ".join(col_tokens[idx:])
def get_column_name_normalized(column_lem_names: List[Tuple[int, str]], table_lem_names: List[str], verbose: bool = False):
column_norm_names, table_norm_names = [], []
for tbl_name in table_lem_names:
table_norm_names.append(alt_tbl_name(tbl_name))
for col_idx, (tbl_idx, col_name) in enumerate(column_lem_names):
if col_name == '*':
column_norm_names.append('*')
continue
col_norm_name = remove_shared_prefix(col_name, table_norm_names[tbl_idx])
if col_norm_name != col_name and verbose:
logging.info(" {}\t{}\t{}".format(table_norm_names[tbl_idx], col_name, col_norm_name))
column_norm_names.append(col_norm_name)
return column_norm_names, table_norm_names
def load_schema(obj: Dict) -> SpiderSchema:
column_names_lemma = obj['column_names_lemma']
table_names_lemma = obj['table_names_lemma']
column_names_original = [x[1] for x in obj['column_names_original']]
column_to_table, table_to_columns = {}, {}
for col_idx, (tbl_idx, _) in enumerate(obj['column_names']):
if tbl_idx not in table_to_columns:
table_to_columns[tbl_idx] = []
table_to_columns[tbl_idx].append(col_idx)
column_to_table[col_idx] = tbl_idx
col_norm_names, tbl_norm_names = get_column_name_normalized(column_names_lemma, table_names_lemma, True)
return SpiderSchema(
db_id=obj['db_id'],
column_names=col_norm_names,
column_types=obj['column_types'],
column_names_lemma=[x[1] for x in column_names_lemma],
column_names_original=column_names_original,
table_names=tbl_norm_names,
table_names_lemma=table_names_lemma,
table_names_original=obj['table_names_original'],
table_to_columns=table_to_columns,
column_to_table=column_to_table,
primary_keys=obj['primary_keys'],
foreign_keys=obj['foreign_keys'])
def load_schemas(path: str):
databases = json.load(open(path, 'r', encoding='utf-8'))
schemas = {}
for database in databases:
schema = load_schema(database)
schemas[schema.db_id] = schema
return schemas
def process_examples(input_path: str, database_dir: str, tokenizer: BertTokenizer, table_path: str, output_path):
schemas = load_schemas(table_path)
print('load schemas over.')
value_matchers = load_value_matchers(database_dir, schemas)
print('load value matchers over')
processed_examples = []
for raw_example in tqdm(json.load(open(input_path, 'r', encoding='utf-8'))):
db_id = raw_example['db_id']
assert db_id in schemas
processed_example = process_slsql_example(raw_example, tokenizer, schemas[db_id], value_matchers[db_id])
processed_examples += [processed_example]
save_json_objects(processed_examples, output_path)
print('process examples over, save into {}'.format(output_path))
def fix_tok(tok):
tok = tok.lower()
if tok == '-lrb-':
tok = '('
elif tok == '-rrb-':
tok = ')'
elif tok == '\"':
tok = '\''
return tok
spider_type_mappings = {
'text': 'text',
'time': 'time',
'number': 'number',
'boolean': 'boolean',
'others': 'text'
}
def get_data_type(db_data_type: str):
if db_data_type.startswith("int") or db_data_type.startswith("bigint") or db_data_type.startswith("mediumint"):
return "int"
if db_data_type.startswith("smallint") or db_data_type.startswith("tinyint") or db_data_type.startswith("bit") or db_data_type.startswith("bool") :
return "int"
if db_data_type.startswith("real") or db_data_type.startswith("numeric") or db_data_type.startswith("number"):
return "real"
if db_data_type.startswith("double") or db_data_type.startswith("decimal") or db_data_type.startswith("float"):
return "real"
if db_data_type.startswith("text") or db_data_type.startswith("varchar") or db_data_type.startswith("char"):
return "text"
if db_data_type.startswith("timestamp") or db_data_type.startswith("date") or db_data_type.startswith("year"):
return "datetime"
if len(db_data_type) == 0 or db_data_type.startswith("blob"):
return "text"
return 'text'
#raise ValueError("not support data type: " + db_data_type)
def get_column_with_values(path: str):
column_values = defaultdict(list)
try:
conn = sqlite3.connect(path)
conn.text_factory = lambda b: b.decode(errors = 'ignore')
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables =[x[0] for x in cur.fetchall()]
for table in tables:
col_results = cur.execute("PRAGMA table_info('%s')" % table).fetchall()
columns = []
for col in col_results:
col_name = col[1]
data_type = get_data_type(col[2].lower())
columns.append((col_name, data_type))
assert len(columns) > 0
# get rows
cur.execute("SELECT * FROM " + table + ";")
row_results = cur.fetchall()
rows = []
for row in row_results:
assert len(row) == len(columns)
rows.append(row)
for i, (col_name, col_type) in enumerate(columns):
values = [row[i] for row in rows]
unique_name = '{}.{}'.format(table, col_name).lower()
column_values[unique_name] = (unique_name, col_type, values)
except:
pass
return column_values.values()
def load_value_matchers(database_dir: str, schemas: Dict[str, SpiderSchema]):
db_matchers = {}
for schema in schemas.values():
db_id = schema.db_id
column_with_values = get_column_with_values(os.path.join(database_dir, db_id, f'{db_id}.sqlite'))
db_matchers[db_id] = ValueMatcher(column_with_values)
return db_matchers
def process_slsql_example(query: Dict, tokenizer: BertTokenizer, schema: SpiderSchema, value_matcher: ValueMatcher) -> Dict:
question = query['question']
assert len(query['toks']) == len(query['lemma'])
question_utterance = generate_utterance(tokenizer, question, [fix_tok(x) for x in query['toks']], [fix_tok(x) for x in query['lemma']])
# Step 2: process tables & columns
processed_tables = []
for tbl_idx, col_indices in schema.table_to_columns.items():
# special column *
if tbl_idx == -1:
table_json = {
'index': -1,
'utterance': Utterance('*', tokens=[]).to_json(),
'columns': None
}
processed_tables += [table_json]
continue
tbl_name = schema.table_names[tbl_idx]
table_utterance = generate_utterance(tokenizer, tbl_name)
processed_columns = []
for col_idx in col_indices:
column_type = schema.column_types[col_idx]
column_utterance = generate_utterance(tokenizer, schema.column_names[col_idx])
column_json = {
'index': col_idx,
'utterance': column_utterance.to_json(),
'data_type': spider_type_mappings.get(column_type, 'text')
}
processed_columns += [column_json]
table_json = {
'index': tbl_idx,
'utterance': table_utterance.to_json(),
'columns': processed_columns
}
processed_tables += [table_json]
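    # Match question tokens against database cell values to collect candidate value links.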
matched_values = value_matcher.match(question_utterance.text_tokens, 0.8, 3)
processed_query = {
'question': question_utterance.to_json(),
'tables': processed_tables,
'schema': schema.to_json(),
'values': [v.to_json() for v in matched_values]
}
return processed_query
def predict_alignments(model: nn.Module, data_iter: DataLoader, saved_path: str, threshold: float):
slsql_align_labels = []
model.eval()
with torch.no_grad():
for model_input in data_iter:
model_output = model(**model_input)
example = model_input['example'][0]
meta_index: MetaIndex = model_input['meta_index'][0]
question: Utterance = Utterance.from_json(example['question'])
schema: SpiderSchema = SpiderSchema.from_json(example['schema'])
values = [ValueMatch.from_json(v) for v in example['values']]
identify_logits = { SQLTokenType.table: model_output['table_logits'][0], SQLTokenType.column: model_output['column_logits'][0], SQLTokenType.value: model_output['value_logits'][0] }
tbl_align_weights, col_align_weights, val_align_weights = meta_index.split(model_output['alignment_weights'][0])
align_weights = { SQLTokenType.table: tbl_align_weights, SQLTokenType.column: col_align_weights, SQLTokenType.value: val_align_weights }
pred_align_labels = greedy_link_spider(identify_logits, align_weights, question, schema, values, threshold=threshold)
assert len(pred_align_labels) == len(question.tokens)
sql_align_label = [label.to_slsql(schema) for label in pred_align_labels]
slsql_align_labels += [sql_align_label]
save_json_objects(slsql_align_labels, saved_path)
print('predict alignments over, saved into {}'.format(saved_path))
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-ckpt', '--checkpoint', default='baseline/slsql/codalab/saved_models_large/align_model.bin')
parser.add_argument('-data', '--data_dir', default='baseline/slsql/codalab/dev_data')
parser.add_argument('-db_dir', '--database_dir', default='baseline/slsql/data/database')
parser.add_argument('-threshold', '--threshold', default=0.4, type=float)
parser.add_argument('-output_dir', '--output_dir', default='output')
parser.add_argument('-gpu', '--device', default='cuda:0' if torch.cuda.is_available() else 'cpu')
args = parser.parse_args()
config, model, tokenizer = load_model_and_data_iter(args)
print('loading data iterator ...')
processed_path = os.path.join(args.output_dir, 'dev.val_processed.json')
process_examples(
input_path=os.path.join(args.output_dir, 'dev.processed.json'),
table_path=os.path.join(args.output_dir, 'tables.processed.json'),
database_dir=args.database_dir,
tokenizer=tokenizer,
output_path=processed_path
)
data_iter = get_data_iterator_func(config['model'])(processed_path, tokenizer, 1, config['device'], False, False, 512, None)
predict_alignments(model, data_iter, os.path.join(args.output_dir, 'dev.align.json'), args.threshold)
print('Run Alignment Over') | ContextualSP/awakening_latent_grounding/predict.py/0 | {
"file_path": "ContextualSP/awakening_latent_grounding/predict.py",
"repo_id": "ContextualSP",
"token_count": 5850
} | 243 |
import numpy as np
import torch
from utils.data_types import *
from utils.nlp_utils import *
class GreedyLinker:
schema: SpiderSchema
question: Utterance
matched_values: List[ValueMatch]
threshold: float
identify_results: Dict[SQLTokenType, List[float]]
alignment_dict: Dict[Tuple[SQLTokenType, int, int], Tuple[float, int]]
def __init__(self, schema: SpiderSchema, question: Utterance, matched_values: List[ValueMatch], threshold=0.3) -> None:
self.schema = schema
self.question = question
self.matched_values = matched_values
self.threshold = threshold
pass
def link(self, identify_results: Dict[SQLTokenType, List[float]], align_weights: Dict[SQLTokenType, List[List[float]]]) -> List[AlignmentLabel]:
self.identify_results = identify_results
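        # Two-stage greedy linking: first build token-level alignment labels from the model's
        # alignment weights, then heuristically recover spans the model missed (fix_missing).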
alignments = self.init_skeleton_alignments(align_weights)
alignments = self.fix_missing(alignments)
return alignments
def fix_missing(self, alignments: List[AlignmentLabel]) -> List[AlignmentLabel]:
# fix column suffix missing
for i, align_label in enumerate(alignments):
if align_label.align_type != SQLTokenType.column:
continue
column_original_lemma = self.schema.column_names_lemma[self.schema.id_map[align_label.align_value]].split()
if i + 1 < len(alignments) and alignments[i+1].align_type == SQLTokenType.null and alignments[i+1].token.lemma in column_original_lemma:
alignments[i+1] = AlignmentLabel(token=alignments[i+1].token, align_type=SQLTokenType.column, align_value=align_label.align_value, confidence=align_label.confidence)
# fix column prefix missing
for i in range(len(alignments)-1, -1, -1):
align_label = alignments[i]
if align_label.align_type != SQLTokenType.column:
continue
column_original_lemma = self.schema.column_names_lemma[self.schema.id_map[align_label.align_value]].split()
if i - 1 >= 0 and alignments[i-1].align_type == SQLTokenType.null and alignments[i-1].token.lemma in column_original_lemma:
alignments[i-1] = AlignmentLabel(token=alignments[i-1].token, align_type=SQLTokenType.column, align_value=align_label.align_value, confidence=align_label.confidence)
schema_extact_matches = self._lookup_all_schema_extract_matches()
# Fix column missing with value exists
for i, align_label in enumerate(alignments):
if align_label.align_type != SQLTokenType.value:
continue
column_name = align_label.align_value.replace("VAL_", "")
column_idx = self.schema.id_map[column_name]
for match in schema_extact_matches:
if match['type'] != SQLTokenType.column or match['id'] != column_idx or not match['is_distinct']:
continue
is_all_unmatched = True
for q_idx in range(match['start'], match['end'] + 1):
if alignments[q_idx].align_type != SQLTokenType.null:
is_all_unmatched = False
break
if is_all_unmatched:
for q_idx in range(match['start'], match['end'] + 1):
alignments[q_idx] = AlignmentLabel(self.question.tokens[q_idx], align_type=SQLTokenType.column, align_value=column_name, confidence=1.0)
# Fix table column that occurs multiple times
for match in schema_extact_matches:
if self.identify_results[match['type']][match['id']] < 0.5 or not match['is_distinct']:
continue
is_all_unmatched = True
for q_idx in range(match['start'], match['end'] + 1):
if alignments[q_idx].align_type != SQLTokenType.null:
is_all_unmatched = False
break
if is_all_unmatched:
align_value = self.schema.get_identifier_name(type=match['type'].abbr, index=match['id'])
for q_idx in range(match['start'], match['end'] + 1):
alignments[q_idx] = AlignmentLabel(self.question.tokens[q_idx], align_type=match['type'], align_value=align_value, confidence=1.0)
return alignments
def _lookup_extract_tokens(self, query: str) -> List[Tuple[int, int]]:
ngrams = permutate_ngrams(tokens=[x.lemma for x in self.question.tokens])
matched_spans = []
for i, j, ngram in ngrams:
if ngram == query:
matched_spans.append((i, j))
return matched_spans
def _lookup_all_schema_extract_matches(self, identify_threshold: float=0.5) -> List[Dict]:
schema_matches = []
for tbl_idx in range(self.schema.num_tables):
if self.identify_results[SQLTokenType.table][tbl_idx] < identify_threshold:
continue
table_lemma = self.schema.table_names_lemma[tbl_idx]
for start, end in self._lookup_extract_tokens(table_lemma):
match = { 'type': SQLTokenType.table, 'id': tbl_idx, 'start': start, 'end': end, 'is_distinct': True }
schema_matches.append(match)
for col_idx in range(self.schema.num_columns):
if self.identify_results[SQLTokenType.column][col_idx] < identify_threshold:
continue
column_lemma = self.schema.column_names_lemma[col_idx]
for start, end in self._lookup_extract_tokens(column_lemma):
match = { 'type': SQLTokenType.column, 'id': col_idx, 'start': start, 'end': end, 'is_distinct': True }
schema_matches.append(match)
for i, match in enumerate(schema_matches):
is_distinct = match['is_distinct']
for j in range(i + 1, len(schema_matches)):
match_j = schema_matches[j]
if match_j['start'] > match['end'] or match_j['end'] < match['start']:
continue
is_distinct = False
match_j['is_distinct'] = False
match['is_distinct'] = is_distinct
return schema_matches
def _is_ngram_tokens(self, token1: Token, token2: Token, entity: str):
ngram12 = '{} {}'.format(token1.lemma, token2.lemma)
return ngram12 in entity
def init_skeleton_alignments(self, align_weights: Dict[SQLTokenType, List[List[float]]]):
alignments = self._init_alignments(align_weights)
question_align_labels = []
for q_idx in range(len(self.question.tokens)):
if q_idx not in alignments or len(alignments[q_idx]) == 0:
question_align_labels.append(AlignmentLabel(token=self.question.tokens[q_idx], align_type=SQLTokenType.null, align_value=None, confidence=1.0))
continue
question_align_labels.append(alignments[q_idx][0])
return question_align_labels
def _init_alignments(self, align_weights: Dict[SQLTokenType, List[List[float]]]) -> Dict[int, List[AlignmentLabel]]:
alignments = defaultdict(list)
threshold = self.threshold
low_threshold = max(0.2, 1.0 / self.question.num_tokens)
# First is Value as value has span information
val_align_weights = align_weights[SQLTokenType.value]
col_align_weights = align_weights[SQLTokenType.column]
tbl_align_weights = align_weights[SQLTokenType.table]
columns_with_value = set([])
for v_idx, value in enumerate(self.matched_values):
if self.identify_results[SQLTokenType.value][v_idx] < 0.5:
continue
confidence = self.identify_results[SQLTokenType.value][v_idx]
for q_idx in range(value.start, value.end + 1):
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.value,
align_value="VAL_{}".format(value.column),
confidence=confidence))
columns_with_value.add(value.column)
for c_idx in range(1, self.schema.num_columns): # Ignore column *
if self.identify_results[SQLTokenType.column][c_idx] < 0.5:
continue
align_vector = np.array(col_align_weights[c_idx]) # * self.identify_results[SQLTokenType.column][c_idx]
ranks = np.argsort(align_vector)[::-1]
total_score = 0.0
for rk in range(len(align_vector)):
q_idx = ranks[rk]
score = align_vector[q_idx]
if score < threshold / len(self.schema.column_names_lemma[c_idx].split()):
break
if total_score >= threshold:
break
total_score += score
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.column,
align_value=self.schema.get_col_identifier_name(c_idx),
confidence=score))
for t_idx in range(self.schema.num_tables):
if self.identify_results[SQLTokenType.table][t_idx] < 0.5:
continue
align_vector = np.array(tbl_align_weights[t_idx]) #* self.identify_results[SQLTokenType.table][t_idx]
ranks = np.argsort(align_vector)[::-1]
total_score = 0.0
for rk in range(len(align_vector)):
q_idx = ranks[rk]
score = align_vector[q_idx]
if score < low_threshold or rk > 4:
break
if total_score >= threshold:
break
total_score += score
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.table,
align_value=self.schema.get_tbl_identifier_name(t_idx),
confidence=score
))
for q_idx in alignments:
alignments[q_idx] = list(sorted(alignments[q_idx], key=lambda x: self.get_alignment_label_sort_weight(x), reverse=True))
return alignments
def get_alignment_label_sort_weight(self, align_label: AlignmentLabel) -> float:
if align_label.align_type == SQLTokenType.value:
return 100.0 + align_label.confidence
elif align_label.align_type == SQLTokenType.column:
column_idx = self.schema.id_map[align_label.align_value]
weight = 1.0
if align_label.token.lemma.lower() in self.schema.column_names_original[column_idx].lower():
weight = 1.5
return align_label.confidence * weight
elif align_label.align_type == SQLTokenType.table:
table_idx = self.schema.id_map[align_label.align_value]
weight = 1.0
if align_label.token.lemma.lower() in self.schema.table_names_original[table_idx].lower():
weight *= 1.5
return align_label.confidence * weight
else:
print(align_label)
raise NotImplementedError()
class SpiderGreedyLinker:
schema: SpiderSchema
question: Utterance
matched_values: List[ValueMatch]
identify_results: Dict[SQLTokenType, List[float]]
alignment_dict: Dict[Tuple[SQLTokenType, int, int], Tuple[float, int]]
threshold: float
def __init__(self, schema: SpiderSchema, question: Utterance, matched_values: List[ValueMatch], threshold=0.3) -> None:
self.schema = schema
self.question = question
self.matched_values = matched_values
self.threshold = threshold
'''
Lookup all linking relations with different confidences
'''
def search_all(self, identify_results: Dict[SQLTokenType, List[float]], align_weights: Dict[SQLTokenType, List[List[float]]]) -> List[List[Dict]]:
assert len(identify_results[SQLTokenType.table]) == len(self.schema.table_names_original)
assert len(identify_results[SQLTokenType.column]) == len(self.schema.column_names_original)
assert len(identify_results[SQLTokenType.value]) == len(self.matched_values)
self.identify_results = identify_results
init_alignments = self._init_alignments(align_weights)
alignments_with_scores = defaultdict(list)
for q_idx, align_labels in init_alignments.items():
for rk, align_label in enumerate(align_labels):
slsql_label = align_label.to_slsql(self.schema)
if align_label.confidence > self.threshold and rk < 1:
slsql_label['confidence'] = 'high'
else:
slsql_label['confidence'] = 'low'
is_added = False
for label in alignments_with_scores[q_idx]:
if label['type'] == slsql_label['type'] and label['id'] == slsql_label['id']:
is_added = True
break
if not is_added:
alignments_with_scores[q_idx].append(slsql_label)
schema_extact_matches = self._lookup_all_schema_extract_matches()
for match in schema_extact_matches:
if self.identify_results[match['type']][match['id']] < 0.5:
continue
align_value = self.schema.get_identifier_name(match['type'].abbr, match['id'])
slsql_label = { 'type': match['type'].abbr, 'id': match['id'], 'confidence': 'low', 'value': align_value }
is_all_unmatched = True
for q_idx in range(match['start'], match['end'] + 1):
if q_idx in init_alignments and \
not (len(init_alignments[q_idx]) == 1 and init_alignments[q_idx][0].align_type == match['type'] and self.schema.id_map[init_alignments[q_idx][0].align_value] == match['id']):
is_all_unmatched = False
break
if is_all_unmatched and match['is_distinct']:
slsql_label['confidence'] = 'high'
for q_idx in range(match['start'], match['end'] + 1):
is_added = False
for label in alignments_with_scores[q_idx]:
if label['type'] == slsql_label['type'] and label['id'] == slsql_label['id']:
is_added = True
break
if not is_added:
slsql_label['token'] = self.question.tokens[q_idx].token
alignments_with_scores[q_idx].append(slsql_label)
all_alignment_labels = []
for q_idx in range(self.question.num_tokens):
if q_idx not in alignments_with_scores:
all_alignment_labels.append(None)
else:
sorted_alignments = sorted(alignments_with_scores[q_idx], key=lambda x: x['confidence'] == 'high', reverse=True)
alignment_sets = set([])
distinct_labels = []
for alignment in sorted_alignments:
if (alignment['type'], alignment['id']) in alignment_sets:
continue
alignment_sets.add((alignment['type'], alignment['id']))
distinct_labels.append(alignment)
all_alignment_labels.append(distinct_labels)
return all_alignment_labels
def link(self, identify_results: Dict[SQLTokenType, List[float]], align_weights: Dict[SQLTokenType, List[List[float]]]) -> List[AlignmentLabel]:
assert len(identify_results[SQLTokenType.table]) == len(self.schema.table_names_original)
assert len(identify_results[SQLTokenType.column]) == len(self.schema.column_names_original)
assert len(identify_results[SQLTokenType.value]) == len(self.matched_values)
self.identify_results = identify_results
alignments = self.init_skeleton_alignments(align_weights)
alignments = self.fix_missing(alignments)
return alignments
def fix_missing(self, alignments: List[AlignmentLabel]) -> List[AlignmentLabel]:
# fix column suffix missing
for i, align_label in enumerate(alignments):
if align_label.align_type != SQLTokenType.column:
continue
column_original_lemma = self.schema.column_names_lemma[self.schema.id_map[align_label.align_value]].split()
if i + 1 < len(alignments) and alignments[i+1].align_type == SQLTokenType.null and alignments[i+1].token.lemma in column_original_lemma:
alignments[i+1] = AlignmentLabel(token=alignments[i+1].token, align_type=SQLTokenType.column, align_value=align_label.align_value, confidence=align_label.confidence)
# fix column prefix missing
for i in range(len(alignments)-1, -1, -1):
align_label = alignments[i]
if align_label.align_type != SQLTokenType.column:
continue
column_original_lemma = self.schema.column_names_lemma[self.schema.id_map[align_label.align_value]].split()
if i - 1 >= 0 and alignments[i-1].align_type == SQLTokenType.null and alignments[i-1].token.lemma in column_original_lemma:
alignments[i-1] = AlignmentLabel(token=alignments[i-1].token, align_type=SQLTokenType.column, align_value=align_label.align_value, confidence=align_label.confidence)
schema_extact_matches = self._lookup_all_schema_extract_matches()
# Fix column missing with value exists
for i, align_label in enumerate(alignments):
if align_label.align_type != SQLTokenType.value:
continue
column_name = align_label.align_value.replace("VAL_", "")
column_idx = self.schema.id_map[column_name]
for match in schema_extact_matches:
if match['type'] != SQLTokenType.column or match['id'] != column_idx or not match['is_distinct']:
continue
is_all_unmatched = True
for q_idx in range(match['start'], match['end'] + 1):
if alignments[q_idx].align_type != SQLTokenType.null:
is_all_unmatched = False
break
if is_all_unmatched:
for q_idx in range(match['start'], match['end'] + 1):
alignments[q_idx] = AlignmentLabel(self.question.tokens[q_idx], align_type=SQLTokenType.column, align_value=column_name, confidence=1.0)
# Fix table column that occurs multiple times
for match in schema_extact_matches:
if self.identify_results[match['type']][match['id']] < 0.5 or not match['is_distinct']:
continue
is_all_unmatched = True
for q_idx in range(match['start'], match['end'] + 1):
if alignments[q_idx].align_type != SQLTokenType.null:
is_all_unmatched = False
break
if is_all_unmatched:
align_value = self.schema.get_identifier_name(type=match['type'].abbr, index=match['id'])
for q_idx in range(match['start'], match['end'] + 1):
alignments[q_idx] = AlignmentLabel(self.question.tokens[q_idx], align_type=match['type'], align_value=align_value, confidence=1.0)
return alignments
def _lookup_extract_tokens(self, query: str) -> List[Tuple[int, int]]:
ngrams = permutate_ngrams(tokens=[x.lemma for x in self.question.tokens])
matched_spans = []
for i, j, ngram in ngrams:
if ngram == query:
matched_spans.append((i, j))
return matched_spans
def _lookup_all_schema_extract_matches(self, identify_threshold: float=0.5) -> List[Dict]:
schema_matches = []
for tbl_idx in range(self.schema.num_tables):
if self.identify_results[SQLTokenType.table][tbl_idx] < identify_threshold:
continue
table_lemma = self.schema.table_names_lemma[tbl_idx]
for start, end in self._lookup_extract_tokens(table_lemma):
match = { 'type': SQLTokenType.table, 'id': tbl_idx, 'start': start, 'end': end, 'is_distinct': True }
schema_matches.append(match)
for col_idx in range(self.schema.num_columns):
if self.identify_results[SQLTokenType.column][col_idx] < identify_threshold:
continue
column_lemma = self.schema.column_names_lemma[col_idx]
for start, end in self._lookup_extract_tokens(column_lemma):
match = { 'type': SQLTokenType.column, 'id': col_idx, 'start': start, 'end': end, 'is_distinct': True }
schema_matches.append(match)
for i, match in enumerate(schema_matches):
is_distinct = match['is_distinct']
for j in range(i + 1, len(schema_matches)):
match_j = schema_matches[j]
if match_j['start'] > match['end'] or match_j['end'] < match['start']:
continue
is_distinct = False
match_j['is_distinct'] = False
match['is_distinct'] = is_distinct
return schema_matches
def _is_ngram_tokens(self, token1: Token, token2: Token, entity: str):
ngram12 = '{} {}'.format(token1.lemma, token2.lemma)
return ngram12 in entity
def init_skeleton_alignments(self, align_weights: Dict[SQLTokenType, List[List[float]]]):
alignments = self._init_alignments(align_weights)
question_align_labels = []
for q_idx in range(len(self.question.tokens)):
if q_idx not in alignments or len(alignments[q_idx]) == 0:
question_align_labels.append(AlignmentLabel(token=self.question.tokens[q_idx], align_type=SQLTokenType.null, align_value=None, confidence=1.0))
continue
question_align_labels.append(alignments[q_idx][0])
return question_align_labels
def _init_alignments(self, align_weights: Dict[SQLTokenType, List[List[float]]]) -> Dict[int, List[AlignmentLabel]]:
alignments = defaultdict(list)
threshold = self.threshold
low_threshold = max(0.05, 1.0 / self.question.num_tokens)
# First is Value as value has span information
val_align_weights = align_weights[SQLTokenType.value]
col_align_weights = align_weights[SQLTokenType.column]
tbl_align_weights = align_weights[SQLTokenType.table]
columns_with_value = set([])
for v_idx, value in enumerate(self.matched_values):
if self.identify_results[SQLTokenType.value][v_idx] < 0.5:
continue
confidence = self.identify_results[SQLTokenType.value][v_idx]
for q_idx in range(value.start, value.end + 1):
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.value,
align_value="VAL_{}".format(value.column),
confidence=confidence))
columns_with_value.add(value.column)
for c_idx in range(1, self.schema.num_columns): # Ignore column *
# if self.identify_results[SQLTokenType.column][c_idx] < 0.5:
# continue
align_vector = np.array(col_align_weights[c_idx]) * self.identify_results[SQLTokenType.column][c_idx]
ranks = np.argsort(align_vector)[::-1]
for rk in range(len(align_vector)):
q_idx = ranks[rk]
score = align_vector[q_idx] #* self.identify_results[SQLTokenType.column][c_idx]
# if self.schema.get_column_full_name(c_idx) in columns_with_value:
# score *= 0.5
if score >= threshold:
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.column,
align_value=self.schema.get_col_identifier_name(c_idx),
confidence=score
))
if score >= low_threshold and self.identify_results[SQLTokenType.column][c_idx] > 0.5:
if rk < 1 or self.question.tokens[q_idx].lemma in self.schema.column_names_lemma[c_idx].split(' '):
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.column,
align_value=self.schema.get_col_identifier_name(c_idx),
confidence=score
))
for t_idx in range(self.schema.num_tables):
# if self.identify_results[SQLTokenType.table][t_idx] < 0.5:
# continue
align_vector = np.array(tbl_align_weights[t_idx]) * self.identify_results[SQLTokenType.table][t_idx]
ranks = np.argsort(align_vector)[::-1]
for rk in range(len(align_vector)):
q_idx = ranks[rk]
score = align_vector[q_idx] #* self.identify_results[SQLTokenType.table][t_idx]
if score >= threshold:
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.table,
align_value=self.schema.table_names_original[t_idx].lower(),
confidence=score
))
if score >= low_threshold and self.identify_results[SQLTokenType.table][t_idx] > 0.5:
if rk < 1 or self.question.tokens[q_idx].lemma in self.schema.table_names_lemma[t_idx].split(' '):
alignments[q_idx].append(AlignmentLabel(
token=self.question.tokens[q_idx],
align_type=SQLTokenType.table,
align_value=self.schema.get_tbl_identifier_name(t_idx),
confidence=score
))
for q_idx in alignments:
alignments[q_idx] = list(sorted(alignments[q_idx], key=lambda x: self.get_alignment_label_sort_weight(x), reverse=True))
return alignments
def get_alignment_label_sort_weight(self, align_label: AlignmentLabel) -> float:
if align_label.align_type == SQLTokenType.value:
return 100.0 + align_label.confidence
elif align_label.align_type == SQLTokenType.column:
column_idx = self.schema.id_map[align_label.align_value]
weight = 1.0
if align_label.token.lemma.lower() in self.schema.column_names_original[column_idx].lower():
weight = 1.5
return align_label.confidence * weight
elif align_label.align_type == SQLTokenType.table:
table_idx = self.schema.id_map[align_label.align_value]
weight = 1.0
if align_label.token.lemma.lower() in self.schema.table_names_original[table_idx].lower():
weight *= 1.5
return align_label.confidence * weight
else:
print(align_label)
raise NotImplementedError()
def greedy_link_spider(
identify_logits: Dict[SQLTokenType, torch.Tensor],
alignment_weights: Dict[SQLTokenType, torch.Tensor],
question: Utterance,
schema: SpiderSchema,
values: List[ValueMatch],
threshold: float = 0.25
) -> List[AlignmentLabel]:
linker = SpiderGreedyLinker(schema=schema, question=question, matched_values=values, threshold=threshold)
for align_type in identify_logits:
identify_logits[align_type] = torch.softmax(identify_logits[align_type], dim=-1)[:, 1].cpu().tolist()
alignment_weights[align_type] = alignment_weights[align_type].cpu().tolist()
alignment_labels = linker.link(identify_results=identify_logits, align_weights=alignment_weights)
return alignment_labels
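# Illustrative call (tensor shapes are inferred from the code above; the tensors themselves are made up):
#   identify_logits = {SQLTokenType.table: torch.randn(schema.num_tables, 2),
#                      SQLTokenType.column: torch.randn(schema.num_columns, 2),
#                      SQLTokenType.value: torch.randn(len(values), 2)}
#   alignment_weights = {SQLTokenType.table: torch.rand(schema.num_tables, question.num_tokens),
#                        SQLTokenType.column: torch.rand(schema.num_columns, question.num_tokens),
#                        SQLTokenType.value: torch.rand(len(values), question.num_tokens)}
#   labels = greedy_link_spider(identify_logits, alignment_weights, question, schema, values)
#   # labels contains one AlignmentLabel per question token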
def greedy_search_all_spider(
identify_logits: Dict[SQLTokenType, torch.Tensor],
alignment_weights: Dict[SQLTokenType, torch.Tensor],
question: Utterance,
schema: SpiderSchema,
values: List[ValueMatch],
threshold: float = 0.3
) -> List[List[Dict]]:
linker = SpiderGreedyLinker(schema=schema, question=question, matched_values=values, threshold=threshold)
return linker.search_all(identify_results=identify_logits, align_weights=alignment_weights)
def mask_value_alignments(align_weights: torch.Tensor, values: List[ValueMatch]) -> torch.Tensor:
for v_i in range(len(align_weights)):
start, end = values[v_i].start, values[v_i].end
mask = torch.zeros(align_weights.size(1), dtype=torch.bool)
mask[start:end+1] = 1
align_weights[v_i].masked_fill_(mask == 0, 0.0)
return align_weights
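# Illustrative behaviour (the weights and span below are made up): with a single matched value
# spanning question tokens 1..2 and align_weights = tensor([[0.1, 0.7, 0.5, 0.2]]),
# mask_value_alignments returns tensor([[0.0, 0.7, 0.5, 0.0]]); weights outside the value span are zeroed.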
def generate_alignments_spider(
align_weights: Dict[SQLTokenType, torch.Tensor],
question: Utterance,
schema: SpiderSchema,
values: List[ValueMatch],
threshold: float=0.3,
) -> List[AlignmentLabel]:
assert len(align_weights[SQLTokenType.table]) == schema.num_tables
assert len(align_weights[SQLTokenType.column]) == schema.num_columns
assert len(align_weights[SQLTokenType.value]) == len(values)
align_weights[SQLTokenType.value] = mask_value_alignments(align_weights[SQLTokenType.value], values)
align_matrix = torch.cat([align_weights[SQLTokenType.table], align_weights[SQLTokenType.column], align_weights[SQLTokenType.value]], dim=0)
align_matrix = align_matrix.transpose(0, 1) # question_length * num_entities
assert len(align_matrix) == question.num_tokens
align_labels = []
for q_idx in range(question.num_tokens):
max_idx = torch.argmax(align_matrix[q_idx], dim=-1).item()
confidence = align_matrix[q_idx, max_idx]
if confidence < threshold:
align_label = AlignmentLabel(question.tokens[q_idx], SQLTokenType.null, None, 1 - confidence)
align_labels.append(align_label)
continue
if max_idx < schema.num_tables:
align_labels.append(AlignmentLabel(question.tokens[q_idx], SQLTokenType.table, schema.get_tbl_identifier_name(max_idx), confidence))
elif max_idx < schema.num_tables + schema.num_columns:
column_idx = max_idx - schema.num_tables
align_labels.append(AlignmentLabel(question.tokens[q_idx], SQLTokenType.column, schema.get_col_identifier_name(column_idx), confidence))
elif max_idx < schema.num_tables + schema.num_columns + len(values):
value_idx = max_idx - schema.num_tables - schema.num_columns
align_labels.append(AlignmentLabel(question.tokens[q_idx], SQLTokenType.value, 'VAL_{}'.format(values[value_idx].column), confidence))
else:
raise NotImplementedError()
return align_labels | ContextualSP/awakening_latent_grounding/utils/schema_linker.py/0 | {
"file_path": "ContextualSP/awakening_latent_grounding/utils/schema_linker.py",
"repo_id": "ContextualSP",
"token_count": 15121
} | 244 |
import torch
from torch import nn
class LstmRnn(nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.i_dim = input_dim
self.h_dim = hidden_dim
self.lstm = nn.LSTMCell(input_dim, hidden_dim)
self.h0 = nn.Parameter(torch.empty(size=(1, hidden_dim), dtype=torch.float32))
self.c0 = nn.Parameter(torch.empty(size=(1, hidden_dim), dtype=torch.float32))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.h0, val=0)
nn.init.constant_(self.c0, val=0)
nn.init.xavier_uniform_(self.lstm.weight_ih)
nn.init.orthogonal_(self.lstm.weight_hh)
nn.init.constant_(self.lstm.bias_ih, val=0)
nn.init.constant_(self.lstm.bias_hh, val=0)
def forward(self, x, mask, backward=False):
L = x.shape[1]
prev_h = self.h0.expand(x.shape[0], -1)
prev_c = self.c0.expand(x.shape[0], -1)
h = []
for idx in range(L):
idx = L - 1 - idx if backward else idx
mask_idx = mask[:, idx, None]
h_idx, c_idx = self.lstm(x[:, idx], (prev_h, prev_c))
prev_h = h_idx * mask_idx + prev_h * (1. - mask_idx)
prev_c = c_idx * mask_idx + prev_c * (1. - mask_idx)
h.append(prev_h)
return torch.stack(h[::-1] if backward else h, dim=1)
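# Illustrative usage (sizes are made up; `mask` is 1.0 for real positions and 0.0 for padding):
#   rnn = LstmRnn(input_dim=16, hidden_dim=32)
#   x = torch.randn(4, 10, 16)             # (batch, length, input_dim)
#   mask = torch.ones(4, 10)               # (batch, length)
#   h_fwd = rnn(x, mask)                   # (4, 10, 32), left-to-right hidden states
#   h_bwd = rnn(x, mask, backward=True)    # (4, 10, 32), right-to-left hidden states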
| ContextualSP/compositional_generalization/modules/LstmRnn.py/0 | {
"file_path": "ContextualSP/compositional_generalization/modules/LstmRnn.py",
"repo_id": "ContextualSP",
"token_count": 732
} | 245 |
{
"random_seed": 42,
"numpy_seed": 42,
"pytorch_seed": 42,
"dataset_reader": {
"type": "rewrite",
"lazy": false,
"super_mode": "before",
"joint_encoding": true,
"use_bert": true,
"language": "zh",
"extra_stop_words": ["的", "是", "我", "了", "去"]
},
"model": {
"type": "rewrite",
"word_embedder": {
"bert": {
"type": "bert-pretrained",
"pretrained_model": "bert-base-chinese",
"top_layer_only": true,
"requires_grad": true
},
"allow_unmatched_keys": true,
"embedder_to_indexer_map": {
"bert": [
"bert",
"bert-offsets",
"bert-type-ids"
]
}
},
"text_encoder": {
"type": "lstm",
"input_size": 768,
"hidden_size": 200,
"bidirectional": true,
"num_layers": 1
},
"inp_drop_rate": 0.2,
"out_drop_rate": 0.2,
"feature_sel": 83,
"loss_weights": [0.2, 0.2, 0.6],
"super_mode": "before",
"unet_down_channel": 64
},
"iterator": {
"type": "basic",
"batch_size": 12
},
"validation_iterator": {
"type": "basic",
"batch_size": 12
},
"trainer": {
"num_epochs": 100,
"cuda_device": 0,
"patience": 10,
"validation_metric": "+F3",
"optimizer": {
"type": "adam",
"parameter_groups": [
[
[
".*word_embedder.*"
],
{
"lr": 1e-5
}
]
],
"lr": 1e-3
},
"learning_rate_scheduler": {
"type": "reduce_on_plateau",
"factor": 0.5,
"mode": "max",
"patience": 5
},
"num_serialized_models_to_keep": 10,
"should_log_learning_rate": true
}
} | ContextualSP/incomplete_utterance_rewriting/configs/multi_bert.jsonnet/0 | {
"file_path": "ContextualSP/incomplete_utterance_rewriting/configs/multi_bert.jsonnet",
"repo_id": "ContextualSP",
"token_count": 801
} | 246 |
"""
Utility functions for reading the standardised text2sql datasets presented in
`"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_
"""
import json
import os
import sqlite3
from collections import defaultdict
from typing import List, Dict, Optional
from allennlp.common import JsonDict
class TableColumn:
"""
Represents a column of a table.
"""
def __init__(self,
name: str,
text: str,
column_type: str,
is_primary_key: bool,
refer_table,
foreign_key: Optional[List[str]]):
self.name = name
self.text = text
self.column_type = column_type
self.is_primary_key = is_primary_key
self.foreign_key = foreign_key
self.refer_table = refer_table
def __str__(self):
return f'{self.name}'
class Table:
"""
Represents a table.
"""
def __init__(self,
name: str,
text: str,
columns: List[TableColumn]):
self.name = name
self.text = text
self.columns = columns
def read_dataset_schema(schema_path: str):
"""
Read all tables from `schema_path`.
:param schema_path: by default, the `tables.json` file in the sparc data folder.
:return: schemas, a column-id-to-column map, and a column-id-to-table map, each keyed by database id.
"""
schemas: Dict[str, Dict[str, Table]] = defaultdict(dict)
schema_id_to_table: Dict[str, Dict[int, Table]] = defaultdict(dict)
schema_id_to_col: Dict[str, Dict[int, TableColumn]] = defaultdict(dict)
dbs_json_blob = json.load(open(schema_path, "r"))
for db in dbs_json_blob:
db_id = db['db_id']
column_id_to_table = {}
column_id_to_column = {}
for i, (column, text, column_type) in enumerate(zip(db['column_names_original'],
db['column_names'],
db['column_types'])):
table_id, column_name = column
_, column_text = text
table_name = db['table_names_original'][table_id]
if table_name not in schemas[db_id]:
table_text = db['table_names'][table_id]
table_obj = Table(table_name, table_text, [])
schemas[db_id][table_name] = table_obj
# TODO: we cannot add an extra command to handle * problem.
# we now use a special embedding for linking * and predicting action
# if column_name == '*':
# continue
table_obj = schemas[db_id][table_name]
if column_name == "*":
is_primary_key = False
else:
is_primary_key = i in db['primary_keys']
# allocate new column object
column_obj = TableColumn(column_name.lower(), column_text, column_type,
is_primary_key, table_obj, None)
schemas[db_id][table_name].columns.append(column_obj)
column_id_to_column[i] = column_obj
for (c1, c2) in db['foreign_keys']:
foreign_key = column_id_to_column[c2].refer_table.name + ':' + column_id_to_column[c2].name
# TODO: we allow multiple foreign keys to exist so that shortcut joins are possible
if column_id_to_column[c1].foreign_key is None:
column_id_to_column[c1].foreign_key = []
column_id_to_column[c1].foreign_key.append(foreign_key)
for i, table_name in enumerate(db['table_names_original']):
column_id_to_table[i] = schemas[db_id][table_name]
# assign id to column and id to table
schema_id_to_table[db_id] = column_id_to_table
schema_id_to_col[db_id] = column_id_to_column
return {**schemas}, {**schema_id_to_col}, {**schema_id_to_table}
def read_dataset_values(db_id: str, dataset_path: str, tables: List[str]):
db = os.path.join(dataset_path, db_id, db_id + ".sqlite")
try:
conn = sqlite3.connect(db)
except Exception as e:
raise Exception(f"Can't connect to SQL: {e} in path {db}")
conn.text_factory = str
cursor = conn.cursor()
values = {}
for table in tables:
try:
cursor.execute(f"SELECT * FROM {table.name} LIMIT 5000")
values[table] = cursor.fetchall()
except:
conn.text_factory = lambda x: str(x, 'latin1')
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM {table.name} LIMIT 5000")
values[table] = cursor.fetchall()
return values
def ent_key_to_name(key):
parts = key.split(':')
if parts[0] == 'table':
return parts[1]
elif parts[0] == 'column':
_, _, table_name, column_name = parts
return f'{table_name}@{column_name}'
else:
return parts[1]
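# Illustrative inputs (the exact key format is assumed from the unpacking above, not verified against the data):
#   ent_key_to_name('table:singer')             -> 'singer'
#   ent_key_to_name('column:text:singer:name')  -> 'singer@name'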
def fix_number_value(ex: JsonDict):
"""
There is something weird in the dataset files - the `query_toks_no_value` field anonymizes all values,
which is good since the evaluator doesn't check for the values. But it also anonymizes numbers that
should not be anonymized: e.g. LIMIT 3 becomes LIMIT 'value', while the evaluator fails if it is not a number.
"""
def split_and_keep(s, sep):
if not s: return [''] # consistent with string.split()
# Find replacement character that is not used in string
# i.e. just use the highest available character plus one
# Note: This fails if ord(max(s)) = 0x10FFFF (ValueError)
p = chr(ord(max(s)) + 1)
return s.replace(sep, p + sep + p).split(p)
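# Illustrative behaviour (made-up token): split_and_keep('t1.col', '.') -> ['t1', '.', 'col'],
# i.e. the separator is kept as its own token.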
# input is tokenized in different ways... so first try to make splits equal
query_toks = ex['query_toks']
ex['query_toks'] = []
for q in query_toks:
ex['query_toks'] += split_and_keep(q, '.')
i_val, i_no_val = 0, 0
while i_val < len(ex['query_toks']) and i_no_val < len(ex['query_toks_no_value']):
if ex['query_toks_no_value'][i_no_val] != 'value':
i_val += 1
i_no_val += 1
continue
i_val_end = i_val
while i_val + 1 < len(ex['query_toks']) and \
i_no_val + 1 < len(ex['query_toks_no_value']) and \
ex['query_toks'][i_val_end + 1].lower() != ex['query_toks_no_value'][i_no_val + 1].lower():
i_val_end += 1
if i_val == i_val_end and ex['query_toks'][i_val] in ["1", "2", "3"] and ex['query_toks'][i_val - 1].lower() == "limit":
ex['query_toks_no_value'][i_no_val] = ex['query_toks'][i_val]
i_val = i_val_end
i_val += 1
i_no_val += 1
return ex
| ContextualSP/interactive_text_to_sql/src/context/utils.py/0 | {
"file_path": "ContextualSP/interactive_text_to_sql/src/context/utils.py",
"repo_id": "ContextualSP",
"token_count": 3155
} | 247 |
The code and dataset are being cleaned up and will be released soon.
| ContextualSP/knowledge_intensive_text_to_sql/README.md/0 | {
"file_path": "ContextualSP/knowledge_intensive_text_to_sql/README.md",
"repo_id": "ContextualSP",
"token_count": 11
} | 248 |
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict, Counter
import numpy as np
from numpy.testing import assert_approx_equal
def last_k(tokens, k):
"""Get the last k elements of a list as a tuple."""
if not (0 <= k <= len(tokens)):
raise ValueError('k must be between 0 and len(tokens) = {}, got: {}'.format(len(tokens), k))
return tuple(tokens[len(tokens) - k:])
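# Illustrative behaviour: last_k(['a', 'b', 'c', 'd'], 2) -> ('c', 'd');
# last_k(['a', 'b'], 3) raises ValueError because k exceeds len(tokens).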
def replace_parens(tokens):
"""Replace all instances of -LRB- and -RRB- with actual parentheses."""
parens_map = {'-LRB-': '(', '-RRB-': ')'}
return [parens_map.get(s, s) for s in tokens] # return identity if not parens symbols
def normalize_counts(counts):
"""Return a normalized Counter object."""
normed = Counter()
total = sum(list(counts.values()), 0.0)
assert total > 0 # cannot normalize empty Counter
for key, ct in counts.items():
normed[key] = ct / total
normed.old_total = total # document what the total was before normalization
return normed
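# Illustrative behaviour: normalize_counts(Counter({'a': 3, 'b': 1})) -> Counter({'a': 0.75, 'b': 0.25}),
# with the returned counter's `old_total` attribute set to 4.0.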
class LM(object, metaclass=ABCMeta):
"""Language model interface."""
START = '<START>'
END = '<END>'
@abstractmethod
def next_distribution(self, history):
"""Return a distribution over the next token.
Args:
history (List): a list of tokens generated so far
Returns (Counter): a distribution
"""
raise NotImplementedError
@abstractproperty
def max_context_size(self):
"""Return max allowed history context.
Returns (int): maximum size of history context to keep
"""
raise NotImplementedError
class CountLM(LM):
"""Naive language model.
Uses empirical counts from the largest context it has observed. No sophisticated backoff strategy.
Examples:
lm = CountLM(4)
# 'train' the language model
for line in lines:
tokens = line.split()
lm.record_counts(tokens, append_end=True)
"""
def __init__(self, max_context_size):
"""Construct a language model.
Args:
max_context_size (int): maximum # tokens to use as context
"""
self._max_context_size = max_context_size
self.contexts = defaultdict(Counter)
@property
def max_context_size(self):
return self._max_context_size
def _get_contexts(self, tokens):
"""List of contexts, from smallest to largest. Includes empty context.
Returns:
List[Tuple[str]]
"""
contexts = []
max_context = min(self._max_context_size, len(tokens)) # cannot go beyond max tokens
for k in range(max_context + 1):
contexts.append(last_k(tokens, k))
return contexts
def record_counts(self, tokens, append_end):
"""Record counts using `tokens` as a corpus.
Args:
tokens (List[str]): list of strings
"""
history = [LM.START]
if append_end:
tokens = tokens + [LM.END]
for tok in tokens:
for context in self._get_contexts(history):
self.contexts[context][tok] += 1
history.append(tok) # update history
def _largest_context(self, history, contexts):
"""Find the largest context which matches history.
Args:
history (List[str]): a sequence of tokens
contexts (Set[Tuple[str]]): a set of contexts, must include the empty context
Returns:
Tuple[str]: an item from contexts, which may be the empty context
"""
assert tuple() in contexts # empty context must be present
for context in reversed(self._get_contexts(history)):
if context in contexts:
return context
def _largest_known_context(self, history):
"""Find the largest recorded context which matches history."""
return self._largest_context(history, self.contexts)
def next_distribution(self, history):
"""Given a history, return a distribution (Counter) over the next token."""
context = self._largest_known_context(history)
counts = self.contexts[context]
normed = normalize_counts(counts)
normed.context = context
return normed
def sequence_probability(self, tokens):
"""Return the probability of each token in an article, based on the language model.
Args:
tokens (List): a list of tokens in the article
Returns:
List[Tuple[str, float]]: an ordered list of token-probability pairs"""
history = [LM.START]
probabilities = []
for word in tokens:
distr = self.next_distribution(history)
if word in distr:
probabilities.append((word, distr[word]))
else:
probabilities.append((word, 0.0))
history.append(word)
return probabilities
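# Illustrative usage (the toy corpus is made up):
#   lm = CountLM(max_context_size=2)
#   lm.record_counts('the cat sat'.split(), append_end=True)
#   lm.record_counts('the cat ran'.split(), append_end=True)
#   lm.next_distribution([LM.START, 'the', 'cat'])
#   # -> Counter({'sat': 0.5, 'ran': 0.5}), backed by the largest known context ('the', 'cat')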
class KNNLM(LM):
def __init__(self, article_embeddings, max_context_size, k_nearest):
"""Construct k-nearest-neighbor language model.
Args:
article_embeddings (ArticleEmbeddings): embeddings of each article
max_context_size (int): max history to consider for CountLM
k_nearest (int): # neighbors to consider
"""
self.article_embeddings = article_embeddings
self.k = k_nearest
self.lm = CountLM(max_context_size)
def record_nearest_counts(self, vec):
name_score_pairs = self.article_embeddings.k_nearest_approx(vec, self.k)
articles = [self.article_embeddings.name_to_article(name) for name, score in name_score_pairs]
for art in articles:
self.lm.record_counts(art.tokens, append_end=True)
def next_distribution(self, history):
return self.lm.next_distribution(history)
@property
def max_context_size(self):
return self.lm.max_context_size
def sequence_probability(self, tokens):
return self.lm.sequence_probability(tokens)
class Generator(object, metaclass=ABCMeta):
"""Interface for language generator."""
@abstractmethod
def init_history(self):
"""Return a sequence of tokens to initialize the history."""
pass
@abstractmethod
def get_next(self, history):
"""Get next token, given history."""
pass
@abstractmethod
def stop_or_not(self, history):
"""Given what has been generated, decide whether to stop."""
pass
@abstractproperty
def max_context_size(self):
"""Return max allowed history context.
Returns (int): maximum size of history context to keep
"""
raise NotImplementedError
def truncate_history(self, history):
"""Truncate history when it grows much longer than max context size."""
if len(history) > 2 * self.max_context_size:
return list(last_k(history, self.max_context_size))
return history
def generate(self, history=None):
"""Generate a sequence of tokens."""
if not history:
history = self.init_history()
return self.generate_custom(history, self.get_next, self.stop_or_not)
def generate_custom(self, history, next_fxn, stop_fxn):
"""Generate a sequence using a custom next-token function and a custom stopping function.
Args:
history (List[T]): initial history
next_fxn (Callable[[List[T]], T]): given a history, produce the next token
stop_fxn (Callable[[List[T]], bool]): given a history, decide whether to stop
"""
generated = []
history = list(history) # make a copy
while True:
next = next_fxn(history)
history.append(next)
history = self.truncate_history(history)
if stop_fxn(history):
break
generated.append(next)
return generated
class LMSampler(Generator):
"""Generation by sampling from a language model."""
def __init__(self, lm):
"""Construct a LM sampler.
Args:
lm (LM): a language model
"""
self.lm = lm
@property
def max_context_size(self):
return self.lm.max_context_size
def _sample_from_distribution(self, distr):
"""Sample from a categorical distribution.
Args:
distr (Counter): values must sum to 1
Returns:
one of the keys of distr
"""
keys, probs = list(zip(*list(distr.items())))
assert_approx_equal(sum(probs), 1.)
return np.random.choice(keys, p=probs)
def init_history(self):
return [self.lm.START]
def get_next(self, history):
return self._sample_from_distribution(self.lm.next_distribution(history))
def stop_or_not(self, history):
return history[-1] == LM.END
@staticmethod
def format_generation(tokens):
return ' '.join(replace_parens(tokens))
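# Illustrative sampling loop (assumes `lm` is a CountLM trained as in the example above):
#   sampler = LMSampler(lm)
#   tokens = sampler.generate()                   # sampled tokens, without START/END markers
#   print(LMSampler.format_generation(tokens))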
class DistributionStats(object):
def __init__(self, distr):
self.total = distr.old_total
self.context = distr.context
probs = list(distr.values())
assert_approx_equal(sum(probs), 1.)
self.entropy = -1. * sum([p * np.log(p) for p in probs])
def __repr__(self):
return '{}:{}:{}'.format(len(self.context), self.total, self.entropy)
class LMSamplerWithStats(LMSampler):
def init_history(self):
return [(LM.START, 0)]
def get_next(self, history):
token_history, _ = list(zip(*history))
distr = self.lm.next_distribution(token_history)
next_token = self._sample_from_distribution(distr)
return next_token, DistributionStats(distr)
def stop_or_not(self, history):
word = lambda pair: pair[0]
return word(history[-1]) == LM.END
@staticmethod
def format_generation(token_stat_pairs):
tokens, stats = list(zip(*list(token_stat_pairs)))
tokens = replace_parens(tokens)
tokens = ['{:20}[{}]'.format(tok, stat) for tok, stat in zip(tokens, stats)]
return '\n'.join(tokens)
| ContextualSP/lemon/executor/gtd/lm.py/0 | {
"file_path": "ContextualSP/lemon/executor/gtd/lm.py",
"repo_id": "ContextualSP",
"token_count": 4219
} | 249 |
from abc import abstractmethod
from collections.abc import Sequence, Mapping
import numpy as np
import pytest
import tensorflow as tf
from keras.engine import Input
from keras.layers import Dense
from numpy.testing import assert_array_almost_equal
from gtd.ml.framework import Feedable, KerasModel
from gtd.ml.utils import guarantee_initialized_variables, clean_session
from gtd.utils import Bunch
@pytest.fixture
def clean_test_session():
with clean_session() as sess:
yield sess
def assert_array_collections_equal(correct, test, decimal=7):
"""Assert that two collections of numpy arrays have the same values.
Collections can be either a Sequence or a Mapping.
"""
if type(correct) != type(test):
raise ValueError('correct ({}) and test ({}) must have the same type.'.format(type(correct), type(test)))
assert_equal = lambda c, t: assert_array_almost_equal(c, t, decimal=decimal)
if isinstance(correct, Sequence):
assert len(correct) == len(test)
for c, t in zip(correct, test):
assert_equal(c, t)
elif isinstance(correct, Mapping):
# same keys
assert set(test.keys()) == set(correct.keys())
# same values
for key in test:
assert_equal(correct[key], test[key])
else:
raise TypeError('Inputs must be of type Mapping or Sequence, not {}.'.format(type(correct)))
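# Illustrative checks (arrays are made up):
#   assert_array_collections_equal([np.zeros(2)], [np.zeros(2)])            # passes
#   assert_array_collections_equal({'w': np.ones(3)}, {'w': np.ones(3)})    # passes
#   assert_array_collections_equal([np.zeros(2)], (np.zeros(2),))           # raises ValueError (list vs tuple)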
class FeedableTester(object):
"""A template for testing Feedable classes.
Subclass this class and implement all of its abstractmethods.
NOTE:
You must decorate the implementation of each abstractmethod with a @pytest.fixture decorator.
See the `TestFeedable` class below for an example.
"""
@abstractmethod
def model(self):
"""The Model to be tested."""
pass
@abstractmethod
def inputs(self):
"""Inputs to the model.
Returns:
(list, dict): an args, kwargs pair
"""
pass
@classmethod
def as_args_kwargs(cls, *args, **kwargs):
return args, kwargs
@abstractmethod
def feed_dict(self):
"""Return the correct result of the model's `feed_dict` method."""
pass
@abstractmethod
def output_tensors(self):
"""Output tensors to be fetched.
Returns:
list[np.array]
"""
pass
@abstractmethod
def outputs(self):
"""Return the correct results of running model.compute(fetch=output_tensors, ...)
Returns:
list[np.array]
"""
pass
@pytest.mark.usefixtures('clean_test_session')
def test_inputs_to_feed_dict(self, model, inputs, feed_dict):
"""Test for correct feed_dict."""
args, kwargs = inputs
test_feed_dict = model.inputs_to_feed_dict(*args, **kwargs)
assert_array_collections_equal(feed_dict, test_feed_dict)
@pytest.mark.usefixtures('clean_test_session')
def test_outputs(self, model, inputs, output_tensors, outputs):
"""Test for correct output."""
sess = tf.get_default_session()
guarantee_initialized_variables(sess)
args, kwargs = inputs
test_outputs = model.compute(output_tensors, *args, **kwargs)
assert_array_collections_equal(outputs, test_outputs, decimal=4)
class KerasModelTester(FeedableTester):
@pytest.fixture
def output_tensors(self, model):
return model.output_tensors
@pytest.mark.usefixtures('clean_test_session')
def test_placeholders(self, model, feed_dict):
"""Test that model.placeholders matches the keys of feed_dict."""
assert set(model.placeholders) == set(feed_dict.keys())
class FeedableExample(Feedable):
def __init__(self):
x = tf.placeholder(tf.float32, shape=[], name='x')
y = tf.get_variable('y', shape=[], initializer=tf.constant_initializer(2.0))
z = x * y
self.x = x
self.y = y
self.z = z
def inputs_to_feed_dict(self, batch):
return {self.x: batch.x}
class TestFeedableExample(FeedableTester):
@pytest.fixture
def model(self):
return FeedableExample()
@pytest.fixture
def inputs(self):
return self.as_args_kwargs(Bunch(x=5.0))
@pytest.fixture
def feed_dict(self, model):
return {model.x: 5.0}
@pytest.fixture
def output_tensors(self, model):
return [model.z]
@pytest.fixture
def outputs(self):
return [10.0]
class KerasLayersModelExample(KerasModel):
"""A Model that is defined using Keras layers from beginning to end."""
def __init__(self):
x = Input([1])
y = np.array([[2.0]])
b = np.array([0.0])
mult = Dense(1, weights=(y, b))
z = mult(x)
self.x = x
self.mult = mult
self.z = z
@property
def placeholders(self):
return [self.x]
def inputs_to_feed_dict(self, batch):
return {self.x: np.array([[batch.x]])}
@property
def output_tensors(self):
return [self.z]
class TestKerasLayersModel(KerasModelTester):
@pytest.fixture
def model(self):
return KerasLayersModelExample()
@pytest.fixture
def inputs(self):
return self.as_args_kwargs(Bunch(x=5.0))
@pytest.fixture
def feed_dict(self, model):
return {model.x: 5.0}
@pytest.fixture
def outputs(self):
return [10.0] | ContextualSP/lemon/executor/gtd/tests/ml/test_framework.py/0 | {
"file_path": "ContextualSP/lemon/executor/gtd/tests/ml/test_framework.py",
"repo_id": "ContextualSP",
"token_count": 2267
} | 250 |
from collections import namedtuple
import numpy as np
from gtd.utils import flatten
from strongsup.case_weighter import get_case_weighter
from strongsup.value_function import get_value_function, ValueFunctionExample
class NormalizationOptions(object):
"""Constants for normalization options"""
LOCAL = 'local'
GLOBAL = 'global'
# used by the Decoder to compute gradients
WeightedCase = namedtuple('WeightedCase', ['case', 'weight'])
class Decoder(object):
"""A decoder does two things:
- Given a batch of examples, produce a Beam (list of ParsePaths) for each example.
Internally it uses an ExplorationPolicy to produce beams, and a ParseModel
to score the ParseCases.
- Given a batch of Beams, update the model parameters by passing appropriate
ParseCases to the TrainParseModel.
"""
def __init__(self, parse_model, config, domain):
"""Create a new decoder.
Args:
parse_model (TrainParseModel)
config (Config): The decoder section of the config
domain (Domain)
"""
self._parse_model = parse_model
self._value_function = get_value_function(
config.value_function, parse_model.parse_model)
self._case_weighter = get_case_weighter(
config.case_weighter, parse_model.parse_model,
self._value_function)
self._config = config
self._caching = config.inputs_caching
self._domain = domain
self._path_checker = domain.path_checker
# Normalization and update policy
self._normalization = config.normalization
if config.normalization == NormalizationOptions.GLOBAL:
raise ValueError('Global normalization is no longer supported.')
# Exploration policy
# TODO: Resolve this circular import differently
from strongsup.exploration_policy import get_exploration_policy
self._test_exploration_policy = get_exploration_policy(
self, config.test_exploration_policy,
self._normalization, train=False)
self._train_exploration_policy = get_exploration_policy(
self, config.train_exploration_policy,
self._normalization, train=True)
@property
def parse_model(self):
return self._parse_model
@property
def caching(self):
return self._caching
@property
def domain(self):
return self._domain
def exploration_policy(self, train):
"""Returns the train or test exploration policy depending on
train
Args:
train (bool)
Returns:
ExplorationPolicy
"""
if train:
return self._train_exploration_policy
else:
return self._test_exploration_policy
def path_checker(self, path):
"""Return False if the ParsePath should be pruned away; True otherwise.
Args:
path (ParsePath)
Returns:
bool
"""
return self._path_checker(path)
def get_probs(self, beam):
"""Return a numpy array containing the probabilities of the paths
in the given beam.
The entries may not sum to 1 for local normalization since we have
pruned away choices that are not executable.
Args:
beam (Beam)
Returns:
np.array of length len(beam) containing the probabilities.
"""
if len(beam) == 0:
return np.zeros(0)
if self._normalization == NormalizationOptions.LOCAL:
return np.exp(np.array([path.log_prob for path in beam]))
else:
stuff = np.array([path.score for path in beam])
stuff = np.array(stuff - np.min(stuff))
return stuff / np.sum(stuff)
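# Illustrative values (numbers are made up): under local normalization, a beam whose paths have
# log probs [-0.69, -1.61] yields probabilities of roughly [0.50, 0.20]; they need not sum to 1
# because non-executable choices were pruned from the beam.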
################################
# Prediction
def predictions(self, examples, train, verbose=False):
"""Return the final beams for a batch of contexts.
Args:
contexts (list[Context]): a batch of Contexts
verbose (bool)
train (bool): If you're training or evaluating
Returns:
list[Beam]: a batch of Beams
"""
exploration_policy = self.exploration_policy(train)
beams = exploration_policy.get_beams(examples, verbose)
return [beam.get_terminated() for beam in beams]
def get_intermediate_beams(self, examples, train, verbose=False):
exploration_policy = self.exploration_policy(train)
return exploration_policy.get_intermediate_beams(examples, verbose)
def score_breakdown(self, paths):
"""Return the logits for all (parse case, choice, scorer) tuples.
Args:
paths (list[ParsePath])
Returns:
grouped_attentions:
a list of length(paths). Each entry is an np.array of shape
(>= len(utterance)) containing the attention scores
grouped_subscores:
a list of length len(paths). Each entry is an np.array of shape
(>= number of cases, len(choices), number of scorers)
containing the logits of each scorer on each choice.
By default there are 3 scorers: basic, attention, and soft copy.
"""
if len(paths) == 0:
return [], []
cumul = [0] # Used to group the results back
cases = []
for path in paths:
for case in path:
cases.append(case)
cumul.append(len(cases))
# Get the scores from the model
attentions, subscores = self._parse_model.score_breakdown(cases, ignore_previous_utterances=False,
caching=False)
# Group the scores by paths
grouped_attentions, grouped_subscores = [], []
for i in range(len(paths)):
grouped_attentions.append(attentions[cumul[i]:cumul[i+1]])
grouped_subscores.append(subscores[cumul[i]:cumul[i+1]])
return grouped_attentions, grouped_subscores
################################
# Training
def train_step(self, examples):
# sample a beam of logical forms for each example
beams = self.predictions(examples, train=True)
all_cases = [] # a list of ParseCases to give to ParseModel
all_case_weights = [] # the weights associated with the cases
for example, paths in zip(examples, beams):
case_weights = self._case_weighter(paths, example)
case_weights = flatten(case_weights)
cases = flatten(paths)
assert len(case_weights) == sum(len(p) for p in paths)
all_cases.extend(cases)
all_case_weights.extend(case_weights)
# for efficiency, prune cases with weight 0
cases_to_reinforce = []
weights_to_reinforce = []
for case, weight in zip(all_cases, all_case_weights):
if weight != 0:
cases_to_reinforce.append(case)
weights_to_reinforce.append(weight)
# update value function
vf_examples = []
for example, paths in zip(examples, beams):
vf_examples.extend(ValueFunctionExample.examples_from_paths(paths, example))
self._value_function.train_step(vf_examples)
# update parse model
self._parse_model.train_step(
cases_to_reinforce, weights_to_reinforce, caching=False)
@property
def step(self):
return self._parse_model.step
| ContextualSP/lemon/executor/strongsup/decoder.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/decoder.py",
"repo_id": "ContextualSP",
"token_count": 3240
} | 251 |
class EntrySelector(object):
"""Given a list of Entries, returns single Entry based on some
criteria.
Args:
entries (list[Entry]): the entries
"""
def __init__(self, entries):
self._entries = entries
@property
def best_any_seed(self):
"""Returns the Entry with the best ResultValue over any seed."""
if len(self._entries) == 0:
return None
return max(self._entries, key=lambda entry: entry.best[1])
@property
def best_avg(self):
"""Returns the Entry with the best ResultValue averaged over
all seeds."""
if len(self._entries) == 0:
return None
return max(self._entries, key=lambda entry: entry.avg)
| ContextualSP/lemon/executor/strongsup/results/entry_selector.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/results/entry_selector.py",
"repo_id": "ContextualSP",
"token_count": 295
} | 252 |
import itertools
import time
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import deque
import logging
from strongsup.parse_case import ParseCase
from strongsup.value import check_denotation
class StaticCase(object, metaclass=ABCMeta):
"""Like a ParseCase, but only statically analyzed, never dynamically executed.
Primarily used by StaticBatchExploration.
"""
@abstractmethod
def seeds(cls):
"""Return a list of seed cases to start searching from."""
pass
@abstractmethod
def extend(self, predicate):
"""Return a new StaticCase which extends from this one."""
pass
@abstractproperty
def choices(self):
"""Choices available from this state."""
pass
@abstractproperty
def length(self):
"""Length of episode so far."""
pass
@abstractproperty
def utterances_read(self):
"""Number of utterances processed so far in this episode."""
pass
@abstractproperty
def stack_depth(self):
"""Depth of execution stack."""
pass
@abstractproperty
def path(self):
"""Return a list of StaticCases."""
pass
class AlchemyCase(object):
__slots__ = ['predicate', 'prev_case', 'length', 'utterances_read', 'execution_stack', 'command_history']
choices = [
'r', 'y', 'g', 'o', 'p', 'b',
'1', '2', '3', '4', '5', '6', '7',
'-1',
'X1/1',
'PColor',
'APour', 'AMix', 'ADrain',
'all-objects', 'index',
'H0', 'H1', 'H2',
]
def __init__(self, predicate, prev_case, length, utterances_read, execution_stack, command_history):
self.predicate = predicate
self.prev_case = prev_case
self.length = length
self.utterances_read = utterances_read
self.execution_stack = execution_stack
self.command_history = command_history
@classmethod
def seeds(cls):
seeds = []
for p in cls.choices:
state = cls._update_state([], [], p)
if state is None:
continue
exec_stack, cmd_history = state
case = AlchemyCase(p, None, 1, 0, exec_stack, cmd_history)
seeds.append(case)
return seeds
def extend(self, predicate):
state = self._update_state(self.execution_stack, self.command_history, predicate)
if state is None:
return None # predicate leads to invalid state
exec_stack, cmd_history = state
utterances_read = self.utterances_read
if predicate[0] == 'A' or predicate == 'H0':
utterances_read += 1
return AlchemyCase(predicate, self, self.length + 1, utterances_read, exec_stack, cmd_history)
@property
def stack_depth(self):
return len(self.execution_stack)
@property
def path(self):
path = []
current = self
while True:
path.append(current)
current = current.prev_case
if current is None:
break
path.reverse()
return path
@classmethod
def _get_args_from_stack(cls, exec_stack, predicate):
if predicate in ('APour', 'ADrain', 'index'):
n = 2
elif predicate in ('AMix', 'PColor') or predicate[0] == 'H':
n = 1
else:
return None
if len(exec_stack) < n: # not enough arguments
return None
return exec_stack[-n:]
def __repr__(self):
return self.predicate
@classmethod
def _update_state(cls, exec_stack, command_history, predicate):
"""
We assume action clears stack.
Args:
exec_stack
command_history
predicate
Returns:
new_exec_stack, new_command_history
"""
# TYPES
COLOR = 'CLR'
BEAKER = 'BKR'
LIST = 'LST'
is_number = lambda s: s in ('1', '2', '3', '4', '5', '6', '7', '-1')
# SIMPLE VALUES
if predicate in ('r', 'y', 'g', 'o', 'p', 'b'):
# abstract to COLOR
return exec_stack + [COLOR], list(command_history)
if is_number(predicate):
# preserve numbers exactly
return exec_stack + [predicate], list(command_history)
if predicate == 'all-objects':
# abstract to LIST
return exec_stack + [LIST], list(command_history)
# FUNCTIONS
args = cls._get_args_from_stack(exec_stack, predicate)
if args is None:
return None # not enough arguments
logging.debug('Args peeked: {}'.format(args))
prefix = predicate[0]
# actions
if prefix == 'A':
logging.debug('Processing action')
logging.debug(exec_stack)
if len(args) != len(exec_stack): # action must clear stack
return None
# type check
if predicate == 'APour':
if args != [BEAKER, BEAKER]:
return None
if predicate == 'ADrain':
if args[0] != BEAKER or not is_number(args[1]):
return None
if predicate == 'AMix':
if args != [BEAKER]:
return None
new_stack = []
new_command_history = list(command_history)
new_command_history.append([predicate] + args)
return new_stack, new_command_history
if predicate == 'PColor':
if args[0] != COLOR:
return None
new_stack = exec_stack[:-1]
new_stack.append(LIST)
return new_stack, list(command_history)
if predicate == 'index':
if args[0] != LIST or not is_number(args[1]):
return None
new_stack = exec_stack[:-2]
new_stack.append(BEAKER)
return new_stack, list(command_history)
# history referencing predicates
if prefix == 'H':
arg_pos = int(predicate[1:])
history_idx_str = args[0]
if not is_number(history_idx_str):
return None
if history_idx_str in ('X1/1', '-1'):
return None
history_idx = int(history_idx_str) - 1
try:
referenced = command_history[history_idx][arg_pos]
except IndexError:
return None # failed to retrieve
return cls._update_state(exec_stack, command_history, referenced)
raise ValueError('Invalid predicate: {}'.format(predicate))
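# Illustrative transitions (the stacks and histories below are made up):
#   AlchemyCase._update_state([], [], 'r')                   -> (['CLR'], [])
#   AlchemyCase._update_state(['BKR', '2'], [], 'ADrain')    -> ([], [['ADrain', 'BKR', '2']])
#   AlchemyCase._update_state(['CLR', 'CLR'], [], 'APour')   -> None  (type check fails)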
class StaticBatchExploration(object):
def __init__(self, examples, case_type, max_length, max_utterances, max_stack_depth):
"""Perform BFS to find silver logical forms.
Args:
examples (list[Example])
case_type: subclass of Case
max_length (int): max # predicates in a logical form
max_utterances (int): max # utterances processed by a logical form
max_stack_depth (int): max depth of execution stack
"""
# metrics for reporting
start_time = time.time()
visited = 0
longest_so_far = 0
max_queue_size = 0
queue = deque(case_type.seeds()) # seed the queue
complete = []
while len(queue) != 0:
case = queue.popleft()
# update metrics
visited += 1
max_queue_size = max(max_queue_size, len(queue))
if case.length > longest_so_far:
now = time.time()
print('reached length {} after visiting {} states ({} s)'.format(case.length, visited, now - start_time))
longest_so_far = max(longest_so_far, case.length)
if visited % 100000 == 0:
print('visited: {}, completed: {}, peak queue size: {}'.format(visited, len(complete), max_queue_size))
# prune
if case.stack_depth > max_stack_depth:
continue
has_terminated = case.utterances_read >= max_utterances
if has_terminated:
complete.append(case.path)
continue
if case.length >= max_length:
continue
# extend
for choice in case.choices:
new_case = case.extend(choice)
if new_case is None:
continue
queue.append(new_case)
self.complete = complete
# Here just for comparison with StaticBatchExploration
# Performs a typical search which uses dynamic execution for pruning.
def simple_bfs(example, path_checker, max_depth):
root = ParseCase.initial(example.context)
queue = deque([root])
terminated = []
start_time = time.time()
depth = 0
max_queue_size = 0
for i in itertools.count():
if len(queue) == 0:
break
max_queue_size = max(max_queue_size, len(queue))
case = queue.popleft()
zeros = [0.] * len(case.choices)
case.choice_logits = zeros # not used
case.choice_log_probs = zeros # not used
for choice in case.choices:
clone = case.copy_with_decision(choice) # set the decision
# don't extend cases with invalid denotation
denotation = clone.denotation
if isinstance(denotation, Exception):
continue
path = clone.path
if len(path) != depth:
depth = len(path)
now = time.time()
print('reached depth {} after visiting {} states ({}s)'.format(depth, i + 1, now - start_time))
print('peak queue size: {}'.format(max_queue_size))
if path.terminated: # terminates when all the utterances have been processed
terminated.append(path)
continue
if len(path) >= max_depth:
continue
# Path is not complete. Apply pruning to see if we should continue.
if not path_checker(path):
continue
# Decide to extend this path.
new_case = path.extend()
queue.append(new_case)
silver_lfs = []
for path in terminated:
try:
if check_denotation(example.answer, path.finalized_denotation):
silver_lfs.append(path)
except Exception:
pass
return silver_lfs | ContextualSP/lemon/executor/strongsup/static_exploration.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/static_exploration.py",
"repo_id": "ContextualSP",
"token_count": 4914
} | 253 |
# import pytest
import sys
sys.path.append('../../../')
from strongsup.example import Example, Context
from strongsup.rlong.exploration_policy import AlchemyOraclePathFinder
from strongsup.rlong.state import RLongAlchemyState
from strongsup.rlong.world import RLongAlchemyWorld
from strongsup.rlong.value import RLongStateValue
class TestAlchemyExplorationPolicy(object):
def test_exploration(self):
initial_state = RLongAlchemyState.from_raw_string(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg')
final_state = RLongAlchemyState.from_raw_string(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:_')
num_steps = 2
world = RLongAlchemyWorld(initial_state)
context = Context(world, [[""], [""]])
ex = Example(context, answer=[RLongStateValue(final_state)])
print()
print(' INIT:', initial_state)
print('FINAL:', final_state)
print('STEPS:', num_steps)
path_finder = AlchemyOraclePathFinder(ex, debug=True)
found = set()
for path in path_finder.all_actual_paths:
finalized = ex.context.executor.finalize(path.denotation)
assert finalized[0].state == final_state
found.add(' '.join(str(x) for x in path.decisions))
assert 'all-objects -1 index 2 ADrain -1 H1 -1 H2 -1 H0' in found
assert 'all-objects -1 index all-objects 2 index APour 1 H2 X1/1 ADrain' in found
if __name__ == '__main__':
tester = TestAlchemyExplorationPolicy()
tester.test_exploration() | ContextualSP/lemon/executor/strongsup/tests/rlong/test_exploration_policy.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/tests/rlong/test_exploration_policy.py",
"repo_id": "ContextualSP",
"token_count": 634
} | 254 |
import abc
import sys
from collections import namedtuple
import numpy as np
import tensorflow as tf
from gtd.ml.framework import Feedable, Model
from keras.layers import Dense
from strongsup.utils import OptimizerOptions, get_optimizer
from strongsup.value import check_denotation
class ValueFunctionExample(namedtuple('ValueFunctionExample', ['case', 'reward'])):
"""Represents a single training example for StateValueFunction.train_step.
Attributes:
case (ParseCase)
reward (float): typically 0 or 1
"""
__slots__ = ()
@classmethod
def examples_from_paths(cls, paths, example):
"""Return a list of ValueFunctionExamples derived from ParsePaths discovered during exploration.
Args:
paths (list[ParsePath])
example (strongsup.example.Example)
Returns:
list[ValueFunctionExample]
"""
vf_examples = []
for path in paths:
reward = 1 if check_denotation(example.answer, path.finalized_denotation) else 0
vf_examples.extend(ValueFunctionExample(case, reward) for case in path)
return vf_examples
class StateValueFunction(Model, metaclass=abc.ABCMeta):
"""Defines a value function that associates a value V to each state s as in RL"""
@abc.abstractmethod
def values(self, cases):
"""Returns the values for the states corresponding to a list of cases
in the same order.
Args:
cases (list[ParseCase]): the cases
Returns:
values (list[float]): the values in same order as cases
"""
raise NotImplementedError
@abc.abstractmethod
def loss(self, vf_examples):
"""Compute the loss for which we are performing gradient descent upon.
Args:
vf_examples (list[ValueFunctionExample])
Returns:
float
"""
raise NotImplementedError
@abc.abstractmethod
def train_step(self, vf_examples):
"""Takes a train step based on training examples
Args:
vf_examples (list[ValueFunctionExample])
"""
raise NotImplementedError
class ConstantValueFunction(StateValueFunction):
"""Gives every state the same value"""
def __init__(self, constant_value):
self._constant_value = constant_value
def values(self, cases):
return [self._constant_value] * len(cases)
@property
def constant_value(self):
return self._constant_value
def loss(self, vf_examples):
"""Loss in terms of mean squared error."""
if len(vf_examples) == 0:
return 0.0
c = self._constant_value
diffs = [(c - ex.reward) for ex in vf_examples]
return np.mean(np.power(diffs, 2))
def train_step(self, vf_examples):
"""Is a no-op"""
return
class LogisticValueFunction(StateValueFunction, Feedable):
def __init__(self, parse_model, learning_rate, optimizer_opt):
"""
Args:
parse_model (ParseModel)
learning_rate (float)
optimizer_opt (OptimizerOptions)
"""
with tf.name_scope("LogisticValueFunction"):
self._rewards = tf.placeholder(
tf.float32, shape=[None], name="rewards")
# Prevent gradient from updating the stuff that makes up encoding
encodings = tf.stop_gradient(parse_model.case_encodings)
self._values = tf.squeeze(
Dense(1, activation="sigmoid", bias=True)(encodings),
axis=[1])
loss = tf.reduce_mean(tf.contrib.losses.log_loss(
self._values, labels=self._rewards))
optimizer = get_optimizer(optimizer_opt)(learning_rate)
self._take_step = optimizer.minimize(loss)
self._parse_model = parse_model
# Hold it around for testing purposes
self._loss = loss
@classmethod
def _unpack_vf_examples(cls, vf_examples):
cases = [ex.case for ex in vf_examples]
rewards = [ex.reward for ex in vf_examples]
return cases, rewards
def values(self, cases, ignore_previous_utterances=False):
if len(cases) == 0:
# Should only happen if everything gets pruned off beam.
return []
fetch = {"values": self._values}
fetched = self.compute(
fetch, cases, rewards=None,
ignore_previous_utterances=ignore_previous_utterances)
return fetched["values"]
def loss(self, vf_examples):
if len(vf_examples) == 0:
return 0.0
cases, rewards = self._unpack_vf_examples(vf_examples)
return self.compute(self._loss, cases, rewards, ignore_previous_utterances=False)
def train_step(self, vf_examples):
# Make sure all rewards are between [0, 1] for log_loss
for ex in vf_examples:
assert 0 <= ex.reward <= 1
if len(vf_examples) == 0:
print(" WARNING: (ValueFunction) Zero cases \033[F", file=sys.stderr)
else:
print(" Updating (ValueFunction) ({} cases) \033[F".format(
len(vf_examples)), file=sys.stderr)
cases, rewards = self._unpack_vf_examples(vf_examples)
# Always acknowledge previous utterances on train steps
self.compute(
self._take_step, cases, rewards,
ignore_previous_utterances=False)
def inputs_to_feed_dict(self, cases, rewards=None,
ignore_previous_utterances=False):
feed = {}
if rewards:
feed[self._rewards] = rewards
if len(cases) == 0:
raise ValueError("No cases")
feed.update(self._parse_model.inputs_to_feed_dict(
cases, ignore_previous_utterances, caching=False))
return feed
def get_value_function(config, parse_model):
"""Needs to take the Config for ValueFunction"""
if config.type == "constant":
return ConstantValueFunction(config.constant_value)
elif config.type == "logistic":
return LogisticValueFunction(
parse_model, config.learning_rate,
OptimizerOptions(config.optimizer))
else:
raise ValueError(
"ValueFunction {} not supported.".format(config.type))
| ContextualSP/lemon/executor/strongsup/value_function.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/value_function.py",
"repo_id": "ContextualSP",
"token_count": 2757
} | 255 |
The file [dummy-predictions.csv](dummy-predictions.csv) is a valid example prediction file that can be submitted to the [ARC Challenge Leaderboard](https://leaderboard.allenai.org/).
It predicts the first choice (either `A` or `1`) as the correct answer for every question, and scores about 23% correct.
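For reference, a minimal sketch of how such a dummy file could be generated (the question IDs below are hypothetical; the real file has one `question_id,label` row for every question in the challenge set):
```python
import csv
question_ids = ["Q1", "Q2", "Q3"]  # hypothetical IDs; use the real challenge-set question IDs
with open("dummy-predictions.csv", "w", newline="") as f:
    writer = csv.writer(f)
    for qid in question_ids:
        writer.writerow([qid, "A"])  # always guess the first choice ("1" for numeric-choice questions)
```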
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-challenge/README.md/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-challenge/README.md",
"repo_id": "ContextualSP",
"token_count": 83
} | 256 |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["A"]}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_EverythingWrong(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_MixedResults(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"]}
self.assertEqual(2.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_PartialGuess(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A", "B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.5 / 3, evaluator.calculate_accuracy(qa, p))
def test_ExtraPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"], "QExtra": ["X"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_EXTRA)
def test_MissingPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTION_MISSING)
def temp_file_with_contents(lines: typing.List[str]) -> str:
t = tempfile.NamedTemporaryFile(mode='wt', delete=False)
t.writelines(lines)
t.close()
return t.name
class TestReadAnswers(unittest.TestCase):
def test_ReadAnswers(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q2", "answerKey": "B"}\n',
'{"id": "Q3", "answerKey": "C"}\n',
])
answers = evaluator.read_answers(t)
os.remove(t)
self.assertEqual(answers, {"Q1": "A", "Q2": "B", "Q3": "C"})
def test_ReadAnswersEmpty(self):
t = temp_file_with_contents([])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersCorrupted(self):
t = temp_file_with_contents(['this is not json'])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersRepeated(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q1", "answerKey": "B"}\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
class TestReadPredictions(unittest.TestCase):
def test_ReadPredictions(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2",A;B\n',
'Q3,"A;B;C"\n',
])
predictions = evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(predictions, {
"Q1": ["A"],
"Q2": ["A", "B"],
"Q3": ["A", "B", "C"],
})
def test_ReadPredictionsMissingColumn(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2"\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsRepeated(self):
t = temp_file_with_contents([
'Q1,A\n',
'Q1,A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyKey(self):
t = temp_file_with_contents([
',A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyLabels(self):
t = temp_file_with_contents([
'Q1,A;\n',
])
with self.assertRaises(SystemExit) as context:
p = evaluator.read_predictions(t)
print(p)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
if __name__ == '__main__':
unittest.main()
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/test_evaluator.py/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/test_evaluator.py",
"repo_id": "ContextualSP",
"token_count": 2567
} | 257 |
The mappings of chain ids to correct labels for the dev and test splits are in
these files:
* dev: chainid_to_label_dev.json
* test: chainid_to_label_test.json
## Dummy predictions
As a convenience for testing the evaluator, two "dummy" prediction files
are provided, which give a score of 0.5 to every chain in each split:
* dev: dummy_predictions_dev.jsonl
* test: dummy_predictions_test.jsonl
These prediction files were created like this:
* dev: `cat chainid_to_label_dev.json | jq -c '. | keys[] | {"chain_id":., "score":0.5}' > dummy_predictions_dev.jsonl`
* test: `cat chainid_to_label_test.json | jq -c '. | keys[] | {"chain_id":., "score":0.5}' > dummy_predictions_test.jsonl`
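Equivalently, a minimal Python sketch that produces the same dummy file for the dev split (assuming the file layouts described above):
```python
import json
# chainid_to_label_dev.json maps chain ids to gold labels; only the keys are needed here.
with open("chainid_to_label_dev.json") as f:
    chain_ids = json.load(f).keys()
with open("dummy_predictions_dev.jsonl", "w") as out:
    for chain_id in chain_ids:
        out.write(json.dumps({"chain_id": chain_id, "score": 0.5}) + "\n")
```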
You can use these as inputs to the evaluator, to confirm that it is working as expected.
The scores you should expect from these dummy predictions are:
* dev: `{"auc_roc": 0.5, "explainP1": 0.23612622415669204, "explainNDCG": 0.4791226010631029}`
* test: `{"auc_roc": 0.5, "explainP1": 0.2174863387978142, "explainNDCG": 0.48617247810718606}`
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/data/README.md/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/data/README.md",
"repo_id": "ContextualSP",
"token_count": 369
} | 258 |
**/__pycache__
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/.dockerignore/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/.dockerignore",
"repo_id": "ContextualSP",
"token_count": 7
} | 259 |
from typing import List, NamedTuple, Dict
from process.constants import NO_LOCATION, CREATE, DESTROY, MOVE
class Input(NamedTuple):
participants: str
class Output(NamedTuple):
participants: str
class Conversion(NamedTuple):
created: str
destroyed: str
locations: str
step_id: str
class Move(NamedTuple):
participants: str
location_before: str
location_after: str
step_id: str
class Process(NamedTuple):
process_id: int
locations: Dict
actions: Dict
num_steps: int
# Q1: What are the inputs?
# - If a participant exists in state1, but does not exist in the end stateN, it's an input.
def inputs(self) -> List[Input]:
inputs = [] # type: List[Input]
for participant in self.locations.keys():
actions = self.actions[participant]
if _is_this_action_seq_of_an_input(actions):
inputs.append(Input(participants=_summarize_participants(participant)))
return inputs
# Q2: What are the outputs
# - If a participant does not exist in state1, but exists in the end stateN, it's an output.
def outputs(self) -> List[Output]:
outputs = [] # type: List[Output]
for participant in self.locations.keys():
actions = self.actions[participant]
if _is_this_action_seq_of_an_output(actions):
outputs.append(Output(participants=_summarize_participants(participant)))
return outputs
# Q3: What is converted?
# tuple: (participant-list-from, participant-list-to, loc-list, step-id)
# a. For any event with BOTH "D" and "C" in:
# The "D" participants are converted to the "C" participants at the union of the D and C locations
# b. IF an event has ONLY "D" but no "C" in ("M" is ok - irrelevant)
# AND the NEXT event has ONLY "C" but no "D" in ("M" is ok - irrelevant)
# THEN the "D" participants are converted to the "C" participants at the union of the D and C locations
def conversions(self) -> List[Conversion]:
conversions = [] # type: List[Conversion]
for step_id in range(1, self.num_steps + 1):
(created, c_locations) = self._get_created_at_step(step_id)
(destroyed, d_locations) = self._get_destroyed_at_step(step_id)
if created and destroyed:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed),
created=_conjunction(*created),
locations=_conjunction(*set(c_locations + d_locations)),
step_id=str(step_id)
))
elif destroyed and step_id < self.num_steps - 1:
(created2, c_locations2) = self._get_created_at_step(step_id + 1)
(destroyed2, d_locations2) = self._get_destroyed_at_step(step_id + 1)
created_but_not_destroyed = set(created2) - set(destroyed)
if not destroyed2 and created_but_not_destroyed:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed),
created=_conjunction(*created_but_not_destroyed),
locations=_conjunction(*set(c_locations2 + d_locations)),
step_id=str(step_id)
))
elif created and step_id < self.num_steps - 1:
(created2, c_locations2) = self._get_created_at_step(step_id + 1)
(destroyed2, d_locations2) = self._get_destroyed_at_step(step_id + 1)
destroyed_but_not_created = set(destroyed2) - set(created)
if not created2 and destroyed_but_not_created:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed_but_not_created),
created=_conjunction(*created),
locations=_conjunction(*set(c_locations + d_locations2)),
step_id=str(step_id)
))
return conversions
# Q4: What is moved?
# tuple: (participant, from-loc, to-loc, step-id)
# return all moves
def moves(self):
moves = []
for participant in self.locations.keys():
locations = self.locations[participant]
actions = self.actions[participant]
for step_id in range(1, len(locations)):
is_moved = actions[step_id - 1] == MOVE or (
locations[step_id - 1] != NO_LOCATION and
locations[step_id] != NO_LOCATION and
locations[step_id - 1] != locations[step_id]
)
if not is_moved:
continue
moves.append(Move(
participants=_summarize_participants(participant),
location_before=locations[step_id - 1],
location_after=locations[step_id],
step_id=str(step_id)
))
return moves
def _get_created_at_step(self, step_id: int):
created = []
locations = []
for participant in self.locations.keys():
state_values = self.locations[participant]
is_creation = state_values[step_id - 1] == NO_LOCATION \
and state_values[step_id] != NO_LOCATION
if is_creation:
created.append(_summarize_participants(participant))
locations.append(state_values[step_id])
return created, locations
def _get_destroyed_at_step(self, step_id: int):
destroyed = []
locations = []
for participant in self.locations.keys():
state_values = self.locations[participant]
is_destruction = state_values[step_id - 1] != NO_LOCATION \
and state_values[step_id] == NO_LOCATION
if is_destruction:
destroyed.append(_summarize_participants(participant))
locations.append(state_values[step_id - 1])
return destroyed, locations
def _is_this_action_seq_of_an_output(actions) -> bool:
for action_id, _ in enumerate(actions):
no_destroy_move_before = DESTROY not in actions[0:action_id] and MOVE not in actions[0:action_id]
current_create = actions[action_id] == CREATE
no_destroy_later = DESTROY not in actions[action_id + 1:]
if no_destroy_move_before and current_create and no_destroy_later:
return True
return False
def _is_this_action_seq_of_an_input(actions) -> bool:
for action_id, _ in enumerate(actions):
no_create_before = CREATE not in actions[0:action_id] # last action_id must be checked
current_destroy = actions[action_id] == DESTROY
no_create_move_later = CREATE not in actions[action_id + 1:] and MOVE not in actions[action_id + 1:]
if no_create_before and current_destroy and no_create_move_later:
return True
return False
def _split_participants(participant) -> List[str]:
return [p.strip() for p in participant.split(';')]
def _summarize_participants(participant) -> str:
return ' OR '.join(_split_participants(participant))
def _conjunction(*things) -> str:
return ' AND '.join(things)
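# A minimal usage sketch (hypothetical participant data, for illustration only). `locations`
# holds one value per state (num_steps + 1 entries) and `actions` one value per step.
if __name__ == '__main__':
    example = Process(
        process_id=1,
        locations={'water': [NO_LOCATION, 'leaf', 'root']},
        actions={'water': [CREATE, MOVE]},
        num_steps=2,
    )
    print(example.inputs())   # [] -- water is never destroyed, so it is not an input
    print(example.outputs())  # [Output(participants='water')] -- created and never destroyed
    print(example.moves())    # [Move(participants='water', location_before='leaf', location_after='root', step_id='2')]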
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/process.py/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/process.py",
"repo_id": "ContextualSP",
"token_count": 3392
} | 260 |
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import json
import datasets
datasets.set_caching_enabled(False)
import numpy as np
from datasets import load_dataset, load_metric
from parameters16g_es_corpusb import *
from copy import deepcopy
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from gan_dataset import DataCollatorForGAN
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from GenTrainer import GenTrainer
from modeling_t5_with_loss import T5ForConditionalGeneration ##### Softscore loss
# from modeling_bart import BartForConditionalGeneration ##### Softscore loss
os.environ["WANDB_DISABLED"] = "true"
logger = logging.getLogger(__name__)
# A list of all multilingual tokenizer which require src_lang and tgt_lang attributes.
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a jsonlines)."})
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (sacreblue) on "
"a jsonlines file."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (sacreblue) on " "a jsonlines file."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": "The token to force as the first generated token after the :obj:`decoder_start_token_id`."
"Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token "
"needs to be the target language token.(Usually it is the target language token)"
},
)
data_dir: Optional[str] = field(
default=None,
metadata={"help": "Path for data files"},
)
prediction_mode: Optional[str] = field( ##################
default="gen",
metadata={"help": "Choose from [gen, ver]. gen-train: self-sampling; ver: create verifier adhoc corpus inference."},
)
batch_example_num: Optional[int] = field(
default=6,
metadata={
"help": "the number of instance (number pos+neg) per batch, default as 6"
"value if set."
},
)
gan_alpha: float = field(
default=0.9,
metadata={
"help": "the ratio of the teacher forcing loss in the gan loss"
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
# accepting both json and jsonl file extensions, as
# many jsonlines files actually have a .json extension
valid_extensions = ["json", "jsonl"]
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in valid_extensions, "`train_file` should be a jsonlines file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in valid_extensions, "`validation_file` should be a jsonlines file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
import os
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
"`--source_prefix 'translate English to German: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For translation, only JSON files are supported, with one field named "translation" containing two keys for the
# source and target languages (unless you adapt what follows).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
data_files = {}
if training_args.do_train: data_files["train"] = os.path.join(data_args.data_dir, data_args.train_file)
if training_args.do_eval: data_files['validation'] = os.path.join(data_args.data_dir, data_args.validation_file)
if training_args.do_predict: data_files['test'] = os.path.join(data_args.data_dir, data_args.test_file)
# data_files = {
# 'train': os.path.join(data_args.data_dir, data_args.train_file) if training_args.do_train else None,
# 'validation': os.path.join(data_args.data_dir, data_args.validation_file) if training_args.do_eval else None,
# 'test': os.path.join(data_args.data_dir, data_args.test_file) if training_args.do_predict else None,
# }
print(data_files)
raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir, download_mode='force_redownload')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = T5ForConditionalGeneration.from_pretrained( #################
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
special_tokens=['[SEP]','[MASK]']
if training_args.do_train and not all([t in tokenizer.vocab for t in special_tokens]):
special_tokens_dict = {'additional_special_tokens': special_tokens}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
logger.info('Added tokens: {}'.format(added_tokens))
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
inputs = examples['input'] #[ex for ex in examples["input"]]
# scores = examples["ver_prob"]
targets = examples['conclusions']
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
if not (data_args.prediction_mode == "gen" and training_args.do_train):
output_labels = tokenizer([tar[-1] for tar in targets], max_length=max_target_length, padding=padding, truncation=True)
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
output_labels["input_ids"] = [
                    [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in output_labels["input_ids"]
]
output_labels = output_labels['input_ids']
else:
# Setup the tokenizer for targets
output_labels = []
with tokenizer.as_target_tokenizer():
for i in range(len(inputs)):
labels = tokenizer(targets[i],max_length=max_target_length, padding=padding, truncation=True)
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
output_labels.append(labels['input_ids'])
# labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
model_inputs["labels"] = output_labels
if data_args.prediction_mode == "gen" and training_args.do_train:
model_inputs["is_gold"] = examples['is_gold']
model_inputs["ver_prob"] = examples["ver_prob"]
return model_inputs
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
np.random.seed(training_args.seed)
indices = np.random.choice(len(train_dataset["input"]), data_args.max_train_samples, replace=False) # magic "input"
train_dataset = train_dataset.select(indices) # Mine
# train_dataset = train_dataset.select(range(data_args.max_train_samples)) ####### Original
# train_dataset = train_dataset.select(np.random.choice(len(train_dataset["input"]), int(0.12 * len(train_dataset["input"])), replace=False))
with training_args.main_process_first(desc="train dataset map pre-processing"):
# print(train_dataset.column_names)
# print([len(train_dataset[column]) for column in train_dataset.column_names])
# exit()
column_names = train_dataset.column_names
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset"
)
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
# eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) # Original
np.random.seed(training_args.seed)
            indices = np.random.choice(len(eval_dataset["input"]), data_args.max_eval_samples, replace=False) # magic "input"
eval_dataset = eval_dataset.select(indices) # Mine
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test"]
predict_dataset_copy = deepcopy(predict_dataset)
if data_args.max_predict_samples is not None:
# predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
np.random.seed(training_args.seed)
indices = np.random.choice(len(predict_dataset["input"]), data_args.max_predict_samples, replace=False) # magic "input"
predict_dataset_copy = predict_dataset_copy.select(indices)
predict_dataset = predict_dataset.select(indices) # Mine
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
batch_size = gen_per_device_examples_num
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
# if data_args.pad_to_max_length:
# data_collator = default_data_collator
# else:
if data_args.prediction_mode == "gen" and training_args.do_train:
data_collator = DataCollatorForGAN(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
max_instance_num=batch_size
)
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None
)
# Metric
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {"bleu": result["score"]}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
# Initialize our Trainer
trainer = GenTrainer( #########################
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
num_return_seq=num_return_seq,
num_beams=gen_num_beams,
gan_alpha=data_args.gan_alpha
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
predict_results = trainer.predict(
predict_dataset, metric_key_prefix="predict", max_length=max_length, num_beams=num_beams
)
metrics = predict_results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
outputs = predict_results.predictions
predictions = tokenizer.batch_decode(
outputs.reshape(outputs.shape[0] * outputs.shape[1], outputs.shape[-1]), skip_special_tokens=True,
clean_up_tokenization_spaces=True
)
predictions = [pred.strip() for pred in predictions]
# predictions = tokenizer.batch_decode(
# predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
# )
# predictions = [pred.strip() for pred in predictions]
output_prediction_file=None
if data_args.prediction_mode == "gen":
output_prediction_file = os.path.join(data_args.data_dir, unlabeled_gen_train_iter_file) #####
inputs = predict_dataset_copy["input"]
outputs = predict_dataset_copy["conclusions"]
is_golds = predict_dataset_copy["is_gold"]
with open(output_prediction_file, "w", encoding="utf-8") as f:
for index,(i,o,igs) in enumerate(zip(inputs, outputs ,is_golds)):
gold_idx = igs.index(1)
# print('-----------------------')
# print(len(predictions),index)
# print(predictions[index*num_return_seq:(index+1)*num_return_seq])
example1 = {"input": i, "conclusions": [o[gold_idx]]+predictions[index*num_return_seq:(index+1)*num_return_seq], "is_gold": [1]+[0]*num_return_seq, "ver_prob": [-1]*(num_return_seq+1)}
# example2 = {"input": i, "conclusion": p, "is_gold": 0, "ver_prob": -1}
json.dump(example1, f); f.write("\n")
# json.dump(example2, f); f.write("\n")
elif data_args.prediction_mode == "ver":
output_prediction_file = os.path.join(data_args.data_dir, unlabeled_ver_train_iter_file) #####
inputs = predict_dataset_copy["input"]
outputs = predict_dataset_copy["conclusions"]
is_golds = predict_dataset_copy["is_gold"]
with open(output_prediction_file, "w", encoding="utf-8") as f:
for index,(i,o,igs) in enumerate(zip(inputs, outputs ,is_golds)):
gold_idx = igs.index(1)
p = predictions[index * num_return_seq:(index + 1) * num_return_seq]
gt = o[gold_idx]
all_ids = list(range(len(igs)))
all_ids.remove(gold_idx)
es_negs = [o[nid] for nid in list(np.random.choice(list(all_ids),2,replace=False))]
example = {"input": i, "conclusions": [gt,p[0]]+es_negs, "is_gold":[1,0]+[0]*len(es_negs)}
json.dump(example, f)
f.write("\n")
## Default write to file
# with open(output_prediction_file, "w", encoding="utf-8") as writer:
# writer.write("\n".join(predictions))
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
# languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
# if len(languages) > 0:
# kwargs["language"] = languages
# if training_args.push_to_hub:
# trainer.push_to_hub(**kwargs)
# else:
# trainer.create_model_card(**kwargs)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | ContextualSP/logigan/pre-training/hf_generation_multi_es.py/0 | {
"file_path": "ContextualSP/logigan/pre-training/hf_generation_multi_es.py",
"repo_id": "ContextualSP",
"token_count": 13662
} | 261 |
import numpy as np
import re
import json
import random
from collections import defaultdict
from functools import reduce
from enum import Enum
from itertools import permutations
from nltk.corpus import stopwords
# words = stopwords.words('english')
class ResType(Enum):
    ENTITY = 1
RELATION = 2
class Sample:
def __init__(self, query, sparql, tag):
self.query = query
self.sparql = sparql
self.tag = tag
    def string(self):
return '\t'.join([self.query, self.sparql, self.tag])
class Helper:
def __init__(self):
self.stop_words = ["Did", "and", ",", "'s", "M0", "M1", "M2", "M3", "M4", "M5","M6", "whose", "Whose", \
"What", "did", "Was", "was", "Which", "Were", "were", "that", "M", "a"]
self.stop_words += stopwords.words('english')
def load_phrase_table(self):
self.dict = defaultdict(list)
data = open("./coor_file-0520.filter_wto_no")
for i in data:
i = eval(i.strip())[0]
self.dict[i[0]].append(i[1])
# maxlen = max(maxlen, len(i[0].split()))
# if len(i[0].split()) > 3:
# print(i)
# print(self.dict)
# self.indexTree = Trie()
# for key in self.dict:
# self.indexTree.add(key)
def count_var(self, lf):
value = {'?x0':2.5, '?x1':2, '?x2':1.5, '?x3':1, '?x4':0.5, '?x5':0}
a1, r, a2 = lf.split()
cn1, cn2, cn3 = 0, 0, 0
if a1.startswith("?x"):
cn1 = 5
cn1 += value[a1]
if a2.startswith("?x"):
cn2 = 3
cn2 += value[a2]
if r == 'a':
cn3 = -1
return cn1 + cn2 + cn3
def add_index(self, file):
data = np.array(open(file).readlines()).reshape(-1, 3)
res = []
for item in data:
_, src, trg = item
# print(_, src, trg)
src_new = []
for idx, token in enumerate(src.split(), 1):
src_new.append(f'{token} ({idx})')
res.append('\n'.join([_, ' '.join(src_new), trg]))
return "\n".join(res)
def update_output_format(self, version):
for type in ["formula2query", "query2formula"]:
file = f"/home/v-yinguo/DKI/GIZA_alignment/{version}/{type}/{type}.A3.final"
open(f'{file}.update', "w").write(self.add_index(file))
def statistic_coor(self, q2f, f2q):
        ## type == 1 means the query-to-formula alignment direction
        ## type == 0 means the formula-to-query alignment direction
def split_up(sent):
return [token for token in sent.split() if not token.startswith("(")]
def split_bottom(sent):
tokens = re.split(r' *\({[0-9 ]*}\) *', sent)[1:-1]
return tokens
def split_alignment(sent):
alignment = [re.findall(r'[0-9].', i) for i in re.findall(r'\({[0-9 ]*}\)', sent)]
##把开始的NULL去掉
# print(alignment)
alignment = [[int(ii) for ii in i] if len(i) else [0] for i in alignment[1:]]
## 将alignment 处理成连续的span
alignment_combine = []
# print(alignment)
for sub_align in alignment:
sub_align_span = []
start, end = 0, 0
for idx in range(len(sub_align) - 1):
if sub_align[idx + 1] == sub_align[idx] + 1:
end += 1
else:
sub_align_span.append((sub_align[start], sub_align[end]))
start, end = idx + 1, idx + 1
sub_align_span.append((sub_align[start], sub_align[end]))
alignment_combine.append(sub_align_span)
return alignment_combine
coor_dict = defaultdict(int)
for type, file in enumerate([f2q, q2f]):
# print(f"file name:{file}")
for index, line in enumerate(open(file),1):
line = line.strip()
# print(line)
if (not type and index % 5 == 3) or (type and index % 5 == 4) :
query = line
elif (not type and index % 5 == 4) or (type and index % 5 == 3) :
formula = line
elif index %5 == 0:
query_tokens = split_bottom(query) if type else split_up(query)
formula_tokens= split_up(formula) if type else split_bottom(formula)
alignment = split_alignment(query) if type else split_alignment(formula)
# print(f"len formula:{len(formula_tokens)}, len(query):{len(query_tokens)},len alignment:{len(alignment)}")
# print(f"query:{query_tokens}\nformula:{formula_tokens}\nalignment:{alignment}")
assert(len(formula_tokens) == len(alignment) or len(query_tokens) == len(alignment))
if not type:
for spans, formula_token in zip(alignment, formula_tokens):
for sub_span in spans:
s_pos, e_pos = sub_span[0], sub_span[1]
query_token = ' '.join(query_tokens[s_pos-1:e_pos])
if s_pos == e_pos:
if s_pos and not re.match(r'M[0-9]', query_token) and query_token not in self.stop_words:
# print(f"1-1 matching:{s_pos}-{e_pos} {query_token}-{formula_token}")
coor_dict[(query_token, formula_token)] += 1
else:
# print(f"multi matching:{s_pos}-{e_pos} {query_token}-{formula_token}")
coor_dict[(query_token, formula_token)] += 1
else:
# print(f"\n\nori query:{query}\nori formula:{formula}")
# print(f"query:{query_tokens}\nformula:{formula_tokens}\nalignment:{alignment}")
for spans, query_token in zip(alignment, query_tokens):
if query_token in self.stop_words or re.match(r'M[0-9]', query_token):
continue
for sub_span in spans:
s_pos, e_pos = sub_span[0], sub_span[1]
formula_token = ' '.join(formula_tokens[s_pos-1:e_pos])
if s_pos == e_pos:
if s_pos:
# print(f"1-1 matching:{s_pos}-{e_pos} {query_token}-{formula_token}")
coor_dict[(query_token, formula_token)] += 1
else:
# print(f"multi matching:{s_pos}-{e_pos} {query_token}-{formula_token}")
coor_dict[(query_token, formula_token)] += 1
pass
# print(coor_dict)
sorted_coor_dict = sorted(coor_dict.items(), key=lambda s:s[1], reverse = True)
# print(sorted_coor_dict)
return sorted_coor_dict
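    # Example (hypothetical content) of one aligned line in GIZA++ *.A3.final format, as parsed
    # by the helpers inside statistic_coor above:
    #   NULL ({ }) Did ({ 1 }) M0 ({ 2 }) direct ({ 3 4 }) M1 ({ 5 })
    # split_bottom keeps the surface tokens ['Did', 'M0', 'direct', 'M1'], while split_alignment
    # drops the leading NULL group and merges '3 4' into the contiguous span (3, 4).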
def filter_str(self, line):
qf, cnt = eval(line.strip())
key, v = qf
if key in self.stop_words:
return
if v.count("|||") > 1:
return
key_entities = re.findall(r'M[0-9]', key)
v_entities = re.findall(r'M[0-9]', v)
key_entities.sort()
v_entities.sort()
if len(key_entities) == 0:
v = re.sub(r'M[0-9]', 'M', v)
# coor_dict[(key, v)] += cnt
elif len(v_entities) == 0:
key = re.sub(r'M[0-9]', 'M', key)
elif len(key_entities) == len(v_entities):
key = re.sub(r'M[0-9]', 'M', key)
v = re.sub(r'M[0-9]', 'M', v)
else:
return
while(len(key)):
if key.split()[0] in self.stop_words and '#is#M' not in v:
key = ' '.join(key.split()[1:])
elif key.split()[-1] in self.stop_words and '#is#M' not in v:
key = ' '.join(key.split()[:-1])
else:
break
if len(key) == 0:
return
if len(key.split()) == 1 and len(v.split(" ")) > 2:
return
if len(key.split()) == 1 and (("FILTER" not in v and len(v.split("|||")) > 1) or ("FILTER" in v and len(v.split("|||")) > 2)):
return
if len(key.split()) > 1 and len(set(key.split()) - set(self.stop_words)) == 0 and not '#is#M' in v:
return
if v.startswith("FILTER"):
return
v = list(set(v.split()))
if len(v) > 1:
return
v.sort()
v = ' '.join(v)
v = re.sub(r'\?x[0-9]', '?x', v)
return (key, v), cnt
def filter_result(self, src1):
coor_dict = defaultdict(int)
        ## src1 is the debug_opt file
for file in [open(src1)]:
for line in file:
# print("after key:", key)
result = self.filter_str(line)
if result:
coor_dict[result[0]] += result[1]
# elif len(key_entities) == len(v_entities):
# key = re.sub(r'M[0-9]', 'M', key)
# v = re.sub(r'M[0-9]', 'M', v)
# coor_dict[(key, v)] += cnt
# coor_dict[(key, v)] += cnt
# coor_dict[(key, v)] += cnt
sorted_coor_dict = sorted(coor_dict.items(), key=lambda s:s[1], reverse = True)
return sorted_coor_dict
def filter_result_pred(self, src1):
coor_dict = defaultdict(int)
        ## src1 is the debug_opt file
for file in [open(src1)]:
for line in file:
# print("after key:", key)
result = self.filter_str(line)
if result:
key, v = result[0]
# v = "?x#ns:people.person.nationality#ns:m.0f8l9c"
a1, r, a2 = v.split('#')
if re.match(r'\?x[0-9]*|M[0-9]*', a1) and re.match(r'\?x[0-9]*|M[0-9]*', a2) and r!='is':
# print("1111111")
v = r[3:] if r.startswith('ns:') else r
else:
# print("222222222")
r = r[3:] if r.startswith('ns:') else r
a2 = a2[3:] if a2.startswith('ns:') else a2
a2 = 'm_'+a2[2:] if a2.startswith('m.') else a2
v = f"{r} {a2}"
# print(key, v)
coor_dict[(key, v)] += result[1]
# elif len(key_entities) == len(v_entities):
# key = re.sub(r'M[0-9]', 'M', key)
# v = re.sub(r'M[0-9]', 'M', v)
# coor_dict[(key, v)] += cnt
# coor_dict[(key, v)] += cnt
# coor_dict[(key, v)] += cnt
sorted_coor_dict = sorted(coor_dict.items(), key=lambda s:s[1], reverse = True)
return sorted_coor_dict
def term_extract(self, query):
terms = []
entities = []
query = query.split()
idx = 0
        #### entities and single-token terms
while idx < len(query):
if re.match(r'M[0-9]', query[idx]):
entities.append(( query[idx:idx+1],query[idx:idx+1] ,(idx, idx)))
idx += 1
# elif idx +3 <= len(query) and ' '.join(query[idx:idx+3]) in self.dict:
# terms.append((' '.join(query[idx:idx+3]), self.dict.get(' '.join(query[idx:idx+3])),(idx, idx+2)))
# idx += 3
elif idx +1 <= len(query) and ' '.join(query[idx:idx+1]) in self.dict:
terms.append((' '.join(query[idx:idx+1]), self.dict.get(' '.join(query[idx:idx+1])), (idx, idx)))
idx += 1
else:
idx +=1
        ## multi-token terms (trigrams, then bigrams)
idx = 0
while idx < len(query) - 3:
if idx +3 <= len(query) and ' '.join(query[idx:idx+3]) in self.dict:
terms.append((' '.join(query[idx:idx+3]), self.dict.get(' '.join(query[idx:idx+3])),(idx, idx+2)))
idx += 1
idx = 0
while idx < len(query) - 2:
if idx +2 <= len(query) and' '.join(query[idx:idx+2]) in self.dict:
terms.append(( ' '.join(query[idx:idx+2]), self.dict.get(' '.join(query[idx:idx+2])), (idx, idx+1)))
idx += 1
terms = sorted(terms, key = lambda s:s[2][0])
return entities, terms
pass
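    # Example (hypothetical phrase table; load_phrase_table normally fills self.dict from the
    # alignment-statistics file). For query "Did M0 directed M1" with
    #   self.dict = {'directed': ['?x#ns:film.director.film#M']}
    # term_extract returns
    #   entities: [(['M0'], ['M0'], (1, 1)), (['M1'], ['M1'], (3, 3))]
    #   terms:    [('directed', ['?x#ns:film.director.film#M'], (2, 2))]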
def term_extract_v2(self, query, type):
        ## 0520 revision
        ## updated for compatibility with the new format: also recognize the leading "Did M" pattern
terms = []
entities = []
if query.startswith("Did M") or query.startswith("Was M") or query.startswith("Were M") or query.startswith("Was a"):
if type in ['mcd2', 'mcd3']:
nl_pattern = query.split()[0] +" " + query.split()[1]
terms.append((nl_pattern, [f'?x0#is#{query.split()[1]}'], (0, 1)))
else:
nl_pattern = query.split()[0] +" M"
terms.append((nl_pattern, ['?x0#is#M'], (0, 1)))
# print("terms:", terms)
query = query.split()
idx = 0
        #### entities and single-token terms
while idx < len(query):
if re.match(r'M[0-9]', query[idx]):
entities.append(( query[idx:idx+1],query[idx:idx+1] ,(idx, idx)))
idx += 1
# elif idx +3 <= len(query) and ' '.join(query[idx:idx+3]) in self.dict:
# terms.append((' '.join(query[idx:idx+3]), self.dict.get(' '.join(query[idx:idx+3])),(idx, idx+2)))
# idx += 3
elif idx +1 <= len(query) and ' '.join(query[idx:idx+1]) in self.dict:
terms.append((' '.join(query[idx:idx+1]), self.dict.get(' '.join(query[idx:idx+1])), (idx, idx)))
idx += 1
else:
idx +=1
        ## multi-token terms (trigrams, then bigrams)
idx = 0
while idx < len(query) - 3:
if idx +3 <= len(query) and ' '.join(query[idx:idx+3]) in self.dict:
terms.append((' '.join(query[idx:idx+3]), self.dict.get(' '.join(query[idx:idx+3])),(idx, idx+2)))
idx += 1
idx = 0
while idx < len(query) - 2:
if idx +2 <= len(query) and' '.join(query[idx:idx+2]) in self.dict:
terms.append(( ' '.join(query[idx:idx+2]), self.dict.get(' '.join(query[idx:idx+2])), (idx, idx+1)))
idx += 1
terms = sorted(terms, key = lambda s:s[2][0])
# print(query, entities, terms)
return entities, terms
def fill_skeleton(self, query, skeleton):
## fill_skeleton is the earlier, fine-grained version
## i.e. ?x a M, ?x nationality and gender patterns are kept as distinct skeleton types
## the v2 version collapses all of them into ?x P M
def preprocess_sparql(query):
tokens = []
for token in query:
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens)
def transform_term_to_pattern(term):
# print("term here:", term)
term_split = []
for i in term.split():
term_split += i.split("|||")
# print("term split:", term_split)
skeleton_list = []
term_list = []
for i in term_split:
if i.startswith("FILTER"):
continue
# print(i)
i = preprocess_sparql(i.split("#"))
a1, r, a2 = i.split()
if a1.startswith("?x") and a2.startswith("?x"):
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
skeleton_list.append(f"{a1} P M")
elif a2.startswith("?x") and a1.startswith("M"):
skeleton_list.append(f"M P {a2}")
elif a1.startswith("M") and a2.startswith("M"):
skeleton_list.append(f"M P M")
elif r == "a":
skeleton_list.append(f"{a1} a M")
else:
skeleton_list.append(f"{a1} V S")
term_list.append(i)
return skeleton_list, ' . '.join(term_list)
entities, terms = self.term_extract(query)
# print(f"\nquery:{query}\nskeleton:{skeleton}\nsparql:{sparql}")
candidate_terms = defaultdict(set)
for term in terms:
for sub_term in term[1]:
sub_pattern , sub_term = transform_term_to_pattern(sub_term)
# print(sub_pattern)
if " ".join(sub_pattern) in skeleton:
candidate_terms[" ".join(sub_pattern)].add(sub_term)
candidate_triplets = defaultdict(list)
# print("candidate_term:", candidate_terms)
for candidate_skeleton, candidate_terms in candidate_terms.items():
# a1, r, a2 = candidate_term.split("#")
for candidate_term in candidate_terms:
candidate_term = candidate_term.replace("#", " ")
if candidate_term.count("M") == 1:
candidate_triplets[candidate_skeleton] += [''.join(candidate_term.replace("M", entity[0][0])) for entity in entities]
elif candidate_term.count("M") == 2:
candidate_term = list(candidate_term)
index_m = candidate_term.index('M')
candidate_term[index_m] = 'W'
index_m = candidate_term.index('M')
candidate_term[index_m] = 'Y'
candidate_term = ''.join(candidate_term)
for i in permutations(entities, 2):
a1, a2 = i[0][0][0], i[1][0][0]
# print(a1, a2, candidate_term)
candidate_term_ = candidate_term.replace("W", a1)
candidate_term_ = candidate_term_.replace("Y", a2)
candidate_triplets[candidate_skeleton].append(candidate_term_)
else:
candidate_triplets[candidate_skeleton].append(candidate_term)
# print(entities)
# for i in terms:
# print(i)
# print(terms)
# print("candidate_terms:", candidate_terms)
# print("candidate_triplets:", candidate_triplets)
return candidate_triplets
# print(terms)
# for term in terms:
# for candidate_term in term:
# if candiidate_term
# for pattern in skeleton:
# pass
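# Illustrative sketch (assumed data, not from the original code): for a query containing
# entities M0 and M1 and a skeleton containing "?x0 P M", fill_skeleton returns a dict like
#     {"?x0 P M": ["?x0 <predicate> M0", "?x0 <predicate> M1"]}
# i.e. each candidate triple pattern from the phrase table is instantiated with every
# extracted entity (and with ordered entity pairs when a pattern contains two M slots).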
def fill_skeleton_v2(self, query, skeleton):
## use the query plus the aligned bilingual dictionary (GIZA++ alignment) to obtain candidate triples
## the candidate triples are then filtered by the given skeleton
## i.e. ?x a M, ?x nationality and gender patterns are kept as distinct skeleton types
## the v2 version collapses all of them into ?x P M
def preprocess_sparql(query):
tokens = []
for token in query:
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens)
def transform_term_to_pattern(term):
term_split = []
for i in term.split():
term_split += i.split("|||")
# print("term split:", term_split)
skeleton_list = []
term_list = []
for i in term_split:
if i.startswith("FILTER"):
continue
# print(i)
i = preprocess_sparql(i.split("#"))
a1, r, a2 = i.split()
if a1.startswith("?x") and a2.startswith("?x"):
## ?x P ?x
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
## ?x P M
skeleton_list.append(f"{a1} P M")
elif a2.startswith("?x") and a1.startswith("M"):
## M P ?x
skeleton_list.append(f"M P {a2}")
elif a1.startswith("M") and a2.startswith("M"):
## M P M
skeleton_list.append(f"M P M")
elif a1.startswith("?x") and r == "a":
## ?x a M => ?x P M
skeleton_list.append(f"{a1} P M")
else:
## ?x nationality/gender => ?x P M
skeleton_list.append(f"{a1} P M")
term_list.append(i)
return skeleton_list, ' . '.join(term_list)
entities, terms = self.term_extract(query)
# print(f"\nquery:{query}\nskeleton:{skeleton}\nsparql:{sparql}")
candidate_terms = defaultdict(set)
for term in terms:
for sub_term in term[1]:
# print("sub_term:", sub_term)
sub_pattern , sub_term = transform_term_to_pattern(sub_term)
# print("sub_pattern:", sub_pattern)
if " ".join(sub_pattern) in skeleton:
candidate_terms[" ".join(sub_pattern)].add(sub_term)
candidate_triplets = defaultdict(list)
# print("candidate_term:", candidate_terms)
for candidate_skeleton, candidate_terms in candidate_terms.items():
# a1, r, a2 = candidate_term.split("#")
for candidate_term in candidate_terms:
candidate_term = candidate_term.replace("#", " ")
if candidate_term.count("M") == 1:
candidate_triplets[candidate_skeleton] += [''.join(candidate_term.replace("M", entity[0][0])) for entity in entities]
elif candidate_term.count("M") == 2:
candidate_term = list(candidate_term)
index_m = candidate_term.index('M')
candidate_term[index_m] = 'W'
index_m = candidate_term.index('M')
candidate_term[index_m] = 'Y'
candidate_term = ''.join(candidate_term)
for i in permutations(entities, 2):
a1, a2 = i[0][0][0], i[1][0][0]
# print(a1, a2, candidate_term)
candidate_term_ = candidate_term.replace("W", a1)
candidate_term_ = candidate_term_.replace("Y", a2)
candidate_triplets[candidate_skeleton].append(candidate_term_)
else:
candidate_triplets[candidate_skeleton].append(candidate_term)
# print(entities)
# for i in terms:
# print(i)
# print(terms)
# print("candidate_terms:", candidate_terms)
# print("candidate_triplets:", candidate_triplets)
return candidate_triplets
def fill_skeleton_v3(self, query, skeleton, split):
## use the query plus the aligned bilingual dictionary (GIZA++ alignment) to obtain candidate triples
## the candidate triples are then filtered by the given skeleton
## i.e. ?x a M, ?x nationality and gender patterns are kept as distinct skeleton types
## the v3 version rewrites the original M P ?x form into the ?x form, i.e. SPARQL that does not start with M
# Mflag = False
# for triple in skeleton.split(" . "):
# if triple.strip().startswith("M"):
# Mflag
def preprocess_sparql(query):
tokens = []
for token in query:
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens)
def check_valid(skeleton_list, skeleton_pattern):
skeleton_pattern = re.sub(r'\?x[0-9]', "?x", skeleton_pattern)
# skeleton = re.sub(r'\?x[0-9]', "?x", skeleton)
for skeleton in skeleton_list:
if re.sub(r'\?x[0-9]', "?x", skeleton) not in skeleton_pattern:
return False
return True
def transform_term_to_pattern(term):
term_split = []
for i in term.split():
term_split += i.split("|||")
skeleton_list = []
term_list = []
for i in term_split:
if i.startswith("FILTER"):
continue
i = preprocess_sparql(i.split("#"))
a1, r, a2 = i.split()
if a1.startswith("?x") and a2.startswith("?x"):
## ?x P ?x
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
## ?x P M
skeleton_list.append(f"{a1} P M")
elif a1.startswith("?x") and r == "a":
## ?x a M => ?x P M
skeleton_list.append(f"{a1} a M")
else:
skeleton_list.append(f"{a1} V S")
term_list.append(i)
skeleton_str = []
return skeleton_list, ' . '.join(term_list)
entities, terms = self.term_extract_v2(query, split)
# print("terms:", terms)
# print(f"\nquery:{query}\nskeleton:{skeleton}\nsparql:{sparql}")
candidate_terms = defaultdict(set)
for term in terms:
for sub_term in term[1]:
sub_pattern , sub_term = transform_term_to_pattern(sub_term)
if check_valid(sub_pattern, skeleton):
candidate_terms[" ".join(sub_pattern)].add(sub_term)
candidate_triplets = defaultdict(list)
# print("candidate_term:", candidate_terms)
for candidate_skeleton, candidate_terms in candidate_terms.items():
# a1, r, a2 = candidate_term.split("#")
for candidate_term in candidate_terms:
candidate_term = candidate_term.replace("#", " ")
if candidate_term.count("M") == 1:
if candidate_term.startswith("?x0 is M") and split in ['mcd2', 'mcd3']:
candidate_triplets[candidate_skeleton] += [candidate_term]
else:
candidate_triplets[candidate_skeleton] += [''.join(candidate_term.replace("M", entity[0][0])) for entity in entities]
elif candidate_term.count("M") == 2:
candidate_term = list(candidate_term)
index_m = candidate_term.index('M')
candidate_term[index_m] = 'W'
index_m = candidate_term.index('M')
candidate_term[index_m] = 'Y'
candidate_term = ''.join(candidate_term)
for i in permutations(entities, 2):
a1, a2 = i[0][0][0], i[1][0][0]
# print(a1, a2, candidate_term)
candidate_term_ = candidate_term.replace("W", a1)
candidate_term_ = candidate_term_.replace("Y", a2)
candidate_triplets[candidate_skeleton].append(candidate_term_)
else:
candidate_triplets[candidate_skeleton].append(candidate_term)
# print(entities)
# for i in terms:
# print(i)
# print(terms)
# print("candidate_terms:", candidate_terms)
# print("candidate_triplets:", candidate_triplets)
return candidate_triplets
def modify_skeleton(self,sparql):
sparql = sparql.replace("SELECT count(*) WHERE { ", " ")
sparql = sparql.replace("SELECT DISTINCT ?x0 WHERE { ", " ")
sparql_list = sparql.strip().split(" . ")
skeleton_list = []
# print(sparql)
for item in sparql_list:
if item.startswith("FILTER"):
continue
a1, r, a2 = item.strip().split()
# print(a1, r, a2)
if a1.startswith("?x") and a2.startswith("?x"):
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
skeleton_list.append(f"{a1} P M")
elif a2.startswith("?x") and a1.startswith("M"):
skeleton_list.append(f"M P {a2}")
elif a1.startswith("M") and a2.startswith("M"):
skeleton_list.append(f"M P M")
elif a1.startswith("?x") and r == "a":
skeleton_list.append(f"{a1} a M")
elif re.match(r'M[0-9]', a1):
skeleton_list.append(f"M V S")
else:
skeleton_list.append(f"{a1} V S")
skeleton_set = list(set(skeleton_list))
skeleton_set.sort(key=skeleton_list.index)
return sparql, " . ".join(skeleton_set)
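# Illustrative sketch (assumed input, not from the original code):
#     sparql = "?x0 ns:film.director.film M0 . ?x0 a ns:people.person"
#     helper.modify_skeleton(sparql)
# returns the (prefix-stripped) sparql together with the skeleton "?x0 P M . ?x0 a M",
# with duplicate skeleton items removed while preserving first-occurrence order.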
def clear_sparql(self, sparql):
return re.findall(r'[{](.*?)[}]', sparql.replace('\n', ' '))[0]
def clear_skeleton(self, skeleton):
# print(skeleton)
# print(re.findall(r'[{](.*?)[}]', skeleton.strip())[0])
skeleton = [i for i in re.findall(r'[{](.*?)[}]', skeleton)[0].strip().split(" . ") if not i.startswith("FILTER")]
for idx, item in enumerate(skeleton):
a1, r, a2 = item.split()
if re.match(r'M[0-9]', a1):
a1 = 'M'
if re.match(r'M[0-9]', a2):
a2 = 'M'
if re.match(r'P[0-9]', r):
r = 'P'
skeleton[idx] = " ".join([a1, r, a2])
return " . ".join(skeleton)
def split_skeleton(self, skeleton, flag):
# print("skeleton:", skeleton)
if isinstance(skeleton, list):
skeleton = " . ".join(skeleton)
sparql_groups_part= self.split_sub_skeleton(skeleton, flag, 0)
sparql_groups = []
for i in sparql_groups_part:
xidx = 0
isplit = i.split(" . ")
for kkidx, kk in enumerate(isplit):
if kk.startswith('?x'):
xidx =kkidx
## record which intermediate variable the currently split-off sparql uses
variable_idx = int(kk[2])
break
if xidx > 0:
sub_sparql_groups_p1 = self.split_sub_skeleton(' . '.join(isplit[xidx:]), False, variable_idx)
sub_sparql_groups_p2 = self.split_sub_skeleton(' . '.join(isplit[:xidx]), False, variable_idx)
for i in sub_sparql_groups_p2:
for j in sub_sparql_groups_p1:
sparql_groups.append(i+' . ' + j)
else:
sparql_groups.append(i)
return sparql_groups
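# Illustrative note (not from the original code): split_skeleton takes a full skeleton
# string such as "M P ?x1 . ?x1 P ?x2 . ?x2 P M" and, via split_sub_skeleton, breaks it
# into smaller " . "-joined sub-skeleton groups anchored on the intermediate ?x variables,
# so that each group can later be matched against candidate triples independently.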
def split_sub_skeleton(self, sparql, Mflag, count):
# split_lf_results = []
# split_lf_combine_results = []
def trans_tuple_str(tuple_list):
t_all = tuple_list[0]
for i in range(1, len(tuple_list)):
if isinstance(tuple_list[i], tuple):
t_all += tuple_list[i]
else:
return False
return ' '.join(t_all)
results, triples, FILTER_triples = [], [], []
for clf in sparql.split(" . "):
if clf.startswith("FILTER"):
continue
elif len(clf.split()) != 3:
continue
a1, r, a2 = clf.split()
var_cnt = self.count_var(clf)
triples.append((a1, r, a2, var_cnt))
split_dict = defaultdict(list)
sorted_triples = sorted(triples, key=lambda k: k[-1])
# print("sorted triples:", sparql, "\n", sorted_triples)
## splitting strategy
for triple in sorted_triples:
if isinstance(triple, tuple) and len(triple) == 4:
arg1, rel, arg2, _ = triple
triple = (arg1, rel, arg2)
## for triples with two variables,
## insert them into previously collected triple groups whenever possible
## [?x0 ?x1] [?x1, ?x2] [?x2, ?x3]
if arg1.startswith('?x') and arg2.startswith('?x'):
## specific fix for chain-shaped skeletons!
## the matched groups need to be updated at every step
arg_max = arg1 if arg1 > arg2 else arg2
arg_min = arg2 if arg1 > arg2 else arg1
if len(split_dict[arg_max]) > 0:
for cur_list in split_dict[arg_max]:
cur_list_ = cur_list[:]
cur_list_.insert(0, triple)
if Mflag and len(split_dict[arg_min]) > 0:
for j in split_dict[arg_min]:
j+=cur_list_
else:
split_dict[arg_min].append(cur_list_)
else:
split_dict[arg_max].append([triple])
## if the triple has only one variable,
## check whether it can complement a group added earlier
## e.g. (?x, r, M) complements an earlier (M, r, ?x)
elif (not Mflag and arg1.startswith('?x') and not arg1.startswith("?x0")) \
or (Mflag and arg1.startswith('?x')):
flag = True
for t in split_dict[arg1]:
if t[0][0] != arg1:
t.append(triple)
flag = False
if flag:
split_dict[arg1].append([triple])
# print("h:",split_dict)
## otherwise this is the first triple relation for this variable
elif (not Mflag and arg1.startswith("M") and arg2.startswith("?x") and not arg2.startswith("?x0")) \
or (Mflag and arg1.startswith("M") and arg2.startswith("?x")):
flag =True
for t in split_dict[arg2]:
t.append(triple)
flag = False
if flag:
split_dict[arg2].append([triple])
else:
variable = arg2 if arg2.startswith("?x") else arg1
split_dict[variable].append([triple])
else:
split_dict[triple] = [triple]
final_split = []
for v in split_dict.values():
for vv in v:
vv_len = len(vv)
xidx, xflag = 0, False
for idx in range(vv_len):
# if SPOUSE_PRED in vv[idx] or SIBLING_PRED in vv[idx]:
# # print("vvidx:", vv[idx])
# a1, r, a2 = vv[idx]
# vv.append(f"FILTER ( {a1} != {a2} )")
vv[idx] = ' '.join(vv[idx])
if not xflag and vv[idx].startswith("?x"):
xidx, xflag = idx, True
vv = ' . '.join(vv[:xidx] + sorted(list(set(vv[xidx:vv_len]))) + vv[vv_len:])
# print(vv,vv[2], count, vv.startswith('?x'), vv[2]!=count)
if not (vv.startswith('?x') and int(vv[2])!=count):
## drop groups that start from an invalid ?x variable
final_split.append(vv)
return final_split
def distribute_triples_to_skeleton(self, skeleton_groups, candidate_triplets):
print("skeleton_groups:", skeleton_groups)
print("candidate_triplets:", candidate_triplets)
# `fn` builds the cartesian product of several candidate lists, joining the chosen elements with `code`
fn = lambda x, code=',': reduce(lambda x, y: [str(i)+code+str(j) for i in x for j in y], x)
ans = []
def replace_variable(pattern, candidates):
# print("pattern:", pattern)
# print("candidates:", candidates)
a1, _, a2 = pattern.split()
modify_candidates = []
for idx, candidate in enumerate(candidates):
a1_c, r_c, a2_c = candidate.split()
a1_c = a1 if a1_c == "?x" else a1_c
a2_c = a2 if a2_c == "?x" else a2_c
modify_candidates.append(' '.join([a1_c, r_c, a2_c]))
# print("modify:", modify_candidates)
return modify_candidates
for skeleton_group in skeleton_groups:
skeleton_group = skeleton_group.split(" . ")
if len(skeleton_group) == 1:
if skeleton_group[0] in candidate_triplets:
ans += candidate_triplets.get(skeleton_group[0])
temp_candidates = candidate_triplets.get(re.sub(r'\?x[0-9]', '?x', skeleton_group[0]), [])
ans += replace_variable(skeleton_group[0], temp_candidates)
else:
# print("11",re.sub(r'\?x[0-9]', '?x', skeleton_group[1]))
# print("22",candidate_triplets.get(re.sub(r'\?x[0-9]', '?x', skeleton_group[1])))
triples_groups = [replace_variable(skeleton_item, candidate_triplets.get(re.sub(r'\?x[0-9]', '?x', skeleton_item), [])) for skeleton_item in skeleton_group]
# print(triples_groups)
ans += fn(triples_groups, ' . ')
return ans
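# Illustrative sketch (assumed data, not from the original code): with a skeleton group
# "?x0 P M . M P ?x0" and candidate_triplets {"?x P M": ["?x r1 M0"], "M P ?x": ["M1 r2 ?x"]},
# replace_variable restores the concrete variable names and the cartesian combination
# above yields ["?x0 r1 M0 . M1 r2 ?x0"].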
def generate_samples(self, query, sparql, triples, type):
# triples = triples.split(" . ")
pos_ans = []
neg_ans = []
valid_cnt = len([i for i in sparql.split(" . ") if not i.startswith("FILTER")])
# print(f"query:{query}\nsparql:{sparql}")
coverage_sparql = set()
for triple_group in triples:
flag = True
for triple in triple_group.split(" . "):
# print(triple)
if triple not in sparql.split(" . "):
flag = False
continue
else:
coverage_sparql.add(triple)
# print(flag, triple_group)
if flag:
pos_ans.append(Sample(query, triple_group, flag).__dict__)
# print(triple_group)
else:
neg_ans.append(Sample(query, triple_group, flag).__dict__)
# coverage_sparql = " . ".join(coverage_sparql).split(" . ")
coverage = True if len(coverage_sparql) == valid_cnt else False
# if not coverage:
# print(coverage_sparql)
# print(sparql)
# print(f"query:{query}\nsparql:{sparql}")
# print(pos_ans, triples)
if type == "train":
# print(len(pos_ans), len(neg_ans))
return coverage, pos_ans + random.sample(neg_ans, min(len(neg_ans), len(pos_ans)))
else:
return coverage, pos_ans + neg_ans
def generate_samples_v2(self, query, trie_sparql, triples, type):
## purpose: compute P/R/F1 statistics for primitive prediction
pos_ans = []
neg_ans = []
# valid_cnt = len([i for i in sparql.split(" . ") if not i.startswith("FILTER")])
valid_paths = [path.strip() for path in trie_sparql.split("###")]
# print(f"query:{query}\nsparql:{sparql}")
# print("triples:", triples)
coverage_sparql = set()
for triple_group in triples:
if triple_group in valid_paths:
pos_ans.append(Sample(query, triple_group, True).__dict__)
# print(triple_group)
else:
# print("triple_group:", triple_group)
# print("valid_paths:", valid_paths)
neg_ans.append(Sample(query, triple_group, False).__dict__)
p = float(len(set(triples) & set(valid_paths))) / float(len(triples))
r = float(len(set(triples) & set(valid_paths))) / float(len(valid_paths))
# f = 2 * p*r / (p+r)
# print(p, r)
if type == "train":
# print(len(pos_ans), len(neg_ans))
return (p, r), pos_ans + random.sample(neg_ans, min(len(neg_ans), len(pos_ans)))
else:
return (p, r), len(pos_ans), len(neg_ans), pos_ans + neg_ans
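# Illustrative note (not from the original code): p and r above are the standard
# precision/recall of the predicted triple groups against the gold paths, e.g. with
# triples = ["A", "B"] and valid_paths = ["A", "C"] this gives p = 0.5 and r = 0.5.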
if __name__ == '__main__':
helper = Helper()
##### some GIZA++ commands
# see this blog post: http://codepothunter.github.io/2016/07/11/How-to-use-GIZA-for-alignment/
############################################ Generate the .update files ############################################
# helper.update_output_format("v4-0520")
############################################ Generate co-occurrence statistics files #############################################
# statistics for qf_file are written to debug_opt
# statistics for fq_file are written to debug_opt2
# qf_file = f"/home/v-yinguo/DKI/GIZA_alignment/v4-0520/query2formula/query2formula.A3.final.update"
# fq_file = f"/home/v-yinguo/DKI/GIZA_alignment/v4-0520/formula2query/formula2query.A3.final.update"
# res_file = open(f"/home/v-yinguo/DKI/GIZA_alignment/coor_file-0520","w")
# sorted_res = helper.statistic_coor(qf_file, fq_file)
# for k in sorted_res:
# # print("here")
# res_file.write(str(k) + '\n')
# print(k)
############################################ Only filter the co-occurrence vocabulary ################################################
# helper = Helper()
# src = "/home/v-yinguo/DKI/GIZA_alignment/coor_file-0520"
# sorted_res = helper.filter_result(src)
# f = open("./coor_file-0520.filter_wto_no", "w")
# for k in sorted_res:
# # print("k")
# if k[-1] > 200:
# f.write(str(k) + '\n')
########################################### Only keep word-to-predicate correspondences ################################################
## generates the phrase_table.pred file
# helper = Helper()
# src = "/home/v-yinguo/DKI/GIZA_alignment/coor_file-0520"
# sorted_res = helper.filter_result_pred(src)
# f = open("./coor_file-0520.filter.pred", "w")
# for k in sorted_res:
# # print("k")
# if k[-1] > 30:
# f.write(str(k) + '\n')
############################################ Filter the co-occurrence vocabulary ################################################
# src1, src2 = "/home/v-yinguo/DKI/GIZA_alignment/debug_opt", "/home/v-yinguo/DKI/GIZA_alignment/debug_opt"
# sorted_res = helper.filter_result(src1, src2)
# f = open("./debug_opt4", "w")
# for k in sorted_res:
# # print("here")
# if k[-1] > 50:
# f.write(str(k) + '\n')
# f.close()
| ContextualSP/poset_decoding/data/generate_phrase_table.py/0 | {
"file_path": "ContextualSP/poset_decoding/data/generate_phrase_table.py",
"repo_id": "ContextualSP",
"token_count": 16375
} | 262 |
coverage:
status:
project:
default:
# basic
target: auto
threshold: 3%
base: auto
# advanced
branches: null
if_no_uploads: error
if_not_found: success
if_ci_failed: error
only_pulls: false
flags: null
paths: null
patch:
default:
threshold: 1% | ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.codecov.yml/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.codecov.yml",
"repo_id": "ContextualSP",
"token_count": 193
} | 263 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../matchzoo'))
sys.path.insert(0, os.path.abspath('../../matchzoo/auto'))
sys.path.insert(0, os.path.abspath('../../matchzoo/data_pack'))
sys.path.insert(0, os.path.abspath('../../matchzoo/dataloader'))
sys.path.insert(0, os.path.abspath('../../matchzoo/datasets'))
sys.path.insert(0, os.path.abspath('../../matchzoo/engine'))
sys.path.insert(0, os.path.abspath('../../matchzoo/embedding'))
sys.path.insert(0, os.path.abspath('../../matchzoo/losses'))
sys.path.insert(0, os.path.abspath('../../matchzoo/models'))
sys.path.insert(0, os.path.abspath('../../matchzoo/modules'))
sys.path.insert(0, os.path.abspath('../../matchzoo/metrics'))
sys.path.insert(0, os.path.abspath('../../matchzoo/preprocessors'))
sys.path.insert(0, os.path.abspath('../../matchzoo/utils'))
sys.path.insert(0, os.path.abspath('../../matchzoo/tasks'))
sys.path.insert(0, os.path.abspath('../../matchzoo/trainers'))
# -- Project information -----------------------------------------------------
project = 'MatchZoo-py'
author = 'MatchZoo'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'autoapi.extension',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
]
autoapi_dirs = ['../../matchzoo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# from recommonmark.parser import CommonMarkParser
# source_parsers = {
# '.md':CommonMarkParser
# }
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MatchZoodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MatchZoo.tex', 'MatchZoo Documentation',
'MatchZoo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'matchzoo', 'MatchZoo Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MatchZoo', 'MatchZoo Documentation',
author, 'MatchZoo', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/conf.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/conf.py",
"repo_id": "ContextualSP",
"token_count": 1869
} | 264 |
from . import callbacks
from .dataset import Dataset
from .dataloader import DataLoader
from .dataloader_builder import DataLoaderBuilder
from .dataset_builder import DatasetBuilder
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/__init__.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/__init__.py",
"repo_id": "ContextualSP",
"token_count": 52
} | 265 |
from .embedding import Embedding
from .embedding import load_from_file
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/embedding/__init__.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/embedding/__init__.py",
"repo_id": "ContextualSP",
"token_count": 20
} | 266 |
"""Average precision metric for ranking."""
import numpy as np
from matchzoo.engine.base_metric import RankingMetric
from . import Precision
class AveragePrecision(RankingMetric):
"""Average precision metric."""
ALIAS = ['average_precision', 'ap']
def __init__(self, threshold: float = 0.):
"""
:class:`AveragePrecision` constructor.
:param threshold: The label threshold of relevance degree.
"""
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS[0]}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate average precision (area under PR curve).
Example:
>>> y_true = [0, 1]
>>> y_pred = [0.1, 0.6]
>>> round(AveragePrecision()(y_true, y_pred), 2)
0.75
>>> round(AveragePrecision()([], []), 2)
0.0
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Average precision.
"""
precision_metrics = [Precision(k + 1) for k in range(len(y_pred))]
out = [metric(y_true, y_pred) for metric in precision_metrics]
if not out:
return 0.
return np.mean(out).item()
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/average_precision.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/average_precision.py",
"repo_id": "ContextualSP",
"token_count": 596
} | 267 |
"""A simple densely connected baseline model."""
import typing
import torch
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class DenseBaseline(BaseModel):
"""
A simple densely connected baseline model.
Examples:
>>> model = DenseBaseline()
>>> model.params['mlp_num_layers'] = 2
>>> model.params['mlp_num_units'] = 300
>>> model.params['mlp_num_fan_out'] = 128
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params['mlp_num_units'] = 256
params.get('mlp_num_units').hyper_space = \
hyper_spaces.quniform(16, 512)
params.get('mlp_num_layers').hyper_space = \
hyper_spaces.quniform(1, 5)
return params
def build(self):
"""Build."""
        self.embedding = self._make_default_embedding_layer()
self.mlp = self._make_multi_layer_perceptron_layer(
2 * self._params['embedding_output_dim']
)
self.out = self._make_output_layer(
self._params['mlp_num_fan_out']
)
def forward(self, inputs):
"""Forward."""
input_left, input_right = inputs['text_left'], inputs['text_right']
        input_left = self.embedding(input_left.long()).sum(1)
        input_right = self.embedding(input_right.long()).sum(1)
x = torch.cat((input_left, input_right), dim=1)
return self.out(self.mlp(x))
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/dense_baseline.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/dense_baseline.py",
"repo_id": "ContextualSP",
"token_count": 802
} | 268 |
"""Attention module."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
"""
Attention module.
:param input_size: Size of input.
:param mask: An integer to mask the invalid values. Defaults to 0.
Examples:
>>> import torch
>>> attention = Attention(input_size=10)
>>> x = torch.randn(4, 5, 10)
>>> x.shape
torch.Size([4, 5, 10])
>>> x_mask = torch.BoolTensor(4, 5)
>>> attention(x, x_mask).shape
torch.Size([4, 5])
"""
def __init__(self, input_size: int = 100):
"""Attention constructor."""
super().__init__()
self.linear = nn.Linear(input_size, 1, bias=False)
def forward(self, x, x_mask):
"""Perform attention on the input."""
x = self.linear(x).squeeze(dim=-1)
x = x.masked_fill(x_mask, -float('inf'))
return F.softmax(x, dim=-1)
class BidirectionalAttention(nn.Module):
"""Computing the soft attention between two sequence."""
def __init__(self):
"""Init."""
super().__init__()
def forward(self, v1, v1_mask, v2, v2_mask):
"""Forward."""
similarity_matrix = v1.bmm(v2.transpose(2, 1).contiguous())
v2_v1_attn = F.softmax(
similarity_matrix.masked_fill(
v1_mask.unsqueeze(2), -1e-7), dim=1)
v1_v2_attn = F.softmax(
similarity_matrix.masked_fill(
v2_mask.unsqueeze(1), -1e-7), dim=2)
attended_v1 = v1_v2_attn.bmm(v2)
attended_v2 = v2_v1_attn.transpose(1, 2).bmm(v1)
attended_v1.masked_fill_(v1_mask.unsqueeze(2), 0)
attended_v2.masked_fill_(v2_mask.unsqueeze(2), 0)
return attended_v1, attended_v2
class MatchModule(nn.Module):
"""
Computing the match representation for Match LSTM.
:param hidden_size: Size of hidden vectors.
:param dropout_rate: Dropout rate of the projection layer. Defaults to 0.
Examples:
>>> import torch
>>> attention = MatchModule(hidden_size=10)
>>> v1 = torch.randn(4, 5, 10)
>>> v1.shape
torch.Size([4, 5, 10])
>>> v2 = torch.randn(4, 5, 10)
>>> v2_mask = torch.ones(4, 5).to(dtype=torch.uint8)
>>> attention(v1, v2, v2_mask).shape
torch.Size([4, 5, 20])
"""
def __init__(self, hidden_size, dropout_rate=0):
"""Init."""
super().__init__()
self.v2_proj = nn.Linear(hidden_size, hidden_size)
self.proj = nn.Linear(hidden_size * 4, hidden_size * 2)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, v1, v2, v2_mask):
"""Computing attention vectors and projection vectors."""
proj_v2 = self.v2_proj(v2)
similarity_matrix = v1.bmm(proj_v2.transpose(2, 1).contiguous())
v1_v2_attn = F.softmax(
similarity_matrix.masked_fill(
v2_mask.unsqueeze(1).bool(), -1e-7), dim=2)
v2_wsum = v1_v2_attn.bmm(v2)
fusion = torch.cat([v1, v2_wsum, v1 - v2_wsum, v1 * v2_wsum], dim=2)
match = self.dropout(F.relu(self.proj(fusion)))
return match
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/attention.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/attention.py",
"repo_id": "ContextualSP",
"token_count": 1550
} | 269 |
from matchzoo.data_pack import DataPack
from .units import Vocabulary
from .build_unit_from_data_pack import build_unit_from_data_pack
def build_vocab_unit(
data_pack: DataPack,
mode: str = 'both',
verbose: int = 1
) -> Vocabulary:
"""
Build a :class:`preprocessor.units.Vocabulary` given `data_pack`.
    The `data_pack` should be preprocessed beforehand, and each item in
`text_left` and `text_right` columns of the `data_pack` should be a list
of tokens.
:param data_pack: The :class:`DataPack` to build vocabulary upon.
:param mode: One of 'left', 'right', and 'both', to determine the source
data for building the :class:`VocabularyUnit`.
:param verbose: Verbosity.
:return: A built vocabulary unit.
"""
return build_unit_from_data_pack(
unit=Vocabulary(),
data_pack=data_pack,
mode=mode,
flatten=True, verbose=verbose
)
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/build_vocab_unit.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/build_vocab_unit.py",
"repo_id": "ContextualSP",
"token_count": 348
} | 270 |
import typing
import numpy as np
from .unit import Unit
class TruncatedLength(Unit):
"""
TruncatedLengthUnit Class.
Process unit to truncate the text that exceeds the set length.
Examples:
>>> from matchzoo.preprocessors.units import TruncatedLength
>>> truncatedlen = TruncatedLength(3)
>>> truncatedlen.transform(list(range(1, 6))) == [3, 4, 5]
True
>>> truncatedlen.transform(list(range(2))) == [0, 1]
True
"""
def __init__(
self,
text_length: int,
truncate_mode: str = 'pre'
):
"""
Class initialization.
:param text_length: the specified maximum length of text.
:param truncate_mode: String, `pre` or `post`:
remove values from sequences larger than :attr:`text_length`,
either at the beginning or at the end of the sequences.
"""
self._text_length = text_length
self._truncate_mode = truncate_mode
def transform(self, input_: list) -> list:
"""
Truncate the text that exceeds the specified maximum length.
:param input_: list of tokenized tokens.
:return tokens: list of tokenized tokens in fixed length
if its origin length larger than :attr:`text_length`.
"""
if len(input_) <= self._text_length:
truncated_tokens = input_
else:
if self._truncate_mode == 'pre':
truncated_tokens = input_[-self._text_length:]
elif self._truncate_mode == 'post':
truncated_tokens = input_[:self._text_length]
else:
                raise ValueError('{} is not a valid '
'truncate mode.'.format(self._truncate_mode))
return truncated_tokens
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/truncated_length.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/truncated_length.py",
"repo_id": "ContextualSP",
"token_count": 803
} | 271 |
import typing
import torch
from torch import nn
from torch import optim
import matchzoo
from matchzoo.engine.base_metric import (
BaseMetric, RankingMetric, ClassificationMetric
)
activation = nn.ModuleDict([
['relu', nn.ReLU()],
['hardtanh', nn.Hardtanh()],
['relu6', nn.ReLU6()],
['sigmoid', nn.Sigmoid()],
['tanh', nn.Tanh()],
['softmax', nn.Softmax()],
['softmax2d', nn.Softmax2d()],
['logsoftmax', nn.LogSoftmax()],
['elu', nn.ELU()],
['selu', nn.SELU()],
['celu', nn.CELU()],
['hardshrink', nn.Hardshrink()],
['leakyrelu', nn.LeakyReLU()],
['logsigmoid', nn.LogSigmoid()],
['softplus', nn.Softplus()],
['softshrink', nn.Softshrink()],
['prelu', nn.PReLU()],
['softsign', nn.Softsign()],
['softmin', nn.Softmin()],
['tanhshrink', nn.Tanhshrink()],
['rrelu', nn.RReLU()],
['glu', nn.GLU()],
])
loss = nn.ModuleDict([
['l1', nn.L1Loss()],
['nll', nn.NLLLoss()],
['kldiv', nn.KLDivLoss()],
['mse', nn.MSELoss()],
['bce', nn.BCELoss()],
['bce_with_logits', nn.BCEWithLogitsLoss()],
['cosine_embedding', nn.CosineEmbeddingLoss()],
['ctc', nn.CTCLoss()],
['hinge_embedding', nn.HingeEmbeddingLoss()],
['margin_ranking', nn.MarginRankingLoss()],
['multi_label_margin', nn.MultiLabelMarginLoss()],
['multi_label_soft_margin', nn.MultiLabelSoftMarginLoss()],
['multi_margin', nn.MultiMarginLoss()],
['smooth_l1', nn.SmoothL1Loss()],
['soft_margin', nn.SoftMarginLoss()],
['cross_entropy', nn.CrossEntropyLoss()],
['triplet_margin', nn.TripletMarginLoss()],
['poisson_nll', nn.PoissonNLLLoss()]
])
optimizer = dict({
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'sparse_adam': optim.SparseAdam,
'adamax': optim.Adamax,
'asgd': optim.ASGD,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': optim.SGD
})
def _parse(
identifier: typing.Union[str, typing.Type[nn.Module], nn.Module],
dictionary: nn.ModuleDict,
target: str
) -> nn.Module:
"""
Parse loss and activation.
:param identifier: activation identifier, one of
        - String: name of an activation
        - Torch Module subclass
- Torch Module instance (it will be returned unchanged).
:param dictionary: nn.ModuleDict instance. Map string identifier to
nn.Module instance.
:return: A :class:`nn.Module` instance
"""
if isinstance(identifier, str):
if identifier in dictionary:
return dictionary[identifier]
else:
raise ValueError(
f'Could not interpret {target} identifier: ' + str(identifier)
)
elif isinstance(identifier, nn.Module):
return identifier
elif issubclass(identifier, nn.Module):
return identifier()
else:
raise ValueError(
f'Could not interpret {target} identifier: ' + str(identifier)
)
def parse_activation(
identifier: typing.Union[str, typing.Type[nn.Module], nn.Module]
) -> nn.Module:
"""
Retrieves a torch Module instance.
:param identifier: activation identifier, one of
        - String: name of an activation
        - Torch Module subclass
- Torch Module instance (it will be returned unchanged).
:return: A :class:`nn.Module` instance
Examples::
>>> from torch import nn
>>> from matchzoo.utils import parse_activation
Use `str` as activation:
>>> activation = parse_activation('relu')
>>> type(activation)
<class 'torch.nn.modules.activation.ReLU'>
Use :class:`torch.nn.Module` subclasses as activation:
>>> type(parse_activation(nn.ReLU))
<class 'torch.nn.modules.activation.ReLU'>
Use :class:`torch.nn.Module` instances as activation:
>>> type(parse_activation(nn.ReLU()))
<class 'torch.nn.modules.activation.ReLU'>
"""
return _parse(identifier, activation, 'activation')
def parse_loss(
identifier: typing.Union[str, typing.Type[nn.Module], nn.Module],
task: typing.Optional[str] = None
) -> nn.Module:
"""
Retrieves a torch Module instance.
:param identifier: loss identifier, one of
- String: name of a loss
- Torch Module subclass
- Torch Module instance (it will be returned unchanged).
:param task: Task type for determining specific loss.
:return: A :class:`nn.Module` instance
Examples::
>>> from torch import nn
>>> from matchzoo.utils import parse_loss
Use `str` as loss:
>>> loss = parse_loss('mse')
>>> type(loss)
<class 'torch.nn.modules.loss.MSELoss'>
Use :class:`torch.nn.Module` subclasses as loss:
>>> type(parse_loss(nn.MSELoss))
<class 'torch.nn.modules.loss.MSELoss'>
Use :class:`torch.nn.Module` instances as loss:
>>> type(parse_loss(nn.MSELoss()))
<class 'torch.nn.modules.loss.MSELoss'>
"""
return _parse(identifier, loss, 'loss')
def _parse_metric(
metric: typing.Union[str, typing.Type[BaseMetric], BaseMetric],
Metrix: typing.Type[BaseMetric]
) -> BaseMetric:
"""
Parse metric.
    :param metric: Input metric in any form.
:param Metrix: Base Metric class. Either
:class:`matchzoo.engine.base_metric.RankingMetric` or
:class:`matchzoo.engine.base_metric.ClassificationMetric`.
:return: A :class:`BaseMetric` instance
"""
if isinstance(metric, str):
metric = metric.lower() # ignore case
for subclass in Metrix.__subclasses__():
if metric == subclass.ALIAS or metric in subclass.ALIAS:
return subclass()
elif isinstance(metric, Metrix):
return metric
elif issubclass(metric, Metrix):
return metric()
raise ValueError(f'`{metric}` can not be used in current task.')
def parse_metric(
metric: typing.Union[str, typing.Type[BaseMetric], BaseMetric],
task: str
) -> BaseMetric:
"""
Parse input metric in any form into a :class:`BaseMetric` instance.
:param metric: Input metric in any form.
:param task: Task type for determining specific metric.
:return: A :class:`BaseMetric` instance
Examples::
>>> from matchzoo import metrics
>>> from matchzoo.utils import parse_metric
Use `str` as MatchZoo metrics:
>>> mz_metric = parse_metric('map', 'ranking')
>>> type(mz_metric)
<class 'matchzoo.metrics.mean_average_precision.MeanAveragePrecision'>
Use :class:`matchzoo.engine.BaseMetric` subclasses as MatchZoo metrics:
>>> type(parse_metric(metrics.AveragePrecision, 'ranking'))
<class 'matchzoo.metrics.average_precision.AveragePrecision'>
Use :class:`matchzoo.engine.BaseMetric` instances as MatchZoo metrics:
>>> type(parse_metric(metrics.AveragePrecision(), 'ranking'))
<class 'matchzoo.metrics.average_precision.AveragePrecision'>
"""
if task is None:
raise ValueError(
'Should specify one `BaseTask`.'
)
if task == 'ranking':
return _parse_metric(metric, RankingMetric)
if task == 'classification':
return _parse_metric(metric, ClassificationMetric)
else:
raise ValueError(
'Should be a Ranking or Classification task.'
)
def parse_optimizer(
identifier: typing.Union[str, typing.Type[optim.Optimizer]],
) -> optim.Optimizer:
"""
    Parse an input optimizer in any form into a :class:`Optimizer` class.
    :param identifier: Input optimizer in any form.
:return: A :class:`Optimizer` class
Examples::
>>> from torch import optim
>>> from matchzoo.utils import parse_optimizer
Use `str` as optimizer:
>>> parse_optimizer('adam')
<class 'torch.optim.adam.Adam'>
Use :class:`torch.optim.Optimizer` subclasses as optimizer:
>>> parse_optimizer(optim.Adam)
<class 'torch.optim.adam.Adam'>
"""
if isinstance(identifier, str):
identifier = identifier.lower() # ignore case
if identifier in optimizer:
return optimizer[identifier]
else:
raise ValueError(
f'Could not interpret optimizer identifier: ' + str(identifier)
)
elif issubclass(identifier, optim.Optimizer):
return identifier
else:
raise ValueError(
f'Could not interpret optimizer identifier: ' + str(identifier)
)
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/parse.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/parse.py",
"repo_id": "ContextualSP",
"token_count": 3624
} | 272 |
"""
These tests are simplified because the original version takes too much time to
run, making CI fail as it reaches the time limit.
"""
import torch
import pytest
from pathlib import Path
import shutil
import matchzoo as mz
@pytest.fixture(scope='module', params=[
mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=2)),
mz.tasks.Classification(num_classes=2),
])
def task(request):
return request.param
@pytest.fixture(scope='module')
def train_raw(task):
return mz.datasets.toy.load_data('train', task)[:10]
@pytest.fixture(scope='module', params=mz.models.list_available())
def model_class(request):
return request.param
@pytest.fixture(scope='module')
def embedding():
return mz.datasets.toy.load_embedding()
@pytest.fixture(scope='module')
def setup(task, model_class, train_raw, embedding):
return mz.auto.prepare(
task=task,
model_class=model_class,
data_pack=train_raw,
embedding=embedding
)
@pytest.fixture(scope='module')
def model(setup):
return setup[0]
@pytest.fixture(scope='module')
def preprocessor(setup):
return setup[1]
@pytest.fixture(scope='module')
def dataset_builder(setup):
return setup[2]
@pytest.fixture(scope='module')
def dataloader_builder(setup):
return setup[3]
@pytest.fixture(scope='module')
def dataloader(train_raw, preprocessor, dataset_builder, dataloader_builder):
return dataloader_builder.build(
dataset_builder.build(preprocessor.transform(train_raw)))
@pytest.fixture(scope='module')
def optimizer(model):
return torch.optim.Adam(model.parameters())
@pytest.fixture(scope='module')
def save_dir():
return Path('.matchzoo_test_save_load_tmpdir')
@pytest.mark.slow
def test_model_fit_eval_predict(model, optimizer, dataloader, save_dir):
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=dataloader,
validloader=dataloader,
epochs=2,
save_dir=save_dir,
verbose=0
)
trainer.run()
if save_dir.exists():
shutil.rmtree(save_dir)
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/models/test_models.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/models/test_models.py",
"repo_id": "ContextualSP",
"token_count": 839
} | 273 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.models.Bert.get_default_preprocessor()
train_pack_processed = preprocessor.transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.Bert.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.Bert()
model.params['task'] = ranking_task
model.params['mode'] = 'bert-base-uncased'
model.params['dropout_rate'] = 0.2
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 5e-5},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
from pytorch_transformers import AdamW, WarmupLinearSchedule
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, betas=(0.9, 0.98), eps=1e-8)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=6, t_total=-1)
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
scheduler=scheduler,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output> | ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/bert.ipynb/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/bert.ipynb",
"repo_id": "ContextualSP",
"token_count": 752
} | 274 |
<jupyter_start><jupyter_code>import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
import torch
from torch.nn.functional import softmax
from copy import deepcopy
import enchant
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
from tqdm import trange, tqdm
import json
import nltk
import spacy
import string
import re
import pickle
import inflect
nlp = spacy.load('en_core_web_sm')<jupyter_output><empty_output><jupyter_text>Global Variables<jupyter_code>bigram_reservered_words = ['against', 'and', 'area', 'average', 'since', 'away', 'section', 'by', 'class', 'club', 'code', 'cup', 'current', 'date', 'data', 'district', 'elected', 'engine', 'episode', 'event', 'final', 'finish', 'first', 'for', 'from', 'game', 'games', 'goals', 'gold', 'grid', 'height', 'high', 'home', 'id', 'in', 'incumbent', 'international', 'laps', 'league', 'list', 'log', 'loss', 'losses', 'lost', 'method', 'age', 'name', 'nation', 'no', 'notes', 'number', 'of', 'one', 'two', 'three', 'four', 'yes', 'no', 'yards', 'five' 'other', 'outcome', 'overall', 'par', 'party', 'per', 'pick', 'played', 'player', 'points', 'pos', 'rank', 'record', 'region', 'release', 'report', 'res', 'result', 'results','round', 'score', 'season', 'second', 'series', 'singles', 'start', 'end', 'state', 'status', 'table', 'team', 'types', 'the', 'first', 'second', 'third', 'time', 'to', 'total', 'type', 'up', 'week', 'weeks', 'year', 'unit', 'version', 'years', 'ends', 'ended', 'min', 'max', 'make', 'statistics', 'stats', 'in', 'on', 'to', 'see', 'feet', 'subject']
preps = ["aboard","about","above","across","after","against","along","amid","among","as","at","before","behind","below","beneath","beside","besides","between","beyond","but","by","concerning","considering","despite","down","during","except","excepting","excluding","following","for","from","in","inside","into","like","minus","near","of","off","on","onto","opposite","outside","over","past","per","plus","regarding","round","save","since","than","through","to","toward","towards","under","underneath","unlike","until","up","upon","versus","via","with","within","without"]
bigram_reservered_words = list(set(bigram_reservered_words + preps))
### Templates to be used for checking
# template1 = lambda table_name, col: f"We are told of the {table_name}'s {trim_col_name(table_name, col)}."
# template2 = lambda table_name, col: f"We are informed of the {trim_col_name(table_name, col)} of the {table_name}."
# template3 = lambda table_name, col: f"We know {trim_col_name(table_name, col)} of the {table_name}."
# template4 = lambda table_name, col: f"We collect {table_name}'s {trim_col_name(table_name, col)}."
# TEMPLATES = [template1, template2, template3, template4] if STRICT_MODE else [template1, template2]
tablename_black_list = ["statistics", "data", "table", "summary", "sketch", "list"]
date_marks = ["date", "dates", "year", "years", "month", "months",
"day", "days", "daytime", "minute", "minutes", "second", "seconds", "time"]
num_marks = ["num", "number", "sum", "amount", "count", "total", "#", "No.", "no.", "scores",
"rating", "rank", "height", "weight", "age", "time", "times", "temperature",
"year", "years", "month", "months", "day", "days", "minute", "minutes",
"second", "seconds", "average", "sum", "grade", "fee", "cost", "value",
"rate"] # words explicitly has numeric implications
def template1(table_name, col, col_type):
"""Template 1 placeholder filling for give table name, column name, and column type"""
table_name = get_singular_word(table_name)
capital_tname = table_name[0].capitalize() + table_name[1:]
trimmed_col_name = trim_col_naive(table_name, col)
type_prompt = trim_type(col_type)
if col_type == "date" and any([dm in trimmed_col_name for dm in date_marks]):
type_prompt = ""
if col_type == "number" and any([nm in trimmed_col_name for nm in num_marks]):
type_prompt = ""
return f"{capital_tname} {trimmed_col_name}{trim_type(col_type)}."
TEMPLATES = [template1]
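# Illustrative sketch (assumed inputs, not from the original notebook): for a table named
# "players" with a numeric column "points",
#     template1("players", "points", "number")
# yields roughly "Player points number.": the singularized table name, the trimmed column
# name, and a type hint that is dropped when the column name already implies the type.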
device = "cuda"
nli_tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
nli_model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli").to(device)<jupyter_output><empty_output><jupyter_text>Preprocessing Functions<jupyter_code>inflect_engine = inflect.engine()
def get_singular_word(word):
"""
Reduce a given word (string) to singular form
"""
ans = inflect_engine.singular_noun(word)
return ans if ans else word
def read_dense_table_vectors(path, delim="\t"):
"""
Read backend dense table vectors
"""
with open(path, "r") as f:
tid2vals = {}
for line in f.readlines():
if len(line) == 0: continue
units = line.split(delim)
table_id, vals = units[0], units[1:]
tid2vals[table_id] = np.array(vals).astype(float)
return tid2vals
def trim_col(tname, col):
"""
    Trim a column name against the table name (drop a leading token that repeats the table name).
    :tname: table name
    :col: column name to be trimmed
"""
if tname == '' or col == '': return col
if tname == ' ' or col == ' ': return col
tname_tokens = nlp(tname)
col_tokens = nlp(col)
if tname_tokens[-1].lemma_ == col_tokens[0].lemma_:
return col_tokens[1:]
return col
# template1 = lambda table_name, col: f"{table_name[0].capitalize() + table_name[1:]} {trim_col(table_name, col)}."
def trim_col_naive(tname, col):
if tname == '' or col == '': return col
if tname.lower() == col.lower():
return 'name'
return col.lower()
def trim_type(col_type):
if col_type in ["text", "bool"]:
return ""
return " " + "time" if col_type == "date" else "number"<jupyter_output><empty_output><jupyter_text>Load datasets<jupyter_code>with open("./data/tid2tables.pkl","rb") as f:
tid2tables = pickle.load(f)
idx2tid = {i:tid for i,tid in enumerate(tid2tables.keys())}
tid2idx = {tid:i for i,tid in enumerate(tid2tables.keys())}
spiders = []
with open("./data/spider/spider-tables.jsonl", "r") as f:
for line in f.readlines():
table = json.loads(line)
spiders.append(table)
wtqs = []
with open("./data/WTQ/wtq-tables.jsonl", "r") as f: # no table name, no domain name
for line in f.readlines():
table = json.loads(line) # dict_keys(['file_name', 'table_name', 'column_types', 'column_names', 'column_values'])
wtqs.append(table)
wsqls_train = []
with open("./data/wikisql/wikisql-tables.jsonl", "r") as f:
for line in f.readlines():
table = json.loads(line) # dict_keys(['refer_cols_index', 'domain', 'table_id', 'table_name', 'column_names', 'column_types', 'column_values'])
wsqls_train.append(table)
with open("./data/wikisql/wikisql_train.tables2question.json", "r") as f:
wsql_train_table2qs = json.load(f)
with open("./data/spider/spider-table2questions.json", "r") as f:
spider_table2qs = json.load(f)
with open("./data/WTQ/wtq-table2questions.json", "r") as f:
wtq_table2qs = json.load(f)
idx2word, word2idx = {}, {}
word2vec = {}
with open("./data/numberbatch/nb_emb.txt", "r") as f:
cnt = -2
for line in tqdm(f.readlines(), desc="Building word2vec...", leave=True):
cnt += 1
if cnt == -1: continue
units = line.split(" ")
word, emb = units[0], np.array(units[1:]).astype(float)
word2vec[word] = emb
idx2word[cnt] = word
word2idx[word] = cnt
EMB_DIM=300
with open("./data/syndict_pipeline.json") as f:
synonym_dic = json.load(f)<jupyter_output><empty_output><jupyter_text>Dense Retrieval Setup<jupyter_code>import os
import torch
import torch.nn as nn
import numpy as np
from transformers import TapasModel, TapasConfig, TapasTokenizer, BertModel, BertTokenizer
def build_projection_layer(weight_path: str):
with open(weight_path, 'rb') as f:
weights = torch.from_numpy(np.load(f))
linear = nn.Linear(weights.size(0), weights.size(1), bias=False)
linear.weight.data = weights
return linear
MAX_LEN = 1024
DUMMY_TABLE = pd.DataFrame({})
basepath = os.path.join("tapas-torch", "tapas_retrieval")
table_model_path = os.path.join(basepath, "tapas_nq_hn_retriever_large_table", "checkpoint")
table_model = TapasModel.from_pretrained(table_model_path).to(device)
tapas_tokenizer = TapasTokenizer.from_pretrained(table_model_path)
table_model_config = TapasConfig.from_pretrained(table_model_path)
query_model_path = os.path.join(basepath, "tapas_nq_hn_retriever_large_query", "checkpoint")
query_model = TapasModel.from_pretrained(query_model_path).to(device)
text_projection_layer = build_projection_layer(os.path.join(basepath, "projection_layer", "text_projection.npy")).to(device)
table_projection_layer = build_projection_layer(os.path.join(basepath, "projection_layer", "table_projection.npy")).to(device)
def form_table(dic_table, col_name_key="column_names", max_row_limit=10, max_cell_val_len=50): # output a dataframe
"""
Build source table text for dense vector computation. Done via resampling strategy.
:dic_table: table as a dictionary
:col_name_key: the key name in passed dic_table storing column names (as a list).
    :max_row_limit: maximum number of rows for the constructed table
:max_cell_val_len: cell values will be truncated to this length.
"""
col_names = dic_table["column_names"]
col_vals = {k : list(set(v)) for k,v in dic_table["column_values"].items()}
try:
longest_unique = min(max([len(v) for v in col_vals.values()]), max_row_limit)
except:
if len(dic_table[col_name_key]) == 0:
return DUMMY_TABLE
else:
return pd.DataFrame({k:[] for k in dic_table[col_name_key]})
col2vals = {n : [str(elem)[:max_cell_val_len] for elem in np.random.choice(v, longest_unique, replace=True)] for n,v in col_vals.items()}
return pd.DataFrame(col2vals)
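# Illustrative sketch (hypothetical values, not part of the original notebook): the
# dictionary layout `form_table` expects and a typical call. Column values are
# resampled to a common row count so the result is a rectangular DataFrame.
def _form_table_example():
    toy_table = {
        "column_names": ["name", "age"],
        "column_values": {"name": ["adele", "bono"], "age": ["33", "61", "61"]},
    }
    return form_table(toy_table, max_row_limit=5)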
# takes a while to load 615144 * 2 vectors...
wdc_dense_a = read_dense_table_vectors(path="./wdc/wdc_dense_A.txt")
wdc_dense_b = read_dense_table_vectors(path="./wdc/wdc_dense_B.txt")
idx2tid = {i:k for i,k in enumerate(wdc_dense_a.keys())}
tid2idx = {k:i for i,k in idx2tid.items()}
bm_mat_A = torch.stack([torch.Tensor(vs) for vs in wdc_dense_a.values()], dim = 0).to(device)
bm_mat_B = torch.stack([torch.Tensor(vs) for vs in wdc_dense_b.values()], dim = 0).to(device)
bm_mat_A = bm_mat_A / torch.norm(bm_mat_A, dim=-1).unsqueeze(-1)
bm_mat_B = bm_mat_B / torch.norm(bm_mat_B, dim=-1).unsqueeze(-1)<jupyter_output><empty_output><jupyter_text>Core NLI algorithms<jupyter_code>def trim_col_name(table_name, col_name):
if table_name == col_name:
return table_name + " name"
return col_name
def check_spell(col_name):
"""
    Check whether a column name (multiple words allowed) consists of valid English words.
"""
return all([checker.check(w) for w in col_name.split(" ") if w != ""])
def batchify(pair_dict):
"""
    Form a batch from a list of ori-rpl pair dicts.
    Each pair is added in both directions (ori->rpl and rpl->ori).
"""
split_idx = []
batch_ori = []
batch_rpl = []
prev_end_idx = 0
for dic in pair_dict:
key_map, pairs, _, _ = dic.values()
split_idx.append((prev_end_idx, prev_end_idx + 2 * len(pairs),))
prev_end_idx = prev_end_idx + 2 * len(pairs) # 2 * because of reverse
for (ori, rpl) in pairs:
batch_ori.append(ori)
batch_ori.append(rpl) # reverse
batch_rpl.append(rpl)
batch_rpl.append(ori) # reverse
return split_idx, batch_ori, batch_rpl
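# Illustrative sketch (hypothetical values, not part of the original notebook): the
# dict layout `batchify` expects. Field order matters because `dic.values()` is
# unpacked positionally; every (ori, rpl) pair is emitted in both directions.
def _batchify_example():
    toy_pair_dict = [{
        "key_map": ("birth place", "birthplace"),
        "pairs": [("Singer birth place.", "Singer birthplace.")],
        "table_id": "TOY_0",
        "table_name": "singer",
    }]
    split_idx, batch_ori, batch_rpl = batchify(toy_pair_dict)
    # split_idx == [(0, 2)], batch_ori == [ori, rpl], batch_rpl == [rpl, ori]
    return split_idx, batch_ori, batch_rpl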
def aggregate(split_idx, scores, strict=True):
contras, neus, ents = [],[],[]
for s,e in split_idx:
one_rpl_scores = scores[s:e,:]
if strict:
""" For REPLACE cols
            Prefer high PRECISION of replaceability! (If we REPLACE with an UNreplaceable col, we run into trouble.)
            Reject as many LOW-confidence candidates as possible.
            If NLI gives a HIGH ent-score, then the two columns should almost always be mutually replaceable!
"""
contra, neu, ent = torch.min(one_rpl_scores, dim=0)[0].squeeze()
else:
""" For ADD cols
            Prefer high RECALL of replaceability! (If we ADD a replaceable col, we run into trouble.)
            Accept as many LOW-confidence candidates as possible.
            If NLI still suggests a LOW ent-score, then the two columns should almost always be mutually UNreplaceable!
"""
contra, neu, ent = torch.max(one_rpl_scores, dim=0)[0].squeeze() # Prefer high recall
contras.append(float(contra.item()))
neus.append(float(neu.item()))
ents.append(float(ent.item()))
return contras, neus, ents
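# Illustrative sketch (hypothetical scores, not part of the original notebook): strict
# aggregation takes the per-class minimum across all template pairs of one candidate,
# loose aggregation takes the maximum.
def _aggregate_example():
    toy_scores = torch.tensor([[0.1, 0.2, 0.7],
                               [0.3, 0.3, 0.4]])  # 2 template pairs x (contra, neu, ent)
    strict_ents = aggregate([(0, 2)], toy_scores, strict=True)[2]   # ents ~ [0.4]
    loose_ents = aggregate([(0, 2)], toy_scores, strict=False)[2]   # ents ~ [0.7]
    return strict_ents, loose_ents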
def construct_pairs_for_nli_test(tables, table_id_key="table_id", table_name_key="table_name",
col_type_key="column_types", col_name_key="column_names",
pending_rpls_key="column_names_syn"):
"""
    Given a list of dictionary-represented tables and their pending replacement cols,
    construct pairs of ori-rpl sentences.
    :table_id_key: key name in table dict for table id
:col_type_key: key name in table dict for types of columns
:col_name_key: key name in table dict for names of columns
:pending_rpls_key: key name in table dict for pending keys to be replaced
"""
assert isinstance(tables, list), "Please pass a list of tables."
assert table_id_key in tables[0], "Each Table must have an id."
assert table_name_key in tables[0], "Table name is required, but the key is missing."
assert col_name_key in tables[0], "column name key is required but missing"
assert col_type_key in tables[0], "column type key is required but missing"
assert pending_rpls_key in tables[0], "Pending replacement columns is required, but the key is missing."
constructed_pairs = []
for i in trange(len(tables)):
tab = tables[i]
        tname = tab[table_name_key][:-1] if tab[table_name_key].endswith("s") else tab[table_name_key]  # strip a trailing plural "s" (likely the original intent)
pending_rpls = tab[pending_rpls_key]
col2type = {col: tp for col,tp in zip(tab[col_name_key], tab[col_type_key])}
for ori_col, rpl_col_list in pending_rpls.items():
for rpl_col in rpl_col_list:
if not check_spell(rpl_col): continue
rpl_dic = {"key_map": None, "pairs": [], "table_id": tab[table_id_key], "table_name": tab[table_name_key]}
for template in TEMPLATES:
sent_ori = template(tname, ori_col, col2type[ori_col])
sent_rpl = template(tname, rpl_col, col2type[ori_col])
rpl_dic["key_map"] = (ori_col, rpl_col,)
rpl_dic["pairs"] += ((sent_ori, sent_rpl,),)
constructed_pairs.append(rpl_dic)
return constructed_pairs
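# Illustrative sketch (hypothetical values, not part of the original notebook): the
# per-table dict layout expected by construct_pairs_for_nli_test with its default
# key names. The actual TEMPLATES list is defined earlier in the notebook.
_EXAMPLE_TABLE_FOR_NLI = {
    "table_id": "TOY_0",
    "table_name": "singer",
    "column_names": ["name", "birth place"],
    "column_types": ["text", "text"],
    "column_names_syn": {"birth place": ["birthplace", "place of birth"]},
}
# construct_pairs_for_nli_test([_EXAMPLE_TABLE_FOR_NLI]) yields one dict per surviving
# (ori, rpl) candidate, each carrying one sentence pair per template.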
def nli_test_across_tables(constructed_pairs, batch_size=256, strict=True):
"""
The major interface for NLI verification.
Given constructed pairs (results from construct_pairs_for_nli_test),
    use batch computation to speed up the verification process.
    Pass ``strict=False`` to use the loose (max) aggregation intended for ADD candidates.
"""
assert batch_size % 8 == 0, "Batch size must be a multiple of 8."
results = []
completed_pairs = 0
total_batches = len(constructed_pairs) // batch_size + 1
pbar = tqdm(total = total_batches)
with torch.no_grad():
while completed_pairs < len(constructed_pairs):
batch_contras, batch_neus, batch_ents = [],[],[]
prev_completed = completed_pairs
completed_pairs = min(completed_pairs + batch_size, len(constructed_pairs))
batch = constructed_pairs[prev_completed:completed_pairs]
split_idx, batch_ori, batch_rpl = batchify(batch)
inputs = nli_tokenizer(batch_ori, batch_rpl, padding="longest", return_tensors="pt").to(device)
logits = nli_model(**inputs).logits
scores = softmax(logits, dim=1) # [batch, 3]
            batch_contras, batch_neus, batch_ents = aggregate(split_idx, scores, strict=strict)
del inputs; del logits; del scores; torch.cuda.empty_cache()
batch_contras, batch_neus, batch_ents = np.array(batch_contras), np.array(batch_neus), np.array(batch_ents)
for b, c, n, e in zip(batch, batch_contras, batch_neus, batch_ents):
results.append({"key_map": b["key_map"], "scores": (c, n, e,)})
pbar.update(1)
pbar.close()
return results
def trim_name(text):
for ch in ['\\','`','*','{','}','[',']','(',')','>', '<', '#','+','\'', '"']:
if ch in text:
text = text.replace(ch, "")
text.replace("-", " ")
text.replace(".", " ")
return text
def extract_emb(list_of_names):
"""
Extract numberbatch word embeddings for a given list of strings
"""
assert isinstance(list_of_names, list), "Expected list as input"
output_matrix = np.zeros([len(list_of_names), EMB_DIM])
for i,name in enumerate(list_of_names):
name = trim_name(name)
units = name.split() # notice "_" is covered by our nb_emb!
name_emb = np.zeros(EMB_DIM)
for word in units:
if "_" in word and word2vec.get(word, None) is None:
sub_words = word.split("_")
local_emb_mat = extract_emb(sub_words)
emb = np.mean(local_emb_mat, axis=0)
else:
emb = word2vec.get(word, np.zeros(EMB_DIM))
name_emb += emb
name_emb /= len(units)
output_matrix[i,:] = name_emb
return output_matrix
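# Illustrative sketch (hypothetical strings): each name is averaged over the
# numberbatch vectors of its words; out-of-vocabulary words fall back to zeros,
# so the call always returns an array of shape (len(names), EMB_DIM).
def _extract_emb_example():
    return extract_emb(["birth place", "unseenword123"])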
def reranker(tgt_names, cand_names, topk=10):
"""
    Do reranking (usually among a few hundred candidates) and return the top-k candidates by numberbatch word2vec similarity.
"""
if len(cand_names) == 0:
return {}
tgt_mat = extract_emb(tgt_names)
cand_mat = extract_emb(cand_names)
sim_mat = tgt_mat @ cand_mat.T
topk = min(len(cand_names), topk)
top_scores, top_idx = [v.squeeze().numpy() for v in torch.topk(torch.Tensor(sim_mat), topk, dim=-1)]
rec_dic = {}
for i, tgt in enumerate(tgt_names):
if len(top_idx.shape) == 0: top_idx = np.array([top_idx])
if len(top_idx.shape) == 1: top_idx = top_idx[None, :]
rec_dic[tgt] = [cand_names[idx] for idx in top_idx[i]]
return rec_dic
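# Illustrative sketch (hypothetical column names, not part of the original notebook):
# rerank retrieved candidate columns against a table's own columns by numberbatch
# embedding similarity and keep the top-k per target column.
def _reranker_example():
    return reranker(tgt_names=["birth place"],
                    cand_names=["birthplace", "hometown", "age"],
                    topk=2)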
def retriver(query_table, queries=None, retrieve_strategy="query_dense", topk_tables=50, col_name_key="column_names", target_expand_keys=100):
"""
    Tapas-based dense retrieval for finding the top-k most similar tables from the table base.
    :query_table: The table whose top-k most similar tables will be found
    :queries: the user NL queries attached to the query_table
    :retrieve_strategy: Choose from ["query_dense", "table_dense"]; "query_dense" uses the NL query as the retrieval query vector,
    and "table_dense" uses the table itself as the retrieval query vector.
:topk_tables: Return k most similar tables
"""
ori_cols = set(query_table[col_name_key])
top_tables_tid = []
if retrieve_strategy == "query_dense":
top_tables_tid = retrieve_tables_query_dense(queries,k=topk_tables)
elif retrieve_strategy == "table_dense":
pass
elif retrieve_strategy == "tfidf":
pass
else:
raise NotImplementedError
top_tables = [tid2tables[tid] for tid in top_tables_tid]
expanded_cols = set()
for t in top_tables:
if len(expanded_cols) >= target_expand_keys: break
expanded_cols = expanded_cols.union(t[col_name_key])
expanded_cols = expanded_cols.difference(ori_cols)
return list(ori_cols), list(expanded_cols)
def retrieve_tables_tfidf(query_table, tfidf_mat, col_name_key="column_names", table_doc_key="doc"):
"""
TF-IDF based retrieval for finding most similar tables from DB.
"""
assert table_doc_key in query_table and col_name_key in query_table
    query_tfidf = vectorizer.transform([query_table[table_doc_key]])  # vectorize the table's doc string (the key asserted above)
scores = cosine_similarity(query_tfidf, tfidf_mat)[0]
top_scores, indices = [t.squeeze().numpy() for t in torch.topk(torch.Tensor(scores), 1000)]
return [idx2tid[i] for i in indices]
def retrieve_tables_query_dense(queries, k=50):
"""'
Interface for finding most similar table via dense retrieval. call goes from here.
"""
assert isinstance(queries, list), "input queries must be a list of strings"
torch.cuda.empty_cache()
with torch.no_grad():
q_inputs = tapas_tokenizer(table=DUMMY_TABLE, queries=queries, padding=True, truncation=True, return_tensors="pt").to(device)
qb = query_model(**q_inputs).pooler_output
qb = text_projection_layer(qb)
qb = qb / torch.norm(qb, dim=-1).unsqueeze(-1)
cos = torch.matmul(qb, bm_mat_B.transpose(0, 1))
cos = torch.mean(cos, dim=0)
top_score, top_idx = [v.data.cpu().numpy() for v in torch.topk(cos, k=k)]
# top_idx = top_idx.data.cpu().numpy()
#### ab means table encoded with encoder A, query encoded with encoder B.
return [idx2tid[i] for i in top_idx]
checker = enchant.Dict("en_US")
def _ends_with_id(string):
if len(string) < 2: return False
return string[-2:].lower() == "id"
def _fill_type_info(string, col_type, delim):
"""
Add type description for a given column
"""
if col_type == "date" and not any([dm in string for dm in date_marks]):
return delim.join([string, "time"])
if col_type == "number" and not any([nm in string for nm in num_marks]):
return delim.join([string, "number"])
return string
def contains_number(text):
"""
    Judge whether the passed string contains a number.
"""
    return len(re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", text)) > 0
def _get_replacement(tok1, tok2, tok1_is_reserved, tok2_is_reserved):
"""
    Given a bi-gram, pick one non-reserved word (chosen via IDF) and return it together with its synonym-dict entry.
"""
if tok1_is_reserved and tok2_is_reserved:
return (None, None)
if tok1_is_reserved and (not check_spell(tok2) or contains_number(tok2)):
return (None, None)
if tok2_is_reserved and (not check_spell(tok1) or contains_number(tok1)):
return (None, None)
if tok1_is_reserved:
syn_dic = synonym_dic.get(tok2.lower(), None)
return (tok2, syn_dic) if syn_dic is not None else (None, None)
if tok2_is_reserved:
syn_dic = synonym_dic.get(tok1.lower(), None)
return (tok1, syn_dic) if syn_dic is not None else (None, None)
# both are not reserved, pick one with higher tfidf val
def extract_idf(vocab):
vocab_idx = vectorizer.vocabulary_.get(vocab, None)
idf = 0 if vocab_idx is None else vectorizer.idf_[vocab_idx]
return idf
    first_tgt = tok1 if extract_idf(tok1) <= extract_idf(tok2) else tok2  # try the lower-IDF token first
second_tgt = tok2 if first_tgt == tok1 else tok1
syn_dic_first = synonym_dic.get(first_tgt.lower(), None)
if syn_dic_first is not None: return (first_tgt, syn_dic_first)
syn_dic_second = synonym_dic.get(second_tgt.lower(), None)
return (second_tgt, syn_dic_second) if syn_dic_second is not None else (None, None)<jupyter_output><empty_output><jupyter_text>REPLACE & ADD Interface<jupyter_code>def normalize_token(token):
"""
    Do strict normalization of a given token. All punctuation will be removed.
"""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(token))))
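# Illustrative sketch (not part of the original notebook): punctuation, articles and
# casing are stripped before the length/spell checks below.
def _normalize_token_example():
    assert normalize_token("The Birth-Place!") == "birthplace"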
def trim_retrieval_results(replacement_dict):
"""
    Overly short (char len <= 4) candidates are not usable replacements; filter them out.
"""
out_dic = deepcopy(replacement_dict)
for col in replacement_dict.keys():
replacements = replacement_dict[col]
filter_rpls = [r for r in replacements if len(r) > 4]
out_dic[col] = filter_rpls
return out_dic
def consider_rpl(token):
"""
Judge whether a token is suitable for replacement.
    Overly short (char len < 4), misspelled, or number-containing tokens are not replaceable.
"""
norm_token = normalize_token(token)
if len(norm_token) < 4: return False, token
if contains_number(norm_token): return False, token
if not check_spell(norm_token): return False, token
return True, norm_token<jupyter_output><empty_output><jupyter_text>core functions<jupyter_code>def replace_and_add_for_give_tables(path, table2qs, batch_size=512,
replace_threshold=0.75,
add_threshold=0.4,
output_prefix="", output_dir="./processed_data",
delim=" ",
topk_tables=50,
max_cands_per_col=10,
table_name_key="table_name",
col_type_key="column_types",
col_name_key="column_names"):
"""
The highest-level interface for replacement and addition across all tables stored in a given path.
One call, handle all.
:path: Target tables path
    :table2qs: The queries corresponding to each of the tables.
:batch_size: bsz for NLI checking. 512 recommended.
:replace_threshold: If NLI entailment score is higher than this threshold under STRICT mode, then the rpl pair is accepted.
    :add_threshold: If the NLI entailment score is lower than this threshold under LOOSE mode, then the add pair is accepted.
:output_prefix: File name prefix for output file.
:output_dir: Output file directory.
    :delim: Delimiter for column names. Single white space by default.
:topk_tables: How many most similar tables to consider from dense retrieval.
    :max_cands_per_col: Max number of pairs to be considered for each column (both add and rpl).
This directly influences the final amount to be checked by NLI.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# STEP 0: Prepare tables
print("STEP 0 : Prepare tables...\n")
tables = []
tables_template = {}
cnt = 0
with open(path, "r") as f:
for line in f.readlines():
table = json.loads(line)
table_copy = deepcopy(table)
table_copy["rpls_retrieval"] = {}
table_copy["rpls_syndict"] = {}
tables.append(table_copy)
table["REPLACE"] = {tname:[] for tname in table[col_name_key]}
table["ADD"] = {tname:[] for tname in table[col_name_key]}
tables_template[table["table_id"]] = table
# if cnt == 5: break
cnt += 1
# STEP 1: "retrieval" for add / replacement
print('STEP 1: Dense retrieval for add / replacement...\n')
for tab in tqdm(tables, position=0):
tid = tab["table_id"]
queries = table2qs.get(tid, None)
if queries is not None:
queries = queries[:10] if len(queries) > 10 else queries
ori_cols, expanded_cols = retriver(query_table=tab, queries=queries, topk_tables=topk_tables)
rec_dic = reranker(ori_cols, expanded_cols, topk=max_cands_per_col) # We will find analog & synonyms in this list
tab["rpls_retrieval"] = trim_retrieval_results(rec_dic)
# STEP 2 : synonym dict for replacement
print('STEP 2 : synonym dict for replacement...\n')
for tab in tqdm(tables, position=1):
if isinstance(tab["column_types"], list):
col2type = {c:t for c,t in zip(tab["column_names"], tab["column_types"])}
else:
col2type = tab["column_types"]
for col in tab["column_names"]:
col_type = col2type.get(col, "text")
if _ends_with_id(col): continue
tokens = [w.lower() for w in col.split(delim)]
keep_original, normalized_tokens = [], []
tok2syn = {}
for tok in tokens:
can_rpl, tok = consider_rpl(tok)
normalized_tokens.append(tok)
syn_dic = synonym_dic.get(tok, None)
if syn_dic is None: syn_dic = synonym_dic.get(get_singular_word(tok), None)
                keep_ori = (not can_rpl) or (syn_dic is None)  # skip either because the token is not replaceable or not in the synonym dict
keep_original.append(keep_ori)
if keep_ori == True: continue
rec_dic = reranker(tgt_names=[tok], cand_names=list(set(syn_dic["synonyms"])), topk=10)
tok2syn.update(rec_dic)
            syn_rpl_candidates = set()  # Generate syn-replaced candidates
            patience = 5  # if no new candidate is added for 5 consecutive attempts, break the loop.
while True:
if len(syn_rpl_candidates) >= max_cands_per_col or patience == 0:
syn_rpl_candidates = list(syn_rpl_candidates.difference(set([" ".join(normalized_tokens)])))
break
rpl_threshold = 1 if len(tokens) == 1 else (0.75 if len(tokens) == 2 else 0.5)
                do_rpl_coins = np.random.rand(len(tokens)) <= rpl_threshold  # replace each token with probability rpl_threshold; keep the original otherwise
new_cand = []
for i, tok in enumerate(normalized_tokens):
if not keep_original[i] and do_rpl_coins[i]:
all_syns = tok2syn.get(tok, [tok])
syn = np.random.choice(all_syns)
new_cand.append(syn)
else:
new_cand.append(tok)
new_cand = delim.join(new_cand)
len_before = len(syn_rpl_candidates)
syn_rpl_candidates.add(new_cand)
if len(syn_rpl_candidates) > len_before:
patience = 5
else:
patience -= 1
tab["rpls_syndict"].update({col : syn_rpl_candidates})
    # STEP 3: filter syn dict replacement with NLI
print('STEP 3: filter syn dict replacement with NLI...\n')
STRICT_MODE = True
constructed_pairs_rpl_syndict = construct_pairs_for_nli_test(tables, pending_rpls_key="rpls_syndict",
table_name_key=table_name_key,
col_type_key=col_type_key,
col_name_key=col_name_key)
    results_rpl_syndict = nli_test_across_tables(constructed_pairs_rpl_syndict, batch_size=batch_size, strict=STRICT_MODE)
for i, dic in enumerate(results_rpl_syndict):
table_id = constructed_pairs_rpl_syndict[i]["table_id"]
table = tables_template[table_id]
ent = dic["scores"][2]
if ent >= replace_threshold:
ori, rpl = dic["key_map"]
# print(f"{ori} -> {rpl}")
table["REPLACE"][ori].append(rpl) # update REPLACE key
# print(results_rpl_syndict)
# STEP 4: filter retrieval replacement with NLI
print('STEP 4: filter retrieval replacement with NLI...\n')
STRICT_MODE = True
constructed_pairs_rpl_retrieval = construct_pairs_for_nli_test(tables, pending_rpls_key="rpls_retrieval",
table_name_key=table_name_key,
col_type_key=col_type_key,
col_name_key=col_name_key)
    results_rpl_retrieval = nli_test_across_tables(constructed_pairs_rpl_retrieval, batch_size=batch_size, strict=STRICT_MODE)
for i, dic in enumerate(results_rpl_retrieval):
table_id = constructed_pairs_rpl_retrieval[i]["table_id"]
table = tables_template[table_id]
ent = dic["scores"][2]
if ent >= replace_threshold:
ori, rpl = dic["key_map"]
table["REPLACE"][ori].append(rpl) # update REPLACE key
# STEP 5: prune all replaceable from retrieval results & filter substring overlap from original col
print('STEP 5: prune all replaceable from retrieval results & filter substring overlap from original col...\n')
for table in tables:
tid = table["table_id"]
all_columns = table[col_name_key]
rpl_dict = tables_template[tid]["REPLACE"]
for rpl_col in table["rpls_retrieval"].keys():
rpl_candidates = table["rpls_retrieval"][rpl_col]
            add_candidates = []  # ADD operation candidates come from here
for rpl in rpl_candidates:
if any([rpl in c for c in all_columns]) or any([rpl in c for c in rpl_dict[rpl_col]]):
continue
add_candidates.append(rpl)
table["rpls_retrieval"][rpl_col] = add_candidates
# print(tables[8]["rpls_retrieval"])
# STEP 6 : filter leftover retrieval ADD candidates with NLI
print('STEP 6 : filter leftover retrieval ADD candidates with NLI\n')
STRICT_MODE = False
constructed_pairs_add_retrieval = construct_pairs_for_nli_test(tables, pending_rpls_key="rpls_retrieval",
table_name_key=table_name_key,
col_type_key=col_type_key,
col_name_key=col_name_key)
    results_add_retrieval = nli_test_across_tables(constructed_pairs_add_retrieval, batch_size=batch_size, strict=STRICT_MODE)
for i, dic in enumerate(results_add_retrieval):
table_id = constructed_pairs_add_retrieval[i]["table_id"]
table = tables_template[table_id]
ent = dic["scores"][2]
if ent <= add_threshold:
ori, rpl = dic["key_map"]
table["ADD"][ori].append(rpl) # update ADD key
# print(tables_template["SPIDER_8"]["ADD"])
# STEP 7: Write replace + add results to new file
print('STEP 7: Write REPLACE & ADD results to new file \n')
with open(f"{output_dir}/{output_prefix}-pipeline-output.jsonl", "w") as f:
for table in tables_template.values():
json.dump(table, f)
f.write("\n")<jupyter_output><empty_output><jupyter_text>Leave For Running Spider<jupyter_code>replace_and_add_for_give_tables("./data/spider/spider-tables.jsonl",
table2qs=spider_table2qs,
output_prefix="spider",
table_name_key="table_name",
replace_threshold=0.50,
batch_size=512)<jupyter_output><empty_output><jupyter_text>WTQ<jupyter_code>replace_and_add_for_give_tables("./data/WTQ/wtq-tables.jsonl",
table2qs=wtq_table2qs,
output_prefix="wtq",
table_name_key="pred_table_name",
replace_threshold=0.50,
batch_size=512)<jupyter_output><empty_output><jupyter_text>WikiSQL<jupyter_code>replace_and_add_for_give_tables("./data/wikisql/wikisql-tables.jsonl",
table2qs=wsql_train_table2qs,
output_prefix="wsql-train",
table_name_key="pred_table_name",
replace_threshold=0.70,
batch_size=128)<jupyter_output><empty_output> | ContextualSP/robustness_of_text_to_sql/CTA/pipeline.ipynb/0 | {
"file_path": "ContextualSP/robustness_of_text_to_sql/CTA/pipeline.ipynb",
"repo_id": "ContextualSP",
"token_count": 15976
} | 275 |
# Robustness of Text-to-SQL Models
This repository contains the data and code in the following paper:
> [**Towards Robustness of Text-to-SQL Models Against Natural and Realistic Adversarial Table Perturbation**](https://aclanthology.org/2022.acl-long.142.pdf) <br/>
> Xinyu Pi*, Bing Wang*, Yan Gao, Jiaqi Guo, Zhoujun Li, Jian-Guang Lou<br/>
> ACL 2022 Long Papers
## Introduction
This repository is the official implementation of our paper *Towards Robustness of Text-to-SQL Models Against Natural and Realistic Adversarial Table Perturbation*. In this paper, we curate **ADVETA**, the first robustness evaluation benchmark featuring natural and realistic adversarial table perturbation. To defend against this perturbation, we build a systematic adversarial training example generation framework **CTA**, tailored for better contextualization of tabular data.
## ADVETA
<img src="misc/ATP.png" height=450>
We manually curate the **ADVE**rsarial **T**able perturb**A**tion
(ADVETA) benchmark based on three mainstream Text-to-SQL datasets, Spider, WikiSQL and WTQ.
For each table from the original development set, we conduct RPL/ADD annotation separately, perturbing only table columns. We release our data in the `adveta_1.0.zip` file.
## CTA
<img src="misc/CTA.png" height=400>
### Requirement
- python: 3.8
- cuda: 10.1
- torch: 1.7.1
install dependencies:
```bash
conda create -n cta python=3.8 -y
conda activate cta
conda install pytorch==1.7.1 cudatoolkit=10.1 -c pytorch -y
python -m spacy download en_core_web_sm
pip install -r requirements.txt
```
### Introduction
The Contextualized Table Augmentation (CTA) framework is an adversarial training example generation approach tailored for tabular data. Before you run `pipeline.ipynb`, you should download the data files and checkpoints from [Google Drive](https://drive.google.com/file/d/1HqP1P5QqytGZTM_Kx8Bbq0EuyKaD9raV/view?usp=sharing).
notes:
- We download the numberbatch word embeddings from [here](https://conceptnet.s3.amazonaws.com/downloads/2019/numberbatch/numberbatch-en-19.08.txt.gz) and save them as `./data/nb_emb.txt`.
- We pre-compute dense vectors for the processed WDC tables using the Tapas dense retrieval models, storing the output to `./wdc/wdc_dense_A.txt` and `./wdc/wdc_dense_B.txt` (Tapas has two encoders).
### Run
Just run the `pipeline.ipynb` and have fun.
## Cite
```
@inproceedings{pi-etal-2022-towards,
title = "Towards Robustness of Text-to-{SQL} Models Against Natural and Realistic Adversarial Table Perturbation",
author = "Pi, Xinyu and Wang, Bing and Gao, Yan and Guo, Jiaqi and Li, Zhoujun and Lou, Jian-Guang",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.acl-long.142",
pages = "2007--2022"
}
``` | ContextualSP/robustness_of_text_to_sql/README.md/0 | {
"file_path": "ContextualSP/robustness_of_text_to_sql/README.md",
"repo_id": "ContextualSP",
"token_count": 927
} | 276 |
set seed=1
set config_file=train_configs/concat.none.jsonnet
set model_file=checkpoints_sparc/sparc_concat_none_model
set tables_file=dataset_sparc/tables.json
set database_path=dataset_sparc/database
set dataset_path=dataset_sparc
set train_data_path=dataset_sparc/train.json
set validation_data_path=dataset_sparc/dev.json
set pretrained_file=glove/glove.twitter.27B.100d.txt
allennlp train -s %model_file% %config_file% ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
-o {"""model.serialization_dir""":"""%model_file%""","""random_seed""":"""%seed%""","""numpy_seed""":"""%seed%""","""pytorch_seed""":"""%seed%""","""dataset_reader.tables_file""":"""%tables_file%""","""dataset_reader.database_path""":"""%database_path%""","""train_data_path""":"""%train_data_path%""","""validation_data_path""":"""%validation_data_path%""","""model.text_embedder.tokens.pretrained_file""":"""%pretrained_file%""","""model.dataset_path""":"""%dataset_path%"""} | ContextualSP/semantic_parsing_in_context/bash_files/windows/train_sparc.bat/0 | {
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/windows/train_sparc.bat",
"repo_id": "ContextualSP",
"token_count": 384
} | 277 |
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor
# WARNING: Do not exclude these imports
from predictor.sparc_predictor import SparcPredictor
from dataset_reader.sparc_reader import SparcDatasetReader
from models.sparc_parser import SparcParser
class PredictManager:
def __init__(self, archive_file, tables_file, database_path):
overrides = "{\"dataset_reader.tables_file\":\"" + tables_file + "\",\"dataset_reader.database_path\":" +\
"\"" + database_path + "\"}"
archive = load_archive(archive_file,
overrides=overrides)
self.predictor = Predictor.from_archive(
archive, predictor_name="sparc")
def predict_result(self, ques_inter: str, ques_database: str):
param = {
"database_id": ques_database,
"question": ques_inter
}
restate = self.predictor.predict_json(param)["best_predict_sql"]
return restate
if __name__ == '__main__':
manager = PredictManager(archive_file="model.tar.gz",
tables_file="dataset_sparc/tables.json",
database_path="dataset_sparc/database")
    # the input dialogue is separated by `;`, and the second argument is the database_id
result = manager.predict_result("What are all the airlines;Of these, which is Jetblue Airways", "flight_2")
print(result)
| ContextualSP/semantic_parsing_in_context/predict.py/0 | {
"file_path": "ContextualSP/semantic_parsing_in_context/predict.py",
"repo_id": "ContextualSP",
"token_count": 605
} | 278 |
# pylint: disable=anomalous-backslash-in-string
"""
A ``Text2SqlTableContext`` represents the SQL context in which an utterance appears
for any of the text2sql datasets, with the grammar and the valid actions.
"""
from typing import List, Dict
from dataset_readers.dataset_util.spider_utils import Table
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(query ws iue ws query)', '(query ws)']
GRAMMAR_DICTIONARY["iue"] = ['"intersect"', '"except"', '"union"']
GRAMMAR_DICTIONARY["query"] = ['(ws select_core ws groupby_clause ws orderby_clause ws limit)',
'(ws select_core ws groupby_clause ws orderby_clause)',
'(ws select_core ws groupby_clause ws limit)',
'(ws select_core ws orderby_clause ws limit)',
'(ws select_core ws groupby_clause)',
'(ws select_core ws orderby_clause)',
'(ws select_core)']
GRAMMAR_DICTIONARY["select_core"] = ['(select_with_distinct ws select_results ws from_clause ws where_clause)',
'(select_with_distinct ws select_results ws from_clause)',
'(select_with_distinct ws select_results ws where_clause)',
'(select_with_distinct ws select_results)']
GRAMMAR_DICTIONARY["select_with_distinct"] = ['(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = ['(ws select_result ws "," ws select_results)', '(ws select_result)']
GRAMMAR_DICTIONARY["select_result"] = ['"*"', '(table_source ws ".*")',
'expr', 'col_ref']
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws table_source ws join_clauses)',
'(ws "from" ws source)']
GRAMMAR_DICTIONARY["join_clauses"] = ['(join_clause ws join_clauses)', 'join_clause']
GRAMMAR_DICTIONARY["join_clause"] = ['"join" ws table_source ws "on" ws join_condition_clause']
GRAMMAR_DICTIONARY["join_condition_clause"] = ['(join_condition ws "and" ws join_condition_clause)', 'join_condition']
GRAMMAR_DICTIONARY["join_condition"] = ['ws col_ref ws "=" ws col_ref']
GRAMMAR_DICTIONARY["source"] = ['(ws single_source ws "," ws source)', '(ws single_source)']
GRAMMAR_DICTIONARY["single_source"] = ['table_source', 'source_subq']
GRAMMAR_DICTIONARY["source_subq"] = ['("(" ws query ws ")")']
# GRAMMAR_DICTIONARY["source_subq"] = ['("(" ws query ws ")" ws "as" ws name)', '("(" ws query ws ")")']
GRAMMAR_DICTIONARY["limit"] = ['("limit" ws non_literal_number)']
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp expr ws where_conj)', '(ws "where" wsp expr)']
GRAMMAR_DICTIONARY["where_conj"] = ['(ws "and" wsp expr ws where_conj)', '(ws "and" wsp expr)']
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group" ws "by" ws group_clause ws "having" ws expr)',
'(ws "group" ws "by" ws group_clause)']
GRAMMAR_DICTIONARY["group_clause"] = ['(ws expr ws "," ws group_clause)', '(ws expr)']
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order" ws "by" ws order_clause']
GRAMMAR_DICTIONARY["order_clause"] = ['(ordering_term ws "," ws order_clause)', 'ordering_term']
GRAMMAR_DICTIONARY["ordering_term"] = ['(ws expr ws ordering)', '(ws expr)']
GRAMMAR_DICTIONARY["ordering"] = ['(ws "asc")', '(ws "desc")']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_name ws "." ws column_name)', 'column_name']
GRAMMAR_DICTIONARY["table_source"] = ['(table_name ws "as" ws table_alias)', 'table_name']
GRAMMAR_DICTIONARY["table_name"] = ["table_alias"]
GRAMMAR_DICTIONARY["table_alias"] = ['"t1"', '"t2"', '"t3"', '"t4"']
GRAMMAR_DICTIONARY["column_name"] = []
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["expr"] = ['in_expr',
# Like expressions.
'(value wsp "like" wsp string)',
# Between expressions.
'(value ws "between" wsp value ws "and" wsp value)',
# Binary expressions.
'(value ws binaryop wsp expr)',
# Unary expressions.
'(unaryop ws expr)',
'source_subq',
'value']
GRAMMAR_DICTIONARY["in_expr"] = ['(value wsp "not" wsp "in" wsp string_set)',
'(value wsp "in" wsp string_set)',
'(value wsp "not" wsp "in" wsp expr)',
'(value wsp "in" wsp expr)']
GRAMMAR_DICTIONARY["value"] = ['parenval', '"YEAR(CURDATE())"', 'number', 'boolean',
'function', 'col_ref', 'string']
GRAMMAR_DICTIONARY["parenval"] = ['"(" ws expr ws ")"']
GRAMMAR_DICTIONARY["function"] = ['(fname ws "(" ws "distinct" ws arg_list_or_star ws ")")',
'(fname ws "(" ws arg_list_or_star ws ")")']
GRAMMAR_DICTIONARY["arg_list_or_star"] = ['arg_list', '"*"']
GRAMMAR_DICTIONARY["arg_list"] = ['(expr ws "," ws arg_list)', 'expr']
# TODO(MARK): Massive hack, remove and modify the grammar accordingly
# GRAMMAR_DICTIONARY["number"] = ['~"\d*\.?\d+"i', "'3'", "'4'"]
GRAMMAR_DICTIONARY["non_literal_number"] = ['"1"', '"2"', '"3"', '"4"']
GRAMMAR_DICTIONARY["number"] = ['ws "value" ws']
GRAMMAR_DICTIONARY["string_set"] = ['ws "(" ws string_set_vals ws ")"']
GRAMMAR_DICTIONARY["string_set_vals"] = ['(string ws "," ws string_set_vals)', 'string']
# GRAMMAR_DICTIONARY["string"] = ['~"\'.*?\'"i']
GRAMMAR_DICTIONARY["string"] = ['"\'" ws "value" ws "\'"']
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"', '"avg"', '"all"']
GRAMMAR_DICTIONARY["boolean"] = ['"true"', '"false"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"+"', '"-"', '"*"', '"/"', '"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"and"', '"or"', '"like"']
GRAMMAR_DICTIONARY["unaryop"] = ['"+"', '"-"', '"not"', '"not"']
def update_grammar_with_tables(grammar_dictionary: Dict[str, List[str]],
schema: Dict[str, Table]) -> None:
table_names = sorted([f'"{table.lower()}"' for table in
list(schema.keys())], reverse=True)
grammar_dictionary['table_name'] += table_names
all_columns = set()
for table in schema.values():
all_columns.update([f'"{table.name.lower()}@{column.name.lower()}"' for column in table.columns if column.name != '*'])
sorted_columns = sorted([column for column in all_columns], reverse=True)
grammar_dictionary['column_name'] += sorted_columns
def update_grammar_to_be_table_names_free(grammar_dictionary: Dict[str, List[str]]):
"""
Remove table names from column names, remove aliases
"""
grammar_dictionary["column_name"] = []
grammar_dictionary["table_name"] = []
grammar_dictionary["col_ref"] = ['column_name']
grammar_dictionary["table_source"] = ['table_name']
del grammar_dictionary["table_alias"]
def update_grammar_flip_joins(grammar_dictionary: Dict[str, List[str]]):
"""
    Expand join_clauses into a bounded chain of joins to avoid unbounded recursion in the grammar.
"""
# using a simple rule such as join_clauses-> [(join_clauses ws join_clause), join_clause]
# resulted in a max recursion error, so for now just using a predefined max
# number of joins
grammar_dictionary["join_clauses"] = ['(join_clauses_1 ws join_clause)', 'join_clause']
grammar_dictionary["join_clauses_1"] = ['(join_clauses_2 ws join_clause)', 'join_clause']
grammar_dictionary["join_clauses_2"] = ['(join_clause ws join_clause)', 'join_clause'] | ContextualSP/unified_parser_text_to_sql/semparse/contexts/spider_db_grammar.py/0 | {
"file_path": "ContextualSP/unified_parser_text_to_sql/semparse/contexts/spider_db_grammar.py",
"repo_id": "ContextualSP",
"token_count": 3733
} | 279 |
import os, sys
import json
import sqlite3
import traceback
import argparse
import tqdm
from ..process_sql import get_sql
from .schema import Schema, get_schemas_from_json
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True)
parser.add_argument("--tables", required=True)
parser.add_argument("--output", required=True)
args = parser.parse_args()
sql_path = args.input
output_file = args.output
table_file = args.tables
schemas, db_names, tables = get_schemas_from_json(table_file)
with open(sql_path) as inf:
sql_data = json.load(inf)
sql_data_new = []
for data in tqdm.tqdm(sql_data):
try:
db_id = data["db_id"]
schema = schemas[db_id]
table = tables[db_id]
schema = Schema(schema, table)
sql = data["query"]
sql_label = get_sql(schema, sql)
data["sql"] = sql_label
sql_data_new.append(data)
except:
print("db_id: ", db_id)
print("sql: ", sql)
raise
with open(output_file, "wt") as out:
json.dump(sql_data_new, out, sort_keys=True, indent=4, separators=(",", ": "))
| ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/parse_raw_json.py/0 | {
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/parse_raw_json.py",
"repo_id": "ContextualSP",
"token_count": 568
} | 280 |
from PIL import Image
import io
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class ImageNet_Withhold(Dataset):
def __init__(self, data_root, ann_file='', transform=None, train=True, task ='train'):
super(ImageNet_Withhold, self).__init__()
ann_file = ann_file + '/' + 'val_true.txt'
train_split = (task == 'train' or task == 'val')
self.data_root = data_root + '/'+ ('train' if train_split else 'val')
self.data = []
self.nb_classes = 0
folders = {}
cnt = 0
self.z = ZipReader()
# if train:
# for member in self.tarfile.getmembers():
# print(member)
# self.tarfile = tarfile.open(self.data_root)
f = open(ann_file)
prefix = 'data/sdb/imagenet'+'/'+ ('train' if train_split else 'val') + '/'
for line in f:
tmp = line.strip().split('\t')[0]
class_pic = tmp.split('/')
class_tmp = class_pic[0]
pic = class_pic[1]
if class_tmp in folders:
# print(self.tarfile.getmember(('train/' if train else 'val/') + tmp[0] + '.JPEG'))
self.data.append((class_tmp + '.zip', prefix + tmp + '.JPEG', folders[class_tmp]))
else:
folders[class_tmp] = cnt
cnt += 1
self.data.append((class_tmp + '.zip', prefix + tmp + '.JPEG',folders[class_tmp]))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if transform is not None:
self.transforms = transform
else:
if train:
self.transforms = transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
self.transforms = transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
self.nb_classes = cnt
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
# print('extract_file', time.time()-start_time)
iob = self.z.read(self.data_root + '/' + self.data[idx][0], self.data[idx][1])
iob = io.BytesIO(iob)
img = Image.open(iob).convert('RGB')
target = self.data[idx][2]
if self.transforms is not None:
img = self.transforms(img)
# print('open', time.time()-start_time)
return img, target
| Cream/AutoFormer/lib/imagenet_withhold.py/0 | {
"file_path": "Cream/AutoFormer/lib/imagenet_withhold.py",
"repo_id": "Cream",
"token_count": 1420
} | 281 |
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from ..runner import load_checkpoint
from .weight_init import constant_init, kaiming_init
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert not with_cp
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
"""Bottleneck block.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
if style == 'pytorch':
conv1_stride = 1
conv2_stride = stride
else:
conv1_stride = stride
conv2_stride = 1
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
with_cp=False):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
return nn.Sequential(*layers)
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
frozen_stages=-1,
bn_eval=True,
bn_frozen=False,
with_cp=False):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
assert num_stages >= 1 and num_stages <= 4
block, stage_blocks = self.arch_settings[depth]
stage_blocks = stage_blocks[:num_stages]
assert len(strides) == len(dilations) == num_stages
assert max(out_indices) < num_stages
self.out_indices = out_indices
self.style = style
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_cp = with_cp
self.inplanes = 64
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_layers = []
for i, num_blocks in enumerate(stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
with_cp=with_cp)
self.inplanes = planes * block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
if mode and self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
self.bn1.eval()
self.bn1.weight.requires_grad = False
self.bn1.bias.requires_grad = False
for i in range(1, self.frozen_stages + 1):
mod = getattr(self, 'layer{}'.format(i))
mod.eval()
for param in mod.parameters():
param.requires_grad = False
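# Illustrative sketch (not part of the original mmcv code): a typical detection
# backbone configuration that returns the four stage feature maps for an FPN-style
# neck. The input size below is arbitrary.
def _example_resnet50():
    import torch
    model = ResNet(depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1)
    model.init_weights()
    feats = model(torch.randn(2, 3, 224, 224))
    return [f.shape for f in feats]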
| Cream/CDARTS/CDARTS_detection/mmcv/cnn/resnet.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/cnn/resnet.py",
"repo_id": "Cream",
"token_count": 5249
} | 282 |
import numpy as np
from .colorspace import bgr2rgb, rgb2bgr
def imnormalize(img, mean, std, to_rgb=True):
img = img.astype(np.float32)
if to_rgb:
img = bgr2rgb(img)
return (img - mean) / std
def imdenormalize(img, mean, std, to_bgr=True):
img = (img * std) + mean
if to_bgr:
img = rgb2bgr(img)
return img
| Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/normalize.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/normalize.py",
"repo_id": "Cream",
"token_count": 167
} | 283 |
class Hook(object):
def before_run(self, runner):
pass
def after_run(self, runner):
pass
def before_epoch(self, runner):
pass
def after_epoch(self, runner):
pass
def before_iter(self, runner):
pass
def after_iter(self, runner):
pass
def before_train_epoch(self, runner):
self.before_epoch(runner)
def before_val_epoch(self, runner):
self.before_epoch(runner)
def after_train_epoch(self, runner):
self.after_epoch(runner)
def after_val_epoch(self, runner):
self.after_epoch(runner)
def before_train_iter(self, runner):
self.before_iter(runner)
def before_val_iter(self, runner):
self.before_iter(runner)
def after_train_iter(self, runner):
self.after_iter(runner)
def arch_after_train_iter(self, runner):
self.after_iter(runner)
def after_val_iter(self, runner):
self.after_iter(runner)
def every_n_epochs(self, runner, n):
return (runner.epoch + 1) % n == 0 if n > 0 else False
def every_n_inner_iters(self, runner, n):
return (runner.inner_iter + 1) % n == 0 if n > 0 else False
def every_n_iters(self, runner, n):
return (runner.iter + 1) % n == 0 if n > 0 else False
def end_of_epoch(self, runner):
return runner.inner_iter + 1 == len(runner.data_loader)
| Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/hook.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/hook.py",
"repo_id": "Cream",
"token_count": 608
} | 284 |
from .config import ConfigDict, Config
from .misc import (is_str, iter_cast, list_cast, tuple_cast, is_seq_of,
is_list_of, is_tuple_of, slice_list, concat_list,
check_prerequisites, requires_package, requires_executable)
from .path import (is_filepath, fopen, check_file_exist, mkdir_or_exist,
symlink, scandir, FileNotFoundError)
from .progressbar import ProgressBar, track_progress, track_parallel_progress
from .timer import Timer, TimerError, check_time
__all__ = [
'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress',
'track_parallel_progress', 'Timer', 'TimerError', 'check_time'
]
| Cream/CDARTS/CDARTS_detection/mmcv/utils/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/utils/__init__.py",
"repo_id": "Cream",
"token_count": 387
} | 285 |
from .color import Color, color_val
from .image import imshow, imshow_bboxes, imshow_det_bboxes
from .optflow import flowshow, flow2rgb, make_color_wheel
__all__ = [
'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes',
'flowshow', 'flow2rgb', 'make_color_wheel'
]
| Cream/CDARTS/CDARTS_detection/mmcv/visualization/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/visualization/__init__.py",
"repo_id": "Cream",
"token_count": 112
} | 286 |
from .anchor_generator import AnchorGenerator
from .anchor_target import anchor_target, anchor_inside_flags
from .guided_anchor_target import ga_loc_target, ga_shape_target
__all__ = [
'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target',
'ga_shape_target'
]
| Cream/CDARTS/CDARTS_detection/mmdet/core/anchor/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/anchor/__init__.py",
"repo_id": "Cream",
"token_count": 103
} | 287 |
import numpy as np
import torch
from .random_sampler import RandomSampler
class InstanceBalancedPosSampler(RandomSampler):
    """Random sampler that draws positive samples as evenly as possible across
    ground-truth instances, filling up with random extras when needed."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
pos_inds = torch.nonzero(assign_result.gt_inds > 0)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(assign_result.gt_inds == i.item())
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
| Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py",
"repo_id": "Cream",
"token_count": 959
} | 288 |
import copy
import torch
import torch.nn as nn
from mmcv.runner import OptimizerHook
from .utils import cast_tensor_type
from ..utils.dist_utils import allreduce_grads
class Fp16OptimizerHook(OptimizerHook):
"""FP16 optimizer hook.
    The steps of the fp16 optimizer are as follows.
    1. Scale the loss value.
    2. Backpropagate through the fp16 model.
    3. Copy gradients from the fp16 model to the fp32 weight copy.
    4. Update the fp32 weights.
    5. Copy the updated parameters from the fp32 weights back to the fp16 model.
Refer to https://arxiv.org/abs/1710.03740 for more details.
Args:
loss_scale (float): Scale factor multiplied with loss.
"""
def __init__(self,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
loss_scale=512.,
distributed=True):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.loss_scale = loss_scale
self.distributed = distributed
def before_run(self, runner):
# keep a copy of fp32 weights
runner.optimizer.param_groups = copy.deepcopy(
runner.optimizer.param_groups)
# convert model to fp16
wrap_fp16_model(runner.model)
def copy_grads_to_fp32(self, fp16_net, fp32_weights):
"""Copy gradients from fp16 model to fp32 weight copy."""
for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):
if fp16_param.grad is not None:
if fp32_param.grad is None:
fp32_param.grad = fp32_param.data.new(fp32_param.size())
fp32_param.grad.copy_(fp16_param.grad)
def copy_params_to_fp16(self, fp16_net, fp32_weights):
"""Copy updated params from fp32 weight copy to fp16 model."""
for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):
fp16_param.data.copy_(fp32_param.data)
def after_train_iter(self, runner):
# clear grads of last iteration
runner.model.zero_grad()
runner.optimizer.zero_grad()
# scale the loss value
scaled_loss = runner.outputs['loss'] * self.loss_scale
scaled_loss.backward()
# copy fp16 grads in the model to fp32 params in the optimizer
fp32_weights = []
for param_group in runner.optimizer.param_groups:
fp32_weights += param_group['params']
self.copy_grads_to_fp32(runner.model, fp32_weights)
# allreduce grads
if self.distributed:
allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
# scale the gradients back
for param in fp32_weights:
if param.grad is not None:
param.grad.div_(self.loss_scale)
if self.grad_clip is not None:
self.clip_grads(fp32_weights)
# update fp32 params
runner.optimizer.step()
# copy fp32 params to the fp16 model
self.copy_params_to_fp16(runner.model, fp32_weights)
def wrap_fp16_model(model):
# convert model to fp16
model.half()
# patch the normalization layers to make it work in fp32 mode
patch_norm_fp32(model)
# set `fp16_enabled` flag
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
def patch_norm_fp32(module):
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
def patch_forward_method(func, src_type, dst_type, convert_output=True):
"""Patch the forward method of a module.
Args:
func (callable): The original forward method.
src_type (torch.dtype): Type of input arguments to be converted from.
dst_type (torch.dtype): Type of input arguments to be converted to.
convert_output (bool): Whether to convert the output back to src_type.
Returns:
callable: The patched forward method.
"""
def new_forward(*args, **kwargs):
output = func(*cast_tensor_type(args, src_type, dst_type),
**cast_tensor_type(kwargs, src_type, dst_type))
if convert_output:
output = cast_tensor_type(output, dst_type, src_type)
return output
return new_forward
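# Illustrative sketch (not part of the original mmdet code): after wrapping, the toy
# module's conv weights are fp16 while its BatchNorm runs in fp32, as arranged by
# `wrap_fp16_model` / `patch_norm_fp32` above.
def _wrap_fp16_example():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    wrap_fp16_model(model)
    assert model[0].weight.dtype == torch.half
    assert model[1].weight.dtype == torch.float32
    return model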
| Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/hooks.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/hooks.py",
"repo_id": "Cream",
"token_count": 1997
} | 289 |
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .registry import DATASETS
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
self.flag = np.concatenate(flags)
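# Illustrative sketch (not part of the original mmdet code): a minimal dataset stub
# showing how the wrapper above concatenates the per-image aspect-ratio group flags
# of its children.
class _ToyDataset(object):
    CLASSES = ('cat',)

    def __init__(self, n):
        self.flag = np.zeros(n, dtype=np.uint8)

    def __len__(self):
        return len(self.flag)

    def __getitem__(self, idx):
        return idx


def _concat_dataset_example():
    merged = ConcatDataset([_ToyDataset(2), _ToyDataset(3)])
    assert len(merged.flag) == 5
    return merged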
@DATASETS.register_module
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
if hasattr(self.dataset, 'flag'):
self.flag = np.tile(self.dataset.flag, times)
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def __len__(self):
return self.times * self._ori_len
| Cream/CDARTS/CDARTS_detection/mmdet/datasets/dataset_wrappers.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/dataset_wrappers.py",
"repo_id": "Cream",
"token_count": 682
} | 290 |
from .backbones import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_extractors import * # noqa: F401,F403
from .anchor_heads import * # noqa: F401,F403
from .shared_heads import * # noqa: F401,F403
from .bbox_heads import * # noqa: F401,F403
from .mask_heads import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS,
LOSSES, DETECTORS)
from .builder import (build_backbone, build_neck, build_roi_extractor,
build_shared_head, build_head, build_loss,
build_detector)
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
| Cream/CDARTS/CDARTS_detection/mmdet/models/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/__init__.py",
"repo_id": "Cream",
"token_count": 419
} | 291 |
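A hedged sketch of the registry/builder pattern exported above: each `build_*` helper turns a config dict whose `type` names a registered class into an instance. The `ResNet` arguments below are assumptions taken from typical mmdet configs.
from mmdet.models import build_backbone

backbone_cfg = dict(
    type='ResNet',          # any name present in the BACKBONES registry
    depth=50,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    style='pytorch')
backbone = build_backbone(backbone_cfg)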
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import time
import numpy as np
from .fbnet_blocks import *
from .fbnet_arch import predefine_archs
import logging
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import constant_init, kaiming_init
from .utils import load_checkpoint
from ..registry import BACKBONES
@BACKBONES.register_module
class FBNet(nn.Module):
def __init__(self, arch='fbnet_c', out_indices=(5, 9, 17, 22), frozen_stages=-1):
super(FBNet, self).__init__()
print('Model is {}.'.format(arch))
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.arch = arch
self.input_size = 800
self.build_backbone(self.arch, self.input_size)
def build_backbone(self, arch, input_size):
genotypes = predefine_archs[arch]['genotypes']
strides = predefine_archs[arch]['strides']
out_channels = predefine_archs[arch]['out_channels']
self.layers = nn.ModuleList()
self.layers.append(ConvBNReLU(input_size, in_channels=3, out_channels=out_channels[0], kernel_size=3, stride=strides[0], padding=1,
bias=True, relu_type='relu', bn_type='bn'))
input_size = input_size // strides[0]
_in_channels = out_channels[0]
for genotype, stride, _out_channels in zip(genotypes[1:], strides[1:], out_channels[1:]):
if genotype.endswith('sb'):
self.layers.append(SUPER_PRIMITIVES[genotype](input_size, _in_channels, _out_channels, stride))
else:
self.layers.append(PRIMITIVES[genotype](input_size, _in_channels, _out_channels, stride))
input_size = input_size // stride
_in_channels = _out_channels
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
m._specify_ddp_gpu_num(1)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x, alphas=None):
outs = []
cnt = 0
for i, layer in enumerate(self.layers):
x = layer(x)
if i in self.out_indices:
outs.append(x)
        return outs
| Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/fbnet.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/fbnet.py",
"repo_id": "Cream",
"token_count": 1263
} | 292 |
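A hedged usage sketch for the `FBNet` backbone above; it assumes the 'fbnet_c' genotype exists in `predefine_archs` and that the blocks imported from `fbnet_blocks` accept the arguments used in `build_backbone`.
import torch
from mmdet.models import build_backbone

fbnet = build_backbone(dict(type='FBNet', arch='fbnet_c', out_indices=(5, 9, 17, 22)))
fbnet.init_weights(pretrained=None)
fbnet.eval()
with torch.no_grad():
    feats = fbnet(torch.randn(1, 3, 800, 800))
# One feature map per entry in out_indices, at decreasing spatial resolution.
print([f.shape for f in feats])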
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
norm_cfg_ = {
'BN': nn.BatchNorm2d,
'SyncBN': nn.SyncBatchNorm,
'GN': nn.GroupNorm,
}
OPS = {
'skip': lambda input_size, in_channels, out_channels, stride, bn='BN': Identity(input_size, in_channels, out_channels, stride),
'ir_k3_e1': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, bn=bn),
'ir_k3_e1_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=2, bn=bn),
'ir_k3_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 3, bn=bn),
'ir_k3_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, bn=bn),
'ir_k3_e6_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=2, bn=bn),
'ir_k3_s2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, 2, bn=bn),
'ir_k5_e1': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, bn=bn),
'ir_k5_e1_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, dilation=2, bn=bn),
'ir_k5_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 5, bn=bn),
'ir_k5_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 5, bn=bn),
'ir_k5_e6_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=2, bn=bn),
'ir_k5_s2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, 2, bn=bn),
'ir_k7_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 7, bn=bn),
'ir_k7_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 7, bn=bn),
'sep_k3' : lambda input_size, in_channels, out_channels, stride, bn='BN': SepConv(input_size, in_channels, out_channels, 1, stride, 3),
'sep_k5' : lambda input_size, in_channels, out_channels, stride, bn='BN': SepConv(input_size, in_channels, out_channels, 1, stride, 5),
'conv1' : lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 1, stride, bn_type=bn),
'conv3' : lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 3, stride, bn_type=bn),
'conv5' : lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 5, stride, bn_type=bn),
'avgpool': lambda input_size, in_channels, out_channels, stride, bn='BN': AvgPool(input_size, in_channels, stride),
}
class AvgPool(nn.Module):
    def __init__(self, input_size, in_channels, stride):
        # input_size and in_channels are unused here but keep the signature
        # consistent with the other OPS constructors.
super(AvgPool, self).__init__()
self.stride = stride
def forward(self, x):
return F.avg_pool2d(x, self.stride)
class ChannelShuffle(nn.Module):
def __init__(self, groups=1):
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, x):
if self.groups == 1:
return x
N, C, H, W = x.size()
cpg = C // self.groups # channels per group
out = x.view(N, self.groups, cpg, H, W)
out = out.permute(0, 2, 1, 3, 4).contiguous()
out = out.view(N, C, H, W)
return out
class ConvBNReLU(nn.Module):
def __init__(self, input_size, in_channels, out_channels, kernel_size, stride, dilation=1, bias=False, relu_type='relu', bn_type='BN', groups=1):
super(ConvBNReLU, self).__init__()
assert(relu_type in ['relu', 'none'])
padding = (kernel_size - 1) * dilation // 2
if bn_type == 'none':
bias = True
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups)
nn.init.kaiming_normal_(self.conv.weight, mode="fan_out", nonlinearity="relu")
if self.conv.bias is not None:
nn.init.constant_(self.conv.bias, 0.0)
if bn_type == 'none' :
self.bn = nn.Sequential()
elif bn_type == 'GN':
norm_layer = norm_cfg_[bn_type]
self.bn = norm_layer(num_channels=out_channels, num_groups=32)
else:
norm_layer = norm_cfg_[bn_type]
self.bn = norm_layer(out_channels)
self.relu = nn.ReLU(inplace=True) if relu_type == 'relu' else nn.Sequential()
def forward(self, x):
out = self.conv(x)
out = self.relu(self.bn(out))
return out
class SE(nn.Module):
def __init__(self, input_size, in_channels, se_ratio):
super(SE, self).__init__()
self.in_channels, self.se_ratio = in_channels, se_ratio
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Conv2d(in_channels, max(1, int(in_channels * se_ratio)), 1, bias=False)
self.fc2 = nn.Conv2d(max(1, int(in_channels * se_ratio)), in_channels, 1, bias=False)
def forward(self, x):
out = self.pooling(x)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
        out = torch.sigmoid(out)
return out
class Identity(nn.Module):
def __init__(self, input_size, in_channels, out_channels, stride):
super(Identity, self).__init__()
if in_channels != out_channels or stride != 1:
            self.conv = ConvBNReLU(input_size, in_channels, out_channels, kernel_size=1, stride=stride,
                                   bias=False, relu_type='relu', bn_type='BN')
else:
self.conv = nn.Sequential()
def forward(self, x):
return self.conv(x)
class SepConv(nn.Module):
def __init__(self, input_size, in_channels, out_channels, expansion, stride, kernel_size, groups=1, bn_type='BN'):
super(SepConv, self).__init__()
self.conv1 = ConvBNReLU(input_size, in_channels, in_channels, kernel_size=kernel_size, stride=stride,
bias=False, relu_type='relu', bn_type=bn_type, groups=in_channels)
self.conv2 = ConvBNReLU(input_size//stride, in_channels, out_channels, kernel_size=1, stride=1,
bias=False, relu_type='none', bn_type=bn_type, groups=groups)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
return out
class MBBlock(nn.Module):
def __init__(self, input_size, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1, has_se=False, bn='BN'):
super(MBBlock, self).__init__()
self.in_channels = in_channels
        self.out_channels = out_channels
self.has_se = has_se
self.stride = stride
self.groups = groups
mid_channels = in_channels * expansion
self.conv1 = ConvBNReLU(input_size, in_channels, mid_channels, kernel_size=1, stride=1, dilation=1,
bias=False, relu_type='relu', bn_type=bn, groups=groups)
self.conv2 = ConvBNReLU(input_size, mid_channels, mid_channels, kernel_size=kernel_size, stride=stride, dilation=dilation,
bias=False, relu_type='relu', bn_type=bn, groups=mid_channels)
self.conv3 = ConvBNReLU(input_size//self.stride, mid_channels, out_channels, kernel_size=1, stride=1, dilation=1,
bias=False, relu_type='none', bn_type=bn, groups=groups)
        if has_se:
self.se = SE(input_size, mid_channels, se_ratio=0.05)
if groups != 1:
            self.shuffle = ChannelShuffle(groups)
def forward(self, x):
out = self.conv1(x)
if self.groups != 1:
out = self.shuffle(out)
out = self.conv2(out)
if self.has_se:
out = out * self.se(out)
out = self.conv3(out)
if self.in_channels == self.out_channels and self.stride == 1:
out = out + x
        return out
| Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py",
"repo_id": "Cream",
"token_count": 3853
} | 293 |
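A hedged sketch of instantiating a block from the `OPS` table above; the spatial size and channel counts are illustrative.
import torch
from mmdet.models.bbox_heads.auto_head.mbblock_ops import OPS

# Inverted-residual block: expansion 6, kernel 3, stride 2.
op = OPS['ir_k3_e6'](56, 32, 64, 2)
op.eval()
with torch.no_grad():
    y = op(torch.randn(1, 32, 56, 56))
print(y.shape)  # torch.Size([1, 64, 28, 28])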
from .single_stage import SingleStageDetector
from ..registry import DETECTORS
@DETECTORS.register_module
class RetinaNet(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
| Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/retinanet.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/retinanet.py",
"repo_id": "Cream",
"token_count": 262
} | 294 |
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch.nn as nn
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule
from mmdet.core import mask_target, force_fp32, auto_fp16
@HEADS.register_module
class FCNMaskHead(nn.Module):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
upsample_method='deconv',
upsample_ratio=2,
num_classes=81,
class_agnostic=False,
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
super(FCNMaskHead, self).__init__()
if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
raise ValueError(
'Invalid upsample method {}, accepted methods '
'are "deconv", "nearest", "bilinear"'.format(upsample_method))
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size # WARN: not used and reserved
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = upsample_method
self.upsample_ratio = upsample_ratio
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
self.upsample = nn.ConvTranspose2d(
upsample_in_channels,
self.conv_out_channels,
self.upsample_ratio,
stride=self.upsample_ratio)
else:
self.upsample = nn.Upsample(
scale_factor=self.upsample_ratio, mode=self.upsample_method)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
            if m is None or isinstance(m, nn.Upsample):
continue
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_target(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
loss = dict()
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (tuple): original image size
            scale_factor (float | ndarray): scale factor of the resized image
            rescale (bool): if True, paste masks onto a canvas of the original
                image size; otherwise use the resized input size
Returns:
list[list]: encoded masks
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid().cpu().numpy()
assert isinstance(mask_pred, np.ndarray)
# when enabling mixed precision training, mask_pred may be float16
# numpy array
mask_pred = mask_pred.astype(np.float32)
cls_segms = [[] for _ in range(self.num_classes - 1)]
bboxes = det_bboxes.cpu().numpy()[:, :4]
labels = det_labels.cpu().numpy() + 1
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
for i in range(bboxes.shape[0]):
bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
label = labels[i]
w = max(bbox[2] - bbox[0] + 1, 1)
h = max(bbox[3] - bbox[1] + 1, 1)
if not self.class_agnostic:
mask_pred_ = mask_pred[i, label, :, :]
else:
mask_pred_ = mask_pred[i, 0, :, :]
im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
bbox_mask = mmcv.imresize(mask_pred_, (w, h))
bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
np.uint8)
im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F'))[0]
cls_segms[label - 1].append(rle)
return cls_segms
| Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py",
"repo_id": "Cream",
"token_count": 3664
} | 295 |
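A hedged sketch of the `FCNMaskHead` forward pass above on dummy RoI features; computing the loss or encoding masks additionally needs sampling results and an rcnn test config, which are not reproduced here.
import torch
from mmdet.models import build_head

mask_head = build_head(dict(
    type='FCNMaskHead',
    num_convs=4,
    in_channels=256,
    conv_out_channels=256,
    num_classes=81))
mask_head.init_weights()
roi_feats = torch.randn(8, 256, 14, 14)  # features for 8 sampled RoIs
mask_pred = mask_head(roi_feats)
print(mask_pred.shape)  # torch.Size([8, 81, 28, 28]) after the 2x deconv upsample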
from .non_local import NonLocal2D
from .generalized_attention import GeneralizedAttention
__all__ = ['NonLocal2D', 'GeneralizedAttention']
| Cream/CDARTS/CDARTS_detection/mmdet/models/plugins/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/plugins/__init__.py",
"repo_id": "Cream",
"token_count": 42
} | 296 |
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
| Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/__init__.py",
"repo_id": "Cream",
"token_count": 289
} | 297 |
import math
import torch
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from .. import masked_conv2d_cuda
class MaskedConv2dFunction(Function):
@staticmethod
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
assert mask.dim() == 3 and mask.size(0) == 1
assert features.dim() == 4 and features.size(0) == 1
assert features.size()[2:] == mask.size()[1:]
pad_h, pad_w = _pair(padding)
stride_h, stride_w = _pair(stride)
if stride_h != 1 or stride_w != 1:
raise ValueError(
                'Stride must be 1 in masked_conv2d currently.')
if not features.is_cuda:
raise NotImplementedError
out_channel, in_channel, kernel_h, kernel_w = weight.size()
batch_size = features.size(0)
out_h = int(
math.floor((features.size(2) + 2 * pad_h -
(kernel_h - 1) - 1) / stride_h + 1))
out_w = int(
math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
mask_inds = torch.nonzero(mask[0] > 0)
output = features.new_zeros(batch_size, out_channel, out_h, out_w)
if mask_inds.numel() > 0:
mask_h_idx = mask_inds[:, 0].contiguous()
mask_w_idx = mask_inds[:, 1].contiguous()
data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
mask_inds.size(0))
masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx,
mask_w_idx, kernel_h,
kernel_w, pad_h, pad_w,
data_col)
            masked_output = torch.addmm(bias[:, None],
                                        weight.view(out_channel, -1), data_col)
masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx,
mask_w_idx, out_h, out_w,
out_channel, output)
return output
@staticmethod
def backward(ctx, grad_output):
return (None, ) * 5
masked_conv2d = MaskedConv2dFunction.apply
| Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py",
"repo_id": "Cream",
"token_count": 1284
} | 298 |
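A hedged sketch of calling `masked_conv2d` above; it needs the compiled `masked_conv2d_cuda` extension and a GPU, so treat it as illustrative. Only spatial positions where the mask is positive are convolved.
import torch
from mmdet.ops.masked_conv.functions.masked_conv import masked_conv2d

features = torch.randn(1, 16, 32, 32, device='cuda')
mask = (torch.rand(1, 32, 32, device='cuda') > 0.5).float()
weight = torch.randn(8, 16, 3, 3, device='cuda')
bias = torch.zeros(8, device='cuda')
out = masked_conv2d(features, mask, weight, bias, 1, 1)  # padding=1, stride=1
print(out.shape)  # torch.Size([1, 8, 32, 32])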
from torch.autograd import Function
from .. import roi_align_cuda
class RoIAlignFunction(Function):
@staticmethod
def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
if isinstance(out_size, int):
out_h = out_size
out_w = out_size
elif isinstance(out_size, tuple):
assert len(out_size) == 2
assert isinstance(out_size[0], int)
assert isinstance(out_size[1], int)
out_h, out_w = out_size
else:
raise TypeError(
'"out_size" must be an integer or tuple of integers')
ctx.spatial_scale = spatial_scale
ctx.sample_num = sample_num
ctx.save_for_backward(rois)
ctx.feature_size = features.size()
batch_size, num_channels, data_height, data_width = features.size()
num_rois = rois.size(0)
output = features.new_zeros(num_rois, num_channels, out_h, out_w)
if features.is_cuda:
roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale,
sample_num, output)
else:
raise NotImplementedError
return output
@staticmethod
def backward(ctx, grad_output):
feature_size = ctx.feature_size
spatial_scale = ctx.spatial_scale
sample_num = ctx.sample_num
rois = ctx.saved_tensors[0]
assert (feature_size is not None and grad_output.is_cuda)
batch_size, num_channels, data_height, data_width = feature_size
out_w = grad_output.size(3)
out_h = grad_output.size(2)
grad_input = grad_rois = None
if ctx.needs_input_grad[0]:
grad_input = rois.new_zeros(batch_size, num_channels, data_height,
data_width)
roi_align_cuda.backward(grad_output.contiguous(), rois, out_h,
out_w, spatial_scale, sample_num,
grad_input)
return grad_input, grad_rois, None, None, None
roi_align = RoIAlignFunction.apply
| Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/functions/roi_align.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/functions/roi_align.py",
"repo_id": "Cream",
"token_count": 1059
} | 299 |
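A hedged sketch of the `roi_align` function above; it requires the compiled `roi_align_cuda` extension and CUDA tensors. Each RoI row is assumed to be (batch_index, x1, y1, x2, y2) in input-image coordinates.
import torch
from mmdet.ops.roi_align.functions.roi_align import roi_align

feats = torch.randn(2, 256, 50, 68, device='cuda')   # e.g. an FPN level at stride 8
rois = torch.tensor([[0., 16., 16., 112., 96.],
                     [1., 32., 48., 200., 176.]], device='cuda')
# Arguments (out_size, spatial_scale, sample_num) are passed positionally,
# since keyword arguments may not be supported by Function.apply.
pooled = roi_align(feats, rois, (7, 7), 1.0 / 8, 2)
print(pooled.shape)  # torch.Size([2, 256, 7, 7])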